From 9ee37c626cd1e39c0ab25d4a14c46a7beb51bfe7 Mon Sep 17 00:00:00 2001 From: adam-fowler Date: Thu, 27 Jun 2024 11:40:04 +0000 Subject: [PATCH] Update models from aws-sdk-go-v2 release-2024-06-26 --- Package.swift | 14 +- Sources/Soto/Services/ACMPCA/ACMPCA_api.swift | 7 +- .../AccessAnalyzer/AccessAnalyzer_api.swift | 75 + .../AccessAnalyzer_shapes.swift | 265 +- .../Soto/Services/Account/Account_api.swift | 41 +- .../Services/Account/Account_shapes.swift | 127 +- .../Soto/Services/Amplify/Amplify_api.swift | 8 +- .../Services/Amplify/Amplify_shapes.swift | 10 +- .../Soto/Services/AppTest/AppTest_api.swift | 578 ++ .../Services/AppTest/AppTest_shapes.swift | 3450 ++++++++ .../ApplicationSignals_api.swift | 437 + .../ApplicationSignals_shapes.swift | 1528 ++++ .../Services/Artifact/Artifact_shapes.swift | 6 +- Sources/Soto/Services/Athena/Athena_api.swift | 2 +- .../Soto/Services/Athena/Athena_shapes.swift | 14 +- .../AuditManager/AuditManager_api.swift | 15 +- .../AuditManager/AuditManager_shapes.swift | 89 +- .../AutoScaling/AutoScaling_api.swift | 26 +- .../AutoScaling/AutoScaling_shapes.swift | 88 +- Sources/Soto/Services/B2bi/B2bi_shapes.swift | 20 +- .../BackupStorage/BackupStorage_api.swift | 270 - .../BackupStorage/BackupStorage_shapes.swift | 660 -- Sources/Soto/Services/Batch/Batch_api.swift | 13 + .../Soto/Services/Batch/Batch_shapes.swift | 62 +- .../Soto/Services/Bedrock/Bedrock_api.swift | 10 + .../BedrockAgent/BedrockAgent_shapes.swift | 120 +- .../BedrockAgentRuntime_api.swift | 2 +- .../BedrockAgentRuntime_shapes.swift | 499 +- .../BedrockRuntime/BedrockRuntime_api.swift | 26 + .../BedrockRuntime_shapes.swift | 1579 +++- .../Soto/Services/Budgets/Budgets_api.swift | 45 +- .../Services/Budgets/Budgets_shapes.swift | 320 +- .../Soto/Services/Chatbot/Chatbot_api.swift | 39 + .../Services/Chatbot/Chatbot_shapes.swift | 173 +- Sources/Soto/Services/Cloud9/Cloud9_api.swift | 28 + .../CloudFormation_shapes.swift | 22 +- .../CloudHSMV2/CloudHSMV2_shapes.swift | 39 +- .../Services/CloudTrail/CloudTrail_api.swift | 4 +- .../CloudTrail/CloudTrail_shapes.swift | 43 +- .../CodeArtifact/CodeArtifact_api.swift | 4 +- .../CodeArtifact/CodeArtifact_shapes.swift | 51 +- .../Services/CodeBuild/CodeBuild_shapes.swift | 139 +- .../CodeGuruSecurity_api.swift | 16 +- .../CodeGuruSecurity_shapes.swift | 97 +- .../CodePipeline/CodePipeline_api.swift | 4 +- .../CodePipeline/CodePipeline_shapes.swift | 3 +- .../ComputeOptimizer_api.swift | 39 + .../ComputeOptimizer_shapes.swift | 663 +- .../Soto/Services/Connect/Connect_api.swift | 110 +- .../Services/Connect/Connect_shapes.swift | 597 +- .../ControlTower/ControlTower_api.swift | 132 +- .../ControlTower/ControlTower_shapes.swift | 316 +- .../CostOptimizationHub_shapes.swift | 101 +- .../CustomerProfiles_api.swift | 17 +- .../CustomerProfiles_shapes.swift | 74 +- .../Services/DataSync/DataSync_shapes.swift | 1 + .../Soto/Services/DataZone/DataZone_api.swift | 121 + .../Services/DataZone/DataZone_shapes.swift | 606 +- .../DirectConnect/DirectConnect_api.swift | 2 + Sources/Soto/Services/DocDB/DocDB_api.swift | 6 +- .../Soto/Services/DynamoDB/DynamoDB_api.swift | 24 +- .../Services/DynamoDB/DynamoDB_shapes.swift | 22 +- Sources/Soto/Services/EC2/EC2_api.swift | 192 +- Sources/Soto/Services/EC2/EC2_shapes.swift | 498 +- Sources/Soto/Services/ECR/ECR_shapes.swift | 3 +- Sources/Soto/Services/ECS/ECS_api.swift | 4 +- Sources/Soto/Services/ECS/ECS_shapes.swift | 87 +- Sources/Soto/Services/EKS/EKS_api.swift | 4 +- Sources/Soto/Services/EKS/EKS_shapes.swift 
| 161 +- .../EMRContainers/EMRContainers_api.swift | 2 + .../EMRServerless/EMRServerless_api.swift | 46 + .../EMRServerless/EMRServerless_shapes.swift | 263 +- .../ElastiCache/ElastiCache_api.swift | 2 +- .../ElastiCache/ElastiCache_shapes.swift | 8 +- .../ElasticLoadBalancingV2_shapes.swift | 11 +- .../EventBridge/EventBridge_api.swift | 31 +- .../EventBridge/EventBridge_shapes.swift | 155 +- Sources/Soto/Services/FMS/FMS_api.swift | 1 + Sources/Soto/Services/FMS/FMS_shapes.swift | 8 +- Sources/Soto/Services/FSx/FSx_api.swift | 6 +- Sources/Soto/Services/FSx/FSx_shapes.swift | 111 +- .../Soto/Services/Firehose/Firehose_api.swift | 4 +- .../Services/Firehose/Firehose_shapes.swift | 126 +- .../GlobalAccelerator_shapes.swift | 20 +- Sources/Soto/Services/Glue/Glue_api.swift | 97 +- Sources/Soto/Services/Glue/Glue_shapes.swift | 769 +- .../Soto/Services/Grafana/Grafana_api.swift | 143 +- .../Services/Grafana/Grafana_shapes.swift | 403 +- .../GreengrassV2/GreengrassV2_api.swift | 2 +- .../GreengrassV2/GreengrassV2_shapes.swift | 30 +- .../Services/GuardDuty/GuardDuty_api.swift | 65 + .../Services/GuardDuty/GuardDuty_shapes.swift | 447 +- .../Services/Honeycode/Honeycode_api.swift | 404 - .../Services/Honeycode/Honeycode_shapes.swift | 1654 ---- .../IVSRealTime/IVSRealTime_api.swift | 88 +- .../IVSRealTime/IVSRealTime_shapes.swift | 333 +- .../Imagebuilder/Imagebuilder_shapes.swift | 48 +- .../IoTFleetWise/IoTFleetWise_api.swift | 2 + .../IoTFleetWise/IoTFleetWise_shapes.swift | 21 +- .../IoTTwinMaker/IoTTwinMaker_shapes.swift | 1 + .../IoTWireless/IoTWireless_shapes.swift | 3 + Sources/Soto/Services/KMS/KMS_api.swift | 18 +- Sources/Soto/Services/KMS/KMS_shapes.swift | 112 +- .../Soto/Services/Kafka/Kafka_shapes.swift | 42 +- Sources/Soto/Services/Kendra/Kendra_api.swift | 1 + .../KinesisAnalyticsV2_api.swift | 156 +- .../KinesisAnalyticsV2_shapes.swift | 314 +- .../KinesisVideo/KinesisVideo_api.swift | 14 + .../KinesisVideoArchivedMedia_api.swift | 14 + .../KinesisVideoMedia_api.swift | 14 + .../KinesisVideoSignaling_api.swift | 14 + .../KinesisVideoWebRTCStorage_api.swift | 14 + .../LakeFormation/LakeFormation_api.swift | 13 + .../LakeFormation/LakeFormation_shapes.swift | 17 + .../Soto/Services/Lambda/Lambda_shapes.swift | 1 + .../LaunchWizard/LaunchWizard_api.swift | 60 +- .../LaunchWizard/LaunchWizard_shapes.swift | 283 +- .../Services/Lightsail/Lightsail_shapes.swift | 54 +- .../Soto/Services/Location/Location_api.swift | 63 +- .../Services/Location/Location_shapes.swift | 506 +- Sources/Soto/Services/MWAA/MWAA_shapes.swift | 48 +- Sources/Soto/Services/Macie2/Macie2_api.swift | 71 +- .../Soto/Services/Macie2/Macie2_shapes.swift | 236 +- .../MailManager/MailManager_api.swift | 957 ++ .../MailManager/MailManager_shapes.swift | 3958 +++++++++ .../ManagedBlockchain_shapes.swift | 8 +- .../MediaConvert/MediaConvert_api.swift | 49 +- .../MediaConvert/MediaConvert_shapes.swift | 110 +- .../Services/MediaLive/MediaLive_shapes.swift | 14 +- .../MediaPackageV2_shapes.swift | 96 +- .../MedicalImaging/MedicalImaging_api.swift | 4 +- .../MedicalImaging_shapes.swift | 10 +- .../Soto/Services/Neptune/Neptune_api.swift | 6 +- .../NetworkManager_shapes.swift | 392 +- Sources/Soto/Services/OSIS/OSIS_shapes.swift | 138 +- .../OpenSearch/OpenSearch_shapes.swift | 211 +- .../Soto/Services/OpsWorks/OpsWorks_api.swift | 54 +- .../Services/OpsWorks/OpsWorks_shapes.swift | 262 +- Sources/Soto/Services/PI/PI_api.swift | 1 + Sources/Soto/Services/PI/PI_shapes.swift | 32 +- .../PcaConnectorScep_api.swift | 302 + 
.../PcaConnectorScep_shapes.swift | 774 ++ .../Services/Pinpoint/Pinpoint_shapes.swift | 41 +- Sources/Soto/Services/Pipes/Pipes_api.swift | 4 +- .../Soto/Services/Pipes/Pipes_shapes.swift | 263 +- .../Soto/Services/Polly/Polly_shapes.swift | 19 +- .../Services/QBusiness/QBusiness_shapes.swift | 44 +- .../Services/QuickSight/QuickSight_api.swift | 26 + .../QuickSight/QuickSight_shapes.swift | 171 +- Sources/Soto/Services/RAM/RAM_api.swift | 3 - Sources/Soto/Services/RDS/RDS_api.swift | 8 +- Sources/Soto/Services/RDS/RDS_shapes.swift | 98 +- .../Soto/Services/Redshift/Redshift_api.swift | 2 +- .../Services/Redshift/Redshift_shapes.swift | 28 +- .../Resiliencehub/Resiliencehub_api.swift | 42 + .../Resiliencehub/Resiliencehub_shapes.swift | 140 +- .../Route53Profiles/Route53Profiles_api.swift | 2 +- .../Route53Profiles_shapes.swift | 6 +- .../Route53Resolver_shapes.swift | 18 +- Sources/Soto/Services/S3/S3_api.swift | 12 +- .../Services/S3Control/S3Control_api.swift | 4 + Sources/Soto/Services/SESv2/SESv2_api.swift | 8 +- .../Soto/Services/SESv2/SESv2_shapes.swift | 29 +- Sources/Soto/Services/SNS/SNS_api.swift | 2 +- Sources/Soto/Services/SNS/SNS_shapes.swift | 10 +- Sources/Soto/Services/SQS/SQS_api.swift | 57 +- Sources/Soto/Services/SQS/SQS_shapes.swift | 71 +- .../Soto/Services/SSOOIDC/SSOOIDC_api.swift | 2 +- .../Services/SSOOIDC/SSOOIDC_shapes.swift | 37 +- Sources/Soto/Services/SWF/SWF_api.swift | 28 +- Sources/Soto/Services/SWF/SWF_shapes.swift | 49 + .../Services/SageMaker/SageMaker_api.swift | 195 +- .../Services/SageMaker/SageMaker_shapes.swift | 1015 ++- .../SecretsManager/SecretsManager_api.swift | 4 +- .../SecretsManager_shapes.swift | 31 +- .../SecurityHub/SecurityHub_shapes.swift | 133 +- .../SecurityLake/SecurityLake_api.swift | 2 + .../SecurityLake/SecurityLake_shapes.swift | 40 +- Sources/Soto/Services/SsmSap/SsmSap_api.swift | 69 + .../Soto/Services/SsmSap/SsmSap_shapes.swift | 180 + .../StorageGateway/StorageGateway_api.swift | 9 +- .../StorageGateway_shapes.swift | 58 +- .../TaxSettings/TaxSettings_api.swift | 208 + .../TaxSettings/TaxSettings_shapes.swift | 1343 +++ .../Soto/Services/Transfer/Transfer_api.swift | 1 + .../Services/Transfer/Transfer_shapes.swift | 12 +- .../Services/VPCLattice/VPCLattice_api.swift | 22 +- .../VPCLattice/VPCLattice_shapes.swift | 114 +- .../VerifiedPermissions_api.swift | 6 +- .../VerifiedPermissions_shapes.swift | 757 +- .../Soto/Services/WAFV2/WAFV2_shapes.swift | 55 +- .../WorkSpacesThinClient_shapes.swift | 35 +- .../WorkSpacesWeb/WorkSpacesWeb_api.swift | 2 +- .../WorkSpacesWeb/WorkSpacesWeb_shapes.swift | 37 +- models/accessanalyzer.json | 757 +- models/account.json | 350 +- models/acm-pca.json | 35 +- models/acm.json | 76 +- models/amplify.json | 22 +- models/application-auto-scaling.json | 17 +- models/application-discovery-service.json | 15 +- models/application-signals.json | 3192 +++++++ models/apptest.json | 6288 +++++++++++++ models/artifact.json | 6 + models/athena.json | 31 +- models/auditmanager.json | 219 +- models/auto-scaling.json | 194 +- models/b2bi.json | 62 +- models/backupstorage.json | 2071 ----- models/batch.json | 121 +- models/bedrock-agent-runtime.json | 1248 ++- models/bedrock-agent.json | 126 +- models/bedrock-runtime.json | 2410 ++++- models/budgets.json | 416 +- models/chatbot.json | 338 + models/cloudformation.json | 33 +- models/cloudfront.json | 44 +- models/cloudhsm-v2.json | 97 +- models/cloudtrail.json | 113 +- models/codeartifact.json | 60 +- models/codebuild.json | 161 +- 
models/codeguru-security.json | 198 +- models/codepipeline.json | 58 +- models/cognito-identity-provider.json | 25 +- models/compute-optimizer.json | 6126 ++++++++----- models/config-service.json | 15 +- models/connect.json | 1022 ++- models/controltower.json | 830 +- models/cost-and-usage-report-service.json | 15 +- models/cost-optimization-hub.json | 140 +- models/customer-profiles.json | 218 +- models/datasync.json | 6 + models/datazone.json | 1084 ++- models/direct-connect.json | 68 +- models/directory-service.json | 17 +- models/dynamodb.json | 48 +- models/ec2.json | 1045 ++- models/ecr.json | 8 +- models/ecs.json | 97 +- models/efs.json | 17 +- models/eks.json | 292 +- models/elastic-load-balancing-v2.json | 18 +- models/elastic-transcoder.json | 55 +- models/elasticache.json | 10 +- models/emr-serverless.json | 392 + models/endpoints/endpoints.json | 635 +- models/eventbridge.json | 424 +- models/firehose.json | 137 +- models/fms.json | 12 +- models/fsx.json | 150 +- models/global-accelerator.json | 12 + models/glue.json | 1216 ++- models/grafana.json | 894 +- models/greengrassv2.json | 59 +- models/guardduty.json | 795 +- models/honeycode.json | 3710 -------- models/imagebuilder.json | 24 +- models/iot-wireless.json | 18 + models/iotfleetwise.json | 40 +- models/iottwinmaker.json | 4 + models/ivs-realtime.json | 620 +- models/kafka.json | 57 + models/kinesis-analytics-v2.json | 495 +- models/kms.json | 213 +- models/lakeformation.json | 55 + models/launch-wizard.json | 618 +- models/lightsail.json | 171 +- models/location.json | 1176 ++- models/macie2.json | 462 +- models/mailmanager.json | 7827 +++++++++++++++++ models/managedblockchain.json | 8 +- models/mediaconvert.json | 179 +- models/medialive.json | 27 + models/mediapackagev2.json | 232 +- models/medical-imaging.json | 18 +- models/mwaa.json | 64 +- models/networkmanager.json | 663 +- models/opensearch.json | 298 +- models/opsworks.json | 392 +- models/osis.json | 236 +- models/pca-connector-scep.json | 2334 +++++ models/pi.json | 92 +- models/pinpoint.json | 50 + models/pipes.json | 627 +- models/polly.json | 39 +- models/qbusiness.json | 58 +- models/quicksight.json | 282 +- models/rds.json | 102 +- models/redshift.json | 30 +- models/rekognition.json | 15 +- models/resiliencehub.json | 289 +- models/route-53-domains.json | 32 +- models/route53profiles.json | 6 +- models/route53resolver.json | 6 +- models/s3.json | 222 +- models/sagemaker.json | 1823 +++- models/secrets-manager.json | 77 +- models/securityhub.json | 142 +- models/securitylake.json | 12 +- models/sesv2.json | 41 +- models/shield.json | 55 +- models/snowball.json | 15 +- models/sns.json | 12 +- models/sqs.json | 145 +- models/ssm-sap.json | 333 +- models/ssm.json | 3 +- models/sso-oidc.json | 187 +- models/storage-gateway.json | 89 +- models/swf.json | 154 +- models/taxsettings.json | 3122 +++++++ models/transfer.json | 640 +- models/verifiedpermissions.json | 658 +- models/vpc-lattice.json | 234 +- models/waf.json | 15 + models/wafv2.json | 82 +- models/workspaces-thin-client.json | 54 + models/workspaces-web.json | 64 +- 317 files changed, 82922 insertions(+), 16613 deletions(-) create mode 100644 Sources/Soto/Services/AppTest/AppTest_api.swift create mode 100644 Sources/Soto/Services/AppTest/AppTest_shapes.swift create mode 100644 Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift create mode 100644 Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift delete mode 100644 
Sources/Soto/Services/BackupStorage/BackupStorage_api.swift delete mode 100644 Sources/Soto/Services/BackupStorage/BackupStorage_shapes.swift delete mode 100644 Sources/Soto/Services/Honeycode/Honeycode_api.swift delete mode 100644 Sources/Soto/Services/Honeycode/Honeycode_shapes.swift create mode 100644 Sources/Soto/Services/MailManager/MailManager_api.swift create mode 100644 Sources/Soto/Services/MailManager/MailManager_shapes.swift create mode 100644 Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift create mode 100644 Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_shapes.swift create mode 100644 Sources/Soto/Services/TaxSettings/TaxSettings_api.swift create mode 100644 Sources/Soto/Services/TaxSettings/TaxSettings_shapes.swift create mode 100644 models/application-signals.json create mode 100644 models/apptest.json delete mode 100644 models/backupstorage.json delete mode 100644 models/honeycode.json create mode 100644 models/mailmanager.json create mode 100644 models/pca-connector-scep.json create mode 100644 models/taxsettings.json diff --git a/Package.swift b/Package.swift index 01b68e2b45..833085273b 100644 --- a/Package.swift +++ b/Package.swift @@ -48,11 +48,13 @@ let package = Package( .library(name: "SotoAppRunner", targets: ["SotoAppRunner"]), .library(name: "SotoAppStream", targets: ["SotoAppStream"]), .library(name: "SotoAppSync", targets: ["SotoAppSync"]), + .library(name: "SotoAppTest", targets: ["SotoAppTest"]), .library(name: "SotoAppflow", targets: ["SotoAppflow"]), .library(name: "SotoApplicationAutoScaling", targets: ["SotoApplicationAutoScaling"]), .library(name: "SotoApplicationCostProfiler", targets: ["SotoApplicationCostProfiler"]), .library(name: "SotoApplicationDiscoveryService", targets: ["SotoApplicationDiscoveryService"]), .library(name: "SotoApplicationInsights", targets: ["SotoApplicationInsights"]), + .library(name: "SotoApplicationSignals", targets: ["SotoApplicationSignals"]), .library(name: "SotoArtifact", targets: ["SotoArtifact"]), .library(name: "SotoAthena", targets: ["SotoAthena"]), .library(name: "SotoAuditManager", targets: ["SotoAuditManager"]), @@ -62,7 +64,6 @@ let package = Package( .library(name: "SotoBCMDataExports", targets: ["SotoBCMDataExports"]), .library(name: "SotoBackup", targets: ["SotoBackup"]), .library(name: "SotoBackupGateway", targets: ["SotoBackupGateway"]), - .library(name: "SotoBackupStorage", targets: ["SotoBackupStorage"]), .library(name: "SotoBatch", targets: ["SotoBatch"]), .library(name: "SotoBedrock", targets: ["SotoBedrock"]), .library(name: "SotoBedrockAgent", targets: ["SotoBedrockAgent"]), @@ -188,7 +189,6 @@ let package = Package( .library(name: "SotoGuardDuty", targets: ["SotoGuardDuty"]), .library(name: "SotoHealth", targets: ["SotoHealth"]), .library(name: "SotoHealthLake", targets: ["SotoHealthLake"]), - .library(name: "SotoHoneycode", targets: ["SotoHoneycode"]), .library(name: "SotoIAM", targets: ["SotoIAM"]), .library(name: "SotoIVS", targets: ["SotoIVS"]), .library(name: "SotoIVSRealTime", targets: ["SotoIVSRealTime"]), @@ -250,6 +250,7 @@ let package = Package( .library(name: "SotoMWAA", targets: ["SotoMWAA"]), .library(name: "SotoMachineLearning", targets: ["SotoMachineLearning"]), .library(name: "SotoMacie2", targets: ["SotoMacie2"]), + .library(name: "SotoMailManager", targets: ["SotoMailManager"]), .library(name: "SotoManagedBlockchain", targets: ["SotoManagedBlockchain"]), .library(name: "SotoManagedBlockchainQuery", targets: ["SotoManagedBlockchainQuery"]), .library(name: 
"SotoMarketplaceAgreement", targets: ["SotoMarketplaceAgreement"]), @@ -297,6 +298,7 @@ let package = Package( .library(name: "SotoPaymentCryptography", targets: ["SotoPaymentCryptography"]), .library(name: "SotoPaymentCryptographyData", targets: ["SotoPaymentCryptographyData"]), .library(name: "SotoPcaConnectorAd", targets: ["SotoPcaConnectorAd"]), + .library(name: "SotoPcaConnectorScep", targets: ["SotoPcaConnectorScep"]), .library(name: "SotoPersonalize", targets: ["SotoPersonalize"]), .library(name: "SotoPersonalizeEvents", targets: ["SotoPersonalizeEvents"]), .library(name: "SotoPersonalizeRuntime", targets: ["SotoPersonalizeRuntime"]), @@ -383,6 +385,7 @@ let package = Package( .library(name: "SotoSupport", targets: ["SotoSupport"]), .library(name: "SotoSupportApp", targets: ["SotoSupportApp"]), .library(name: "SotoSynthetics", targets: ["SotoSynthetics"]), + .library(name: "SotoTaxSettings", targets: ["SotoTaxSettings"]), .library(name: "SotoTextract", targets: ["SotoTextract"]), .library(name: "SotoTimestreamInfluxDB", targets: ["SotoTimestreamInfluxDB"]), .library(name: "SotoTimestreamQuery", targets: ["SotoTimestreamQuery"]), @@ -434,11 +437,13 @@ let package = Package( .target(name: "SotoAppRunner", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/AppRunner", swiftSettings: swiftSettings), .target(name: "SotoAppStream", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/AppStream", swiftSettings: swiftSettings), .target(name: "SotoAppSync", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/AppSync", swiftSettings: swiftSettings), + .target(name: "SotoAppTest", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/AppTest", swiftSettings: swiftSettings), .target(name: "SotoAppflow", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Appflow", swiftSettings: swiftSettings), .target(name: "SotoApplicationAutoScaling", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ApplicationAutoScaling", swiftSettings: swiftSettings), .target(name: "SotoApplicationCostProfiler", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ApplicationCostProfiler", swiftSettings: swiftSettings), .target(name: "SotoApplicationDiscoveryService", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ApplicationDiscoveryService", swiftSettings: swiftSettings), .target(name: "SotoApplicationInsights", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ApplicationInsights", swiftSettings: swiftSettings), + .target(name: "SotoApplicationSignals", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ApplicationSignals", swiftSettings: swiftSettings), .target(name: "SotoArtifact", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Artifact", swiftSettings: swiftSettings), .target(name: "SotoAthena", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Athena", swiftSettings: swiftSettings), .target(name: "SotoAuditManager", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/AuditManager", swiftSettings: swiftSettings), @@ -448,7 
+453,6 @@ let package = Package( .target(name: "SotoBCMDataExports", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/BCMDataExports", swiftSettings: swiftSettings), .target(name: "SotoBackup", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Backup", swiftSettings: swiftSettings), .target(name: "SotoBackupGateway", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/BackupGateway", swiftSettings: swiftSettings), - .target(name: "SotoBackupStorage", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/BackupStorage", swiftSettings: swiftSettings), .target(name: "SotoBatch", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Batch", swiftSettings: swiftSettings), .target(name: "SotoBedrock", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Bedrock", swiftSettings: swiftSettings), .target(name: "SotoBedrockAgent", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/BedrockAgent", swiftSettings: swiftSettings), @@ -574,7 +578,6 @@ let package = Package( .target(name: "SotoGuardDuty", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/GuardDuty", swiftSettings: swiftSettings), .target(name: "SotoHealth", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Health", swiftSettings: swiftSettings), .target(name: "SotoHealthLake", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/HealthLake", swiftSettings: swiftSettings), - .target(name: "SotoHoneycode", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Honeycode", swiftSettings: swiftSettings), .target(name: "SotoIAM", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/IAM", swiftSettings: swiftSettings), .target(name: "SotoIVS", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/IVS", swiftSettings: swiftSettings), .target(name: "SotoIVSRealTime", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/IVSRealTime", swiftSettings: swiftSettings), @@ -636,6 +639,7 @@ let package = Package( .target(name: "SotoMWAA", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MWAA", swiftSettings: swiftSettings), .target(name: "SotoMachineLearning", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MachineLearning", swiftSettings: swiftSettings), .target(name: "SotoMacie2", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Macie2", swiftSettings: swiftSettings), + .target(name: "SotoMailManager", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MailManager", swiftSettings: swiftSettings), .target(name: "SotoManagedBlockchain", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ManagedBlockchain", swiftSettings: swiftSettings), .target(name: "SotoManagedBlockchainQuery", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/ManagedBlockchainQuery", 
swiftSettings: swiftSettings), .target(name: "SotoMarketplaceAgreement", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MarketplaceAgreement", swiftSettings: swiftSettings), @@ -683,6 +687,7 @@ let package = Package( .target(name: "SotoPaymentCryptography", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PaymentCryptography", swiftSettings: swiftSettings), .target(name: "SotoPaymentCryptographyData", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PaymentCryptographyData", swiftSettings: swiftSettings), .target(name: "SotoPcaConnectorAd", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PcaConnectorAd", swiftSettings: swiftSettings), + .target(name: "SotoPcaConnectorScep", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PcaConnectorScep", swiftSettings: swiftSettings), .target(name: "SotoPersonalize", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Personalize", swiftSettings: swiftSettings), .target(name: "SotoPersonalizeEvents", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PersonalizeEvents", swiftSettings: swiftSettings), .target(name: "SotoPersonalizeRuntime", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/PersonalizeRuntime", swiftSettings: swiftSettings), @@ -769,6 +774,7 @@ let package = Package( .target(name: "SotoSupport", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Support", swiftSettings: swiftSettings), .target(name: "SotoSupportApp", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SupportApp", swiftSettings: swiftSettings), .target(name: "SotoSynthetics", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Synthetics", swiftSettings: swiftSettings), + .target(name: "SotoTaxSettings", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/TaxSettings", swiftSettings: swiftSettings), .target(name: "SotoTextract", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Textract", swiftSettings: swiftSettings), .target(name: "SotoTimestreamInfluxDB", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/TimestreamInfluxDB", swiftSettings: swiftSettings), .target(name: "SotoTimestreamQuery", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/TimestreamQuery", swiftSettings: swiftSettings), diff --git a/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift b/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift index 7f9ca8cde8..e8b3f0b803 100644 --- a/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift +++ b/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift @@ -372,8 +372,8 @@ public struct ACMPCA: AWSService { /// certificate, if any, that your root CA signed must be next to last. The /// subordinate certificate signed by the preceding subordinate CA must come next, /// and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. 
Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA - /// certificate or chain. Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following extensions when they are marked critical in an - /// imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension + /// certificate or chain. Authority key identifier Basic constraints (must be marked critical) Certificate policies Extended key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints Policy mappings Subject alternative name Subject directory attributes Subject key identifier Subject information access Amazon Web Services Private CA rejects the following extensions when they are marked critical in an + /// imported CA certificate or chain. Authority information access CRL distribution points Freshest CRL Policy constraints Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical not contained on the preceding list of allowed extensions. @Sendable public func importCertificateAuthorityCertificate(_ input: ImportCertificateAuthorityCertificateRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -712,6 +712,7 @@ extension ACMPCA { acceptors: [ .init(state: .success, matcher: try! JMESPathMatcher("auditReportStatus", expected: "SUCCESS")), .init(state: .failure, matcher: try! 
JMESPathMatcher("auditReportStatus", expected: "FAILED")), + .init(state: .failure, matcher: AWSErrorCodeMatcher("AccessDeniedException")), ], minDelayTime: .seconds(3), command: self.describeCertificateAuthorityAuditReport @@ -728,6 +729,7 @@ extension ACMPCA { acceptors: [ .init(state: .success, matcher: AWSSuccessMatcher()), .init(state: .retry, matcher: AWSErrorCodeMatcher("RequestInProgressException")), + .init(state: .failure, matcher: AWSErrorCodeMatcher("AccessDeniedException")), ], minDelayTime: .seconds(3), command: self.getCertificateAuthorityCsr @@ -744,6 +746,7 @@ extension ACMPCA { acceptors: [ .init(state: .success, matcher: AWSSuccessMatcher()), .init(state: .retry, matcher: AWSErrorCodeMatcher("RequestInProgressException")), + .init(state: .failure, matcher: AWSErrorCodeMatcher("AccessDeniedException")), ], minDelayTime: .seconds(1), command: self.getCertificate diff --git a/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_api.swift b/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_api.swift index 7f1ffce878..ac8b0dbfc8 100644 --- a/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_api.swift +++ b/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_api.swift @@ -81,6 +81,7 @@ public struct AccessAnalyzer: AWSService { static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ "ca-central-1": "access-analyzer-fips.ca-central-1.amazonaws.com", + "ca-west-1": "access-analyzer-fips.ca-west-1.amazonaws.com", "us-east-1": "access-analyzer-fips.us-east-1.amazonaws.com", "us-east-2": "access-analyzer-fips.us-east-2.amazonaws.com", "us-gov-east-1": "access-analyzer.us-gov-east-1.amazonaws.com", @@ -144,6 +145,19 @@ public struct AccessAnalyzer: AWSService { ) } + /// Checks whether a resource policy can grant public access to the specified resource type. + @Sendable + public func checkNoPublicAccess(_ input: CheckNoPublicAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CheckNoPublicAccessResponse { + return try await self.client.execute( + operation: "CheckNoPublicAccess", + path: "/policy/check-no-public-access", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an access preview that allows you to preview IAM Access Analyzer findings for your resource before deploying resource permissions. @Sendable public func createAccessPreview(_ input: CreateAccessPreviewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAccessPreviewResponse { @@ -209,6 +223,19 @@ public struct AccessAnalyzer: AWSService { ) } + /// Creates a recommendation for an unused permissions finding. + @Sendable + public func generateFindingRecommendation(_ input: GenerateFindingRecommendationRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "GenerateFindingRecommendation", + path: "/recommendation/{id}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves information about an access preview for the specified analyzer. @Sendable public func getAccessPreview(_ input: GetAccessPreviewRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAccessPreviewResponse { @@ -274,6 +301,19 @@ public struct AccessAnalyzer: AWSService { ) } + /// Retrieves information about a finding recommendation for the specified analyzer. 
+ @Sendable + public func getFindingRecommendation(_ input: GetFindingRecommendationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFindingRecommendationResponse { + return try await self.client.execute( + operation: "GetFindingRecommendation", + path: "/recommendation/{id}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves information about the specified finding. GetFinding and GetFindingV2 both use access-analyzer:GetFinding in the Action element of an IAM policy statement. You must have permission to perform the access-analyzer:GetFinding action. @Sendable public func getFindingV2(_ input: GetFindingV2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFindingV2Response { @@ -517,3 +557,38 @@ extension AccessAnalyzer { self.config = from.config.with(patch: patch) } } + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension AccessAnalyzer { + /// Retrieves information about a finding recommendation for the specified analyzer. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func getFindingRecommendationPaginator( + _ input: GetFindingRecommendationRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.getFindingRecommendation, + inputKey: \GetFindingRecommendationRequest.nextToken, + outputKey: \GetFindingRecommendationResponse.nextToken, + logger: logger + ) + } +} + +extension AccessAnalyzer.GetFindingRecommendationRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AccessAnalyzer.GetFindingRecommendationRequest { + return .init( + analyzerArn: self.analyzerArn, + id: self.id, + maxResults: self.maxResults, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_shapes.swift b/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_shapes.swift index f52e7d6038..53a0db1a8f 100644 --- a/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_shapes.swift +++ b/Sources/Soto/Services/AccessAnalyzer/AccessAnalyzer_shapes.swift @@ -32,6 +32,28 @@ extension AccessAnalyzer { public var description: String { return self.rawValue } } + public enum AccessCheckResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case awsDynamodbStream = "AWS::DynamoDB::Stream" + case awsDynamodbTable = "AWS::DynamoDB::Table" + case awsEFSFilesystem = "AWS::EFS::FileSystem" + case awsIAMAssumerolepolicydocument = "AWS::IAM::AssumeRolePolicyDocument" + case awsKMSKey = "AWS::KMS::Key" + case awsKinesisStream = "AWS::Kinesis::Stream" + case awsKinesisStreamconsumer = "AWS::Kinesis::StreamConsumer" + case awsLambdaFunction = "AWS::Lambda::Function" + case awsOpensearchserviceDomain = "AWS::OpenSearchService::Domain" + case awsS3Accesspoint = "AWS::S3::AccessPoint" + case awsS3Bucket = "AWS::S3::Bucket" + case awsS3Glacier = "AWS::S3::Glacier" + case awsS3ExpressDirectorybucket = "AWS::S3Express::DirectoryBucket" + case awsS3OutpostsAccesspoint = "AWS::S3Outposts::AccessPoint" + case awsS3OutpostsBucket = "AWS::S3Outposts::Bucket" + case awsSNSTopic = "AWS::SNS::Topic" + case awsSQSQueue = "AWS::SQS::Queue" + case awsSecretsmanagerSecret = "AWS::SecretsManager::Secret" + public var description: String { return self.rawValue } + } + public enum AccessPreviewStatus: String,
CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case completed = "COMPLETED" case creating = "CREATING" @@ -74,6 +96,12 @@ extension AccessAnalyzer { public var description: String { return self.rawValue } } + public enum CheckNoPublicAccessResult: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fail = "FAIL" + case pass = "PASS" + public var description: String { return self.rawValue } + } + public enum FindingChangeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case changed = "CHANGED" case new = "NEW" @@ -180,6 +208,17 @@ extension AccessAnalyzer { public var description: String { return self.rawValue } } + public enum RecommendationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case unusedPermissionRecommendation = "UnusedPermissionRecommendation" + public var description: String { return self.rawValue } + } + + public enum RecommendedRemediationAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case createPolicy = "CREATE_POLICY" + case detachPolicy = "DETACH_POLICY" + public var description: String { return self.rawValue } + } + public enum ResourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case awsDynamodbStream = "AWS::DynamoDB::Stream" case awsDynamodbTable = "AWS::DynamoDB::Table" @@ -200,6 +239,13 @@ extension AccessAnalyzer { public var description: String { return self.rawValue } } + public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case succeeded = "SUCCEEDED" + public var description: String { return self.rawValue } + } + public enum ValidatePolicyFindingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case error = "ERROR" case securityWarning = "SECURITY_WARNING" @@ -560,14 +606,24 @@ extension AccessAnalyzer { public struct Access: AWSEncodableShape { /// A list of actions for the access permissions. Any strings that can be used as an action in an IAM policy can be used in the list of actions to check. - public let actions: [String] + public let actions: [String]? + /// A list of resources for the access permissions. Any strings that can be used as a resource in an IAM policy can be used in the list of resources to check. + public let resources: [String]? - public init(actions: [String]) { + public init(actions: [String]? = nil, resources: [String]? = nil) { self.actions = actions + self.resources = resources + } + + public func validate(name: String) throws { + try self.resources?.forEach { + try validate($0, name: "resources[]", parent: name, max: 2048) + } } private enum CodingKeys: String, CodingKey { case actions = "actions" + case resources = "resources" } } @@ -922,7 +978,7 @@ extension AccessAnalyzer { } public struct CheckAccessNotGrantedRequest: AWSEncodableShape { - /// An access object containing the permissions that shouldn't be granted by the specified policy. + /// An access object containing the permissions that shouldn't be granted by the specified policy. If only actions are specified, IAM Access Analyzer checks for access of the actions on all resources in the policy. If only resources are specified, then IAM Access Analyzer checks which actions have access to the specified resources. 
If both actions and resources are specified, then IAM Access Analyzer checks which of the specified actions have access to the specified resources. public let access: [Access] /// The JSON policy document to use as the content for the policy. public let policyDocument: String @@ -935,6 +991,12 @@ extension AccessAnalyzer { self.policyType = policyType } + public func validate(name: String) throws { + try self.access.forEach { + try $0.validate(name: "\(name).access[]") + } + } + private enum CodingKeys: String, CodingKey { case access = "access" case policyDocument = "policyDocument" @@ -1005,6 +1067,44 @@ extension AccessAnalyzer { } } + public struct CheckNoPublicAccessRequest: AWSEncodableShape { + /// The JSON policy document to evaluate for public access. + public let policyDocument: String + /// The type of resource to evaluate for public access. For example, to check for public access to Amazon S3 buckets, you can choose AWS::S3::Bucket for the resource type. For resource types not supported as valid values, IAM Access Analyzer will return an error. + public let resourceType: AccessCheckResourceType + + public init(policyDocument: String, resourceType: AccessCheckResourceType) { + self.policyDocument = policyDocument + self.resourceType = resourceType + } + + private enum CodingKeys: String, CodingKey { + case policyDocument = "policyDocument" + case resourceType = "resourceType" + } + } + + public struct CheckNoPublicAccessResponse: AWSDecodableShape { + /// The message indicating whether the specified policy allows public access to resources. + public let message: String? + /// A list of reasons why the specified resource policy grants public access for the resource type. + public let reasons: [ReasonSummary]? + /// The result of the check for public access to the specified resource type. If the result is PASS, the policy doesn't allow public access to the specified resource type. If the result is FAIL, the policy might allow public access to the specified resource type. + public let result: CheckNoPublicAccessResult? + + public init(message: String? = nil, reasons: [ReasonSummary]? = nil, result: CheckNoPublicAccessResult? = nil) { + self.message = message + self.reasons = reasons + self.result = result + } + + private enum CodingKeys: String, CodingKey { + case message = "message" + case reasons = "reasons" + case result = "result" + } + } + public struct CloudTrailDetails: AWSEncodableShape { /// The ARN of the service role that IAM Access Analyzer uses to access your CloudTrail trail and service last accessed information. public let accessRole: String @@ -1622,6 +1722,31 @@ extension AccessAnalyzer { } } + public struct GenerateFindingRecommendationRequest: AWSEncodableShape { + /// The ARN of the analyzer used to generate the finding recommendation. + public let analyzerArn: String + /// The unique ID for the finding recommendation. + public let id: String + + public init(analyzerArn: String, id: String) { + self.analyzerArn = analyzerArn + self.id = id + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.analyzerArn, key: "analyzerArn") + request.encodePath(self.id, key: "id") + } + + public func validate(name: String) throws { + try self.validate(self.analyzerArn, name: "analyzerArn", parent: name, pattern: "^[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:analyzer/.{1,255}$") + } + + private enum CodingKeys: CodingKey {} + } + public struct GeneratedPolicy: AWSDecodableShape { /// The text to use as the content for the new policy. The policy is created using the CreatePolicy action. public let policy: String @@ -1829,6 +1954,82 @@ extension AccessAnalyzer { } } + public struct GetFindingRecommendationRequest: AWSEncodableShape { + /// The ARN of the analyzer used to generate the finding recommendation. + public let analyzerArn: String + /// The unique ID for the finding recommendation. + public let id: String + /// The maximum number of results to return in the response. + public let maxResults: Int? + /// A token used for pagination of results returned. + public let nextToken: String? + + public init(analyzerArn: String, id: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.analyzerArn = analyzerArn + self.id = id + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.analyzerArn, key: "analyzerArn") + request.encodePath(self.id, key: "id") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.analyzerArn, name: "analyzerArn", parent: name, pattern: "^[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:analyzer/.{1,255}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetFindingRecommendationResponse: AWSDecodableShape { + /// The time at which the retrieval of the finding recommendation was completed. + @OptionalCustomCoding + public var completedAt: Date? + /// Detailed information about the reason that the retrieval of a recommendation for the finding failed. + public let error: RecommendationError? + /// A token used for pagination of results returned. + public let nextToken: String? + /// The type of recommendation for the finding. + public let recommendationType: RecommendationType + /// A group of recommended steps for the finding. + public let recommendedSteps: [RecommendedStep]? + /// The ARN of the resource of the finding. + public let resourceArn: String + /// The time at which the retrieval of the finding recommendation was started. + @CustomCoding + public var startedAt: Date + /// The status of the retrieval of the finding recommendation. + public let status: Status + + public init(completedAt: Date? = nil, error: RecommendationError? = nil, nextToken: String? = nil, recommendationType: RecommendationType, recommendedSteps: [RecommendedStep]? 
= nil, resourceArn: String, startedAt: Date, status: Status) { + self.completedAt = completedAt + self.error = error + self.nextToken = nextToken + self.recommendationType = recommendationType + self.recommendedSteps = recommendedSteps + self.resourceArn = resourceArn + self.startedAt = startedAt + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case completedAt = "completedAt" + case error = "error" + case nextToken = "nextToken" + case recommendationType = "recommendationType" + case recommendedSteps = "recommendedSteps" + case resourceArn = "resourceArn" + case startedAt = "startedAt" + case status = "status" + } + } + public struct GetFindingRequest: AWSEncodableShape { /// The ARN of the analyzer that generated the finding. public let analyzerArn: String @@ -2723,6 +2924,23 @@ extension AccessAnalyzer { } } + public struct RecommendationError: AWSDecodableShape { + /// The error code for a failed retrieval of a recommendation for a finding. + public let code: String + /// The error message for a failed retrieval of a recommendation for a finding. + public let message: String + + public init(code: String, message: String) { + self.code = code + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case code = "code" + case message = "message" + } + } + public struct S3AccessPointConfiguration: AWSEncodableShape & AWSDecodableShape { /// The access point or multi-region access point policy. public let accessPointPolicy: String? @@ -3181,7 +3399,7 @@ extension AccessAnalyzer { public struct UnusedPermissionDetails: AWSDecodableShape { /// A list of unused actions for which the unused access finding was generated. public let actions: [UnusedAction]? - /// The time at which the permission last accessed. + /// The time at which the permission was last accessed. @OptionalCustomCoding public var lastAccessed: Date? /// The namespace of the Amazon Web Services service that contains the unused actions. @@ -3200,6 +3418,32 @@ extension AccessAnalyzer { } } + public struct UnusedPermissionsRecommendedStep: AWSDecodableShape { + /// If the recommended action for the unused permissions finding is to detach a policy, the ID of an existing policy to be detached. + public let existingPolicyId: String? + /// The time at which the existing policy for the unused permissions finding was last updated. + @OptionalCustomCoding + public var policyUpdatedAt: Date? + /// A recommendation of whether to create or detach a policy for an unused permissions finding. + public let recommendedAction: RecommendedRemediationAction + /// If the recommended action for the unused permissions finding is to replace the existing policy, the contents of the recommended policy to replace the policy specified in the existingPolicyId field. + public let recommendedPolicy: String? + + public init(existingPolicyId: String? = nil, policyUpdatedAt: Date? = nil, recommendedAction: RecommendedRemediationAction, recommendedPolicy: String? = nil) { + self.existingPolicyId = existingPolicyId + self.policyUpdatedAt = policyUpdatedAt + self.recommendedAction = recommendedAction + self.recommendedPolicy = recommendedPolicy + } + + private enum CodingKeys: String, CodingKey { + case existingPolicyId = "existingPolicyId" + case policyUpdatedAt = "policyUpdatedAt" + case recommendedAction = "recommendedAction" + case recommendedPolicy = "recommendedPolicy" + } + } + public struct UpdateArchiveRuleRequest: AWSEncodableShape { /// The name of the analyzer to update the archive rules for. 
public let analyzerName: String @@ -3421,6 +3665,19 @@ extension AccessAnalyzer { case accountIds = "accountIds" } } + + public struct RecommendedStep: AWSDecodableShape { + /// A recommended step for an unused permissions finding. + public let unusedPermissionsRecommendedStep: UnusedPermissionsRecommendedStep? + + public init(unusedPermissionsRecommendedStep: UnusedPermissionsRecommendedStep? = nil) { + self.unusedPermissionsRecommendedStep = unusedPermissionsRecommendedStep + } + + private enum CodingKeys: String, CodingKey { + case unusedPermissionsRecommendedStep = "unusedPermissionsRecommendedStep" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/Account/Account_api.swift b/Sources/Soto/Services/Account/Account_api.swift index 6bb9013806..8ff3c92ab6 100644 --- a/Sources/Soto/Services/Account/Account_api.swift +++ b/Sources/Soto/Services/Account/Account_api.swift @@ -83,6 +83,19 @@ public struct Account: AWSService { // MARK: API Calls + /// Accepts the request that originated from StartPrimaryEmailUpdate to update the primary email address (also known as the root user email address) for the specified account. + @Sendable + public func acceptPrimaryEmailUpdate(_ input: AcceptPrimaryEmailUpdateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AcceptPrimaryEmailUpdateResponse { + return try await self.client.execute( + operation: "AcceptPrimaryEmailUpdate", + path: "/acceptPrimaryEmailUpdate", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the specified alternate contact from an Amazon Web Services account. For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts. Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enabling trusted access for Amazon Web Services Account Management. @Sendable public func deleteAlternateContact(_ input: DeleteAlternateContactRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -96,7 +109,7 @@ public struct Account: AWSService { ) } - /// Disables (opts-out) a particular Region for an account. + /// Disables (opts-out) a particular Region for an account. The act of disabling a Region will remove all IAM access to any resources that reside in that Region. @Sendable public func disableRegion(_ input: DisableRegionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -148,6 +161,19 @@ public struct Account: AWSService { ) } + /// Retrieves the primary email address for the specified account. + @Sendable + public func getPrimaryEmail(_ input: GetPrimaryEmailRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetPrimaryEmailResponse { + return try await self.client.execute( + operation: "GetPrimaryEmail", + path: "/getPrimaryEmail", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the opt-in status of a particular Region. @Sendable public func getRegionOptStatus(_ input: GetRegionOptStatusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRegionOptStatusResponse { @@ -199,6 +225,19 @@ public struct Account: AWSService { logger: logger ) } + + /// Starts the process to update the primary email address for the specified account. 
+ @Sendable + public func startPrimaryEmailUpdate(_ input: StartPrimaryEmailUpdateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartPrimaryEmailUpdateResponse { + return try await self.client.execute( + operation: "StartPrimaryEmailUpdate", + path: "/startPrimaryEmailUpdate", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } } extension Account { diff --git a/Sources/Soto/Services/Account/Account_shapes.swift b/Sources/Soto/Services/Account/Account_shapes.swift index 8394b534d7..2630fbafe1 100644 --- a/Sources/Soto/Services/Account/Account_shapes.swift +++ b/Sources/Soto/Services/Account/Account_shapes.swift @@ -33,6 +33,12 @@ extension Account { public var description: String { return self.rawValue } } + public enum PrimaryEmailUpdateStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case accepted = "ACCEPTED" + case pending = "PENDING" + public var description: String { return self.rawValue } + } + public enum RegionOptStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case disabling = "DISABLING" @@ -44,6 +50,47 @@ extension Account { // MARK: Shapes + public struct AcceptPrimaryEmailUpdateRequest: AWSEncodableShape { + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. This operation can only be called from the management account or the delegated administrator account of an organization for a member account. The management account can't specify its own AccountId. + public let accountId: String + /// The OTP code sent to the PrimaryEmail specified on the StartPrimaryEmailUpdate API call. + public let otp: String + /// The new primary email address for use with the specified account. This must match the PrimaryEmail from the StartPrimaryEmailUpdate API call. + public let primaryEmail: String + + public init(accountId: String, otp: String, primaryEmail: String) { + self.accountId = accountId + self.otp = otp + self.primaryEmail = primaryEmail + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") + try self.validate(self.otp, name: "otp", parent: name, pattern: "^[a-zA-Z0-9]{6}$") + try self.validate(self.primaryEmail, name: "primaryEmail", parent: name, max: 64) + try self.validate(self.primaryEmail, name: "primaryEmail", parent: name, min: 5) + } + + private enum CodingKeys: String, CodingKey { + case accountId = "AccountId" + case otp = "Otp" + case primaryEmail = "PrimaryEmail" + } + } + + public struct AcceptPrimaryEmailUpdateResponse: AWSDecodableShape { + /// Retrieves the status of the accepted primary email update request. + public let status: PrimaryEmailUpdateStatus? + + public init(status: PrimaryEmailUpdateStatus? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "Status" + } + } + public struct AlternateContact: AWSDecodableShape { /// The type of alternate contact. 
public let alternateContactType: AlternateContactType? @@ -94,7 +141,7 @@ extension Account { public let phoneNumber: String /// The postal code of the primary contact address. public let postalCode: String - /// The state or region of the primary contact address. This field is required in selected countries. + /// The state or region of the primary contact address. If the mailing address is within the United States (US), the value in this field can be either a two character state code (for example, NJ) or the full state name (for example, New Jersey). This field is required in the following countries: US, CA, GB, DE, JP, IN, and BR. public let stateOrRegion: String? /// The URL of the website associated with the primary contact information, if any. public let websiteUrl: String? @@ -180,7 +227,7 @@ extension Account { } public struct DisableRegionRequest: AWSEncodableShape { - /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. public let accountId: String? /// Specifies the Region-code for a given Region name (for example, af-south-1). When you disable a Region, Amazon Web Services performs actions to deactivate that Region in your account, such as destroying IAM resources in the Region. This process takes a few minutes for most accounts, but this can take several hours. You cannot enable the Region until the disabling process is fully completed. 
public let regionName: String @@ -203,7 +250,7 @@ extension Account { } public struct EnableRegionRequest: AWSEncodableShape { - /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. public let accountId: String? /// Specifies the Region-code for a given Region name (for example, af-south-1). When you enable a Region, Amazon Web Services performs actions to prepare your account in that Region, such as distributing your IAM resources to the Region. This process takes a few minutes for most accounts, but it can take several hours. You cannot use the Region until this process is complete. Furthermore, you cannot disable the Region until the enabling process is fully completed. public let regionName: String @@ -260,7 +307,7 @@ extension Account { } public struct GetContactInformationRequest: AWSEncodableShape { - /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. 
The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. public let accountId: String? public init(accountId: String? = nil) { @@ -289,8 +336,38 @@ extension Account { } } + public struct GetPrimaryEmailRequest: AWSEncodableShape { + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. This operation can only be called from the management account or the delegated administrator account of an organization for a member account. The management account can't specify its own AccountId. + public let accountId: String + + public init(accountId: String) { + self.accountId = accountId + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") + } + + private enum CodingKeys: String, CodingKey { + case accountId = "AccountId" + } + } + + public struct GetPrimaryEmailResponse: AWSDecodableShape { + /// Retrieves the primary email address associated with the specified account. + public let primaryEmail: String? + + public init(primaryEmail: String? = nil) { + self.primaryEmail = primaryEmail + } + + private enum CodingKeys: String, CodingKey { + case primaryEmail = "PrimaryEmail" + } + } + public struct GetRegionOptStatusRequest: AWSEncodableShape { - /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. 
The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. public let accountId: String? /// Specifies the Region-code for a given Region name (for example, af-south-1). This function will return the status of whatever Region you pass into this parameter. public let regionName: String @@ -330,7 +407,7 @@ extension Account { } public struct ListRegionsRequest: AWSEncodableShape { - /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. 
The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. public let accountId: String? /// The total number of items to return in the command’s output. If the total number of items available is more than the value specified, a NextToken is provided in the command’s output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the Amazon Web Services CLI. For usage examples, see Pagination in the Amazon Web Services Command Line Interface User Guide. public let maxResults: Int? @@ -423,7 +500,7 @@ extension Account { } public struct PutContactInformationRequest: AWSEncodableShape { - /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must also be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter. To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify. public let accountId: String? /// Contains the details of the primary contact information associated with an Amazon Web Services account. 
public let contactInformation: ContactInformation @@ -460,6 +537,42 @@ extension Account { case regionOptStatus = "RegionOptStatus" } } + + public struct StartPrimaryEmailUpdateRequest: AWSEncodableShape { + /// Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned. This operation can only be called from the management account or the delegated administrator account of an organization for a member account. The management account can't specify its own AccountId. + public let accountId: String + /// The new primary email address (also known as the root user email address) to use in the specified account. + public let primaryEmail: String + + public init(accountId: String, primaryEmail: String) { + self.accountId = accountId + self.primaryEmail = primaryEmail + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") + try self.validate(self.primaryEmail, name: "primaryEmail", parent: name, max: 64) + try self.validate(self.primaryEmail, name: "primaryEmail", parent: name, min: 5) + } + + private enum CodingKeys: String, CodingKey { + case accountId = "AccountId" + case primaryEmail = "PrimaryEmail" + } + } + + public struct StartPrimaryEmailUpdateResponse: AWSDecodableShape { + /// The status of the primary email update request. + public let status: PrimaryEmailUpdateStatus? + + public init(status: PrimaryEmailUpdateStatus? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "Status" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/Amplify/Amplify_api.swift b/Sources/Soto/Services/Amplify/Amplify_api.swift index 69bed4d30a..3e1c2d8a35 100644 --- a/Sources/Soto/Services/Amplify/Amplify_api.swift +++ b/Sources/Soto/Services/Amplify/Amplify_api.swift @@ -87,7 +87,7 @@ public struct Amplify: AWSService { ) } - /// Creates a new backend environment for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. + /// Creates a new backend environment for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. @Sendable public func createBackendEnvironment(_ input: CreateBackendEnvironmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBackendEnvironmentResult { return try await self.client.execute( @@ -165,7 +165,7 @@ public struct Amplify: AWSService { ) } - /// Deletes a backend environment for an Amplify app. 
This API is available only to Amplify Gen 1 applications where the backend was created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. + /// Deletes a backend environment for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. @Sendable public func deleteBackendEnvironment(_ input: DeleteBackendEnvironmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteBackendEnvironmentResult { return try await self.client.execute( @@ -269,7 +269,7 @@ public struct Amplify: AWSService { ) } - /// Returns a backend environment for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend was created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. + /// Returns a backend environment for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. @Sendable public func getBackendEnvironment(_ input: GetBackendEnvironmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBackendEnvironmentResult { return try await self.client.execute( @@ -360,7 +360,7 @@ public struct Amplify: AWSService { ) } - /// Lists the backend environments for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend was created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to applications created using the Amplify Gen 2 public preview. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. + /// Lists the backend environments for an Amplify app. This API is available only to Amplify Gen 1 applications where the backend is created using Amplify Studio or the Amplify command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. @Sendable public func listBackendEnvironments(_ input: ListBackendEnvironmentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBackendEnvironmentsResult { return try await self.client.execute( diff --git a/Sources/Soto/Services/Amplify/Amplify_shapes.swift b/Sources/Soto/Services/Amplify/Amplify_shapes.swift index eb3182cef2..670f3e10b3 100644 --- a/Sources/Soto/Services/Amplify/Amplify_shapes.swift +++ b/Sources/Soto/Services/Amplify/Amplify_shapes.swift @@ -348,7 +348,7 @@ extension Amplify { /// A list of custom resources that are linked to this branch. public let associatedResources: [String]? 
public let backend: Backend? - /// The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app. + /// The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app. This property is available to Amplify Gen 1 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. public let backendEnvironmentArn: String? /// The basic authorization credentials for a branch of an Amplify app. You must base64-encode the authorization credentials and provide them in the format user:password. public let basicAuthCredentials: String? @@ -713,9 +713,9 @@ extension Amplify { public struct CreateBranchRequest: AWSEncodableShape { /// The unique ID for an Amplify app. public let appId: String - /// The backend for a Branch of an Amplify app. Use for a backend created from an CloudFormation stack. + /// The backend for a Branch of an Amplify app. Use for a backend created from an CloudFormation stack. This field is available to Amplify Gen 2 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. public let backend: Backend? - /// The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app. + /// The Amazon Resource Name (ARN) for a backend environment that is part of a Gen 1 Amplify app. This field is available to Amplify Gen 1 apps only where the backend is created using Amplify Studio or the Amplify command line interface (CLI). public let backendEnvironmentArn: String? /// The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password. public let basicAuthCredentials: String? @@ -2780,9 +2780,9 @@ extension Amplify { public struct UpdateBranchRequest: AWSEncodableShape { /// The unique ID for an Amplify app. public let appId: String - /// The backend for a Branch of an Amplify app. Use for a backend created from an CloudFormation stack. + /// The backend for a Branch of an Amplify app. Use for a backend created from an CloudFormation stack. This field is available to Amplify Gen 2 apps only. When you deploy an application with Amplify Gen 2, you provision the app's backend infrastructure using Typescript code. public let backend: Backend? - /// The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app. + /// The Amazon Resource Name (ARN) for a backend environment that is part of a Gen 1 Amplify app. This field is available to Amplify Gen 1 apps only where the backend is created using Amplify Studio or the Amplify command line interface (CLI). public let backendEnvironmentArn: String? /// The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format user:password. public let basicAuthCredentials: String? 
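The Account changes above add a three-step primary email (root user email) rotation flow: StartPrimaryEmailUpdate sends a one-time password to the new address, AcceptPrimaryEmailUpdate confirms it, and GetPrimaryEmail verifies the result. The following minimal sketch is not part of the generated patch; it assumes the usual per-service Soto module name (SotoAccount), a caller-supplied, already-configured Account service object, and a hypothetical readOtp closure that returns the 6-character code delivered to the new address.

import SotoAccount  // assumption: per-service Soto module name; SotoCore is brought in via its @_exported import

/// Sketch only: rotates the root-user email of a member account using the operations
/// added in this patch. Must be called from the organization's management account or a
/// delegated administrator account; the management account cannot target its own AccountId.
func rotatePrimaryEmail(
    account: Account,                       // pre-configured service object (assumed built elsewhere)
    memberAccountId: String,                // 12-digit account ID, e.g. "111122223333"
    newEmail: String,                       // 5-64 characters
    readOtp: () async throws -> String      // hypothetical helper returning the 6-character OTP
) async throws {
    // Step 1: start the update; the service emails an OTP to `newEmail`.
    let start = try await account.startPrimaryEmailUpdate(
        Account.StartPrimaryEmailUpdateRequest(accountId: memberAccountId, primaryEmail: newEmail)
    )
    print("start status: \(start.status?.rawValue ?? "unknown")")   // expected: PENDING

    // Step 2: confirm with the OTP that arrived at the new address.
    let otp = try await readOtp()
    let accept = try await account.acceptPrimaryEmailUpdate(
        Account.AcceptPrimaryEmailUpdateRequest(accountId: memberAccountId, otp: otp, primaryEmail: newEmail)
    )
    print("accept status: \(accept.status?.rawValue ?? "unknown")") // expected: ACCEPTED

    // Step 3: read the primary email back to verify the change.
    let current = try await account.getPrimaryEmail(Account.GetPrimaryEmailRequest(accountId: memberAccountId))
    print("primary email is now: \(current.primaryEmail ?? "unknown")")
}

Note that the request validation shown above enforces a 12-digit AccountId and a 6-character alphanumeric OTP, so client-side validation will reject malformed input before any network call is made.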
diff --git a/Sources/Soto/Services/AppTest/AppTest_api.swift b/Sources/Soto/Services/AppTest/AppTest_api.swift new file mode 100644 index 0000000000..b22b77d1d0 --- /dev/null +++ b/Sources/Soto/Services/AppTest/AppTest_api.swift @@ -0,0 +1,578 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS AppTest service. +/// +/// AWS Mainframe Modernization Application Testing provides tools and resources for automated functional equivalence testing for your migration projects. +public struct AppTest: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the AppTest client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "AppTest", + serviceIdentifier: "apptest", + serviceProtocol: .restjson, + apiVersion: "2022-12-06", + endpoint: endpoint, + errorType: AppTestErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Creates a test case. + @Sendable + public func createTestCase(_ input: CreateTestCaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTestCaseResponse { + return try await self.client.execute( + operation: "CreateTestCase", + path: "/testcase", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a test configuration. 
+ @Sendable + public func createTestConfiguration(_ input: CreateTestConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTestConfigurationResponse { + return try await self.client.execute( + operation: "CreateTestConfiguration", + path: "/testconfiguration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a test suite. + @Sendable + public func createTestSuite(_ input: CreateTestSuiteRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTestSuiteResponse { + return try await self.client.execute( + operation: "CreateTestSuite", + path: "/testsuite", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a test case. + @Sendable + public func deleteTestCase(_ input: DeleteTestCaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTestCaseResponse { + return try await self.client.execute( + operation: "DeleteTestCase", + path: "/testcases/{testCaseId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a test configuration. + @Sendable + public func deleteTestConfiguration(_ input: DeleteTestConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTestConfigurationResponse { + return try await self.client.execute( + operation: "DeleteTestConfiguration", + path: "/testconfigurations/{testConfigurationId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a test run. + @Sendable + public func deleteTestRun(_ input: DeleteTestRunRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTestRunResponse { + return try await self.client.execute( + operation: "DeleteTestRun", + path: "/testruns/{testRunId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a test suite. + @Sendable + public func deleteTestSuite(_ input: DeleteTestSuiteRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTestSuiteResponse { + return try await self.client.execute( + operation: "DeleteTestSuite", + path: "/testsuites/{testSuiteId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets a test case. + @Sendable + public func getTestCase(_ input: GetTestCaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTestCaseResponse { + return try await self.client.execute( + operation: "GetTestCase", + path: "/testcases/{testCaseId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets a test configuration. + @Sendable + public func getTestConfiguration(_ input: GetTestConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTestConfigurationResponse { + return try await self.client.execute( + operation: "GetTestConfiguration", + path: "/testconfigurations/{testConfigurationId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets a test run step. 
+ @Sendable + public func getTestRunStep(_ input: GetTestRunStepRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTestRunStepResponse { + return try await self.client.execute( + operation: "GetTestRunStep", + path: "/testruns/{testRunId}/steps/{stepName}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets a test suite. + @Sendable + public func getTestSuite(_ input: GetTestSuiteRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTestSuiteResponse { + return try await self.client.execute( + operation: "GetTestSuite", + path: "/testsuites/{testSuiteId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists tags for a resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{resourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists test cases. + @Sendable + public func listTestCases(_ input: ListTestCasesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTestCasesResponse { + return try await self.client.execute( + operation: "ListTestCases", + path: "/testcases", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists test configurations. + @Sendable + public func listTestConfigurations(_ input: ListTestConfigurationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTestConfigurationsResponse { + return try await self.client.execute( + operation: "ListTestConfigurations", + path: "/testconfigurations", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists test run steps. + @Sendable + public func listTestRunSteps(_ input: ListTestRunStepsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTestRunStepsResponse { + return try await self.client.execute( + operation: "ListTestRunSteps", + path: "/testruns/{testRunId}/steps", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists test run test cases. + @Sendable + public func listTestRunTestCases(_ input: ListTestRunTestCasesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTestRunTestCasesResponse { + return try await self.client.execute( + operation: "ListTestRunTestCases", + path: "/testruns/{testRunId}/testcases", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists test runs. + @Sendable + public func listTestRuns(_ input: ListTestRunsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTestRunsResponse { + return try await self.client.execute( + operation: "ListTestRuns", + path: "/testruns", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists test suites. + @Sendable + public func listTestSuites(_ input: ListTestSuitesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTestSuitesResponse { + return try await self.client.execute( + operation: "ListTestSuites", + path: "/testsuites", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Starts a test run. 
+ @Sendable + public func startTestRun(_ input: StartTestRunRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartTestRunResponse { + return try await self.client.execute( + operation: "StartTestRun", + path: "/testrun", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Specifies tags of a resource. + @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + return try await self.client.execute( + operation: "TagResource", + path: "/tags/{resourceArn}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Untags a resource. + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + return try await self.client.execute( + operation: "UntagResource", + path: "/tags/{resourceArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates a test case. + @Sendable + public func updateTestCase(_ input: UpdateTestCaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTestCaseResponse { + return try await self.client.execute( + operation: "UpdateTestCase", + path: "/testcases/{testCaseId}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates a test configuration. + @Sendable + public func updateTestConfiguration(_ input: UpdateTestConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTestConfigurationResponse { + return try await self.client.execute( + operation: "UpdateTestConfiguration", + path: "/testconfigurations/{testConfigurationId}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates a test suite. + @Sendable + public func updateTestSuite(_ input: UpdateTestSuiteRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTestSuiteResponse { + return try await self.client.execute( + operation: "UpdateTestSuite", + path: "/testsuites/{testSuiteId}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +} + +extension AppTest { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: AppTest, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension AppTest { + /// Lists test cases. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listTestCasesPaginator( + _ input: ListTestCasesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTestCases, + inputKey: \ListTestCasesRequest.nextToken, + outputKey: \ListTestCasesResponse.nextToken, + logger: logger + ) + } + + /// Lists test configurations. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listTestConfigurationsPaginator( + _ input: ListTestConfigurationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTestConfigurations, + inputKey: \ListTestConfigurationsRequest.nextToken, + outputKey: \ListTestConfigurationsResponse.nextToken, + logger: logger + ) + } + + /// Lists test run steps. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listTestRunStepsPaginator( + _ input: ListTestRunStepsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTestRunSteps, + inputKey: \ListTestRunStepsRequest.nextToken, + outputKey: \ListTestRunStepsResponse.nextToken, + logger: logger + ) + } + + /// Lists test run test cases. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listTestRunTestCasesPaginator( + _ input: ListTestRunTestCasesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTestRunTestCases, + inputKey: \ListTestRunTestCasesRequest.nextToken, + outputKey: \ListTestRunTestCasesResponse.nextToken, + logger: logger + ) + } + + /// Lists test runs. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listTestRunsPaginator( + _ input: ListTestRunsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTestRuns, + inputKey: \ListTestRunsRequest.nextToken, + outputKey: \ListTestRunsResponse.nextToken, + logger: logger + ) + } + + /// Lists test suites. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listTestSuitesPaginator( + _ input: ListTestSuitesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTestSuites, + inputKey: \ListTestSuitesRequest.nextToken, + outputKey: \ListTestSuitesResponse.nextToken, + logger: logger + ) + } +} + +extension AppTest.ListTestCasesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppTest.ListTestCasesRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + testCaseIds: self.testCaseIds + ) + } +} + +extension AppTest.ListTestConfigurationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppTest.ListTestConfigurationsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + testConfigurationIds: self.testConfigurationIds + ) + } +} + +extension AppTest.ListTestRunStepsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppTest.ListTestRunStepsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + testCaseId: self.testCaseId, + testRunId: self.testRunId, + testSuiteId: self.testSuiteId + ) + } +} + +extension AppTest.ListTestRunTestCasesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppTest.ListTestRunTestCasesRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + testRunId: self.testRunId + ) + } +} + +extension AppTest.ListTestRunsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppTest.ListTestRunsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + testRunIds: self.testRunIds, + testSuiteId: self.testSuiteId + ) + } +} + +extension AppTest.ListTestSuitesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> AppTest.ListTestSuitesRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + testSuiteIds: self.testSuiteIds + ) + } +} diff --git a/Sources/Soto/Services/AppTest/AppTest_shapes.swift b/Sources/Soto/Services/AppTest/AppTest_shapes.swift new file mode 100644 index 0000000000..56a1711180 --- /dev/null +++ b/Sources/Soto/Services/AppTest/AppTest_shapes.swift @@ -0,0 +1,3450 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. 
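The new AppTest client above exposes create/get/update/delete operations for test cases, configurations, suites, and runs, plus a paginator for every list call. The sketch below is not part of the generated patch; it shows the client being constructed and the ListTestCases paginator being driven to exhaustion. It assumes the per-service module name SotoAppTest, a caller-supplied AWSClient, and us-east-1 as an arbitrary example Region; fields of ListTestCasesResponse other than nextToken are defined later in AppTest_shapes.swift, so the sketch simply prints each page.

import SotoAppTest  // assumption: per-service Soto module name; AppTest_api.swift re-exports SotoCore

/// Sketch only: walks every page of test cases using the ListTestCases paginator added above.
func printAllTestCases(client: AWSClient) async throws {
    // Initializer from AppTest_api.swift; every parameter except `client` has a default value.
    let appTest = AppTest(client: client, region: .useast1)

    // maxResults/nextToken/testCaseIds mirror the memberwise init used by the
    // AWSPaginateToken conformance above; the paginator follows nextToken automatically.
    let request = AppTest.ListTestCasesRequest(maxResults: 50, nextToken: nil, testCaseIds: nil)

    var pageNumber = 0
    for try await page in appTest.listTestCasesPaginator(request) {
        pageNumber += 1
        print("page \(pageNumber): \(page)")
    }
}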
+ +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension AppTest { + // MARK: Enums + + public enum CaptureTool: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case awsDms = "AWS DMS" + case precisely = "Precisely" + public var description: String { return self.rawValue } + } + + public enum CloudFormationActionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case create = "Create" + case delete = "Delete" + public var description: String { return self.rawValue } + } + + public enum ComparisonStatusEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case different = "Different" + case equal = "Equal" + case equivalent = "Equivalent" + public var description: String { return self.rawValue } + } + + public enum DataSetType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case ps = "PS" + public var description: String { return self.rawValue } + } + + public enum Format: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fixed = "FIXED" + case lineSequential = "LINE_SEQUENTIAL" + case variable = "VARIABLE" + public var description: String { return self.rawValue } + } + + public enum M2ManagedActionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case configure = "Configure" + case deconfigure = "Deconfigure" + public var description: String { return self.rawValue } + } + + public enum M2ManagedRuntime: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case microfocus = "MicroFocus" + public var description: String { return self.rawValue } + } + + public enum M2NonManagedActionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case configure = "Configure" + case deconfigure = "Deconfigure" + public var description: String { return self.rawValue } + } + + public enum M2NonManagedRuntime: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bluage = "BluAge" + public var description: String { return self.rawValue } + } + + public enum ScriptType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case selenium = "Selenium" + public var description: String { return self.rawValue } + } + + public enum SourceDatabase: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case zOsDb2 = "z/OS-DB2" + public var description: String { return self.rawValue } + } + + public enum StepRunStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "Failed" + case running = "Running" + case success = "Success" + public var description: String { return self.rawValue } + } + + public enum TargetDatabase: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case postgresql = "PostgreSQL" + public var description: String { return self.rawValue } + } + + public enum TestCaseLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "Active" + case deleting = "Deleting" + public var description: String { return self.rawValue } + } + + public enum TestCaseRunStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "Failed" + case running = "Running" 
+ case success = "Success" + public var description: String { return self.rawValue } + } + + public enum TestConfigurationLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "Active" + case deleting = "Deleting" + public var description: String { return self.rawValue } + } + + public enum TestRunStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deleting = "Deleting" + case failed = "Failed" + case running = "Running" + case success = "Success" + public var description: String { return self.rawValue } + } + + public enum TestSuiteLifecycle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "Active" + case creating = "Creating" + case deleting = "Deleting" + case failed = "Failed" + case updating = "Updating" + public var description: String { return self.rawValue } + } + + public enum CloudFormationStepSummary: AWSDecodableShape, Sendable { + /// Creates the CloudFormation summary of the step. + case createCloudformation(CreateCloudFormationSummary) + /// Deletes the CloudFormation summary of the CloudFormation step summary. + case deleteCloudformation(DeleteCloudFormationSummary) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .createCloudformation: + let value = try container.decode(CreateCloudFormationSummary.self, forKey: .createCloudformation) + self = .createCloudformation(value) + case .deleteCloudformation: + let value = try container.decode(DeleteCloudFormationSummary.self, forKey: .deleteCloudformation) + self = .deleteCloudformation(value) + } + } + + private enum CodingKeys: String, CodingKey { + case createCloudformation = "createCloudformation" + case deleteCloudformation = "deleteCloudformation" + } + } + + public enum CompareFileType: AWSDecodableShape, Sendable { + /// The database CDC of the compare file type. + case databaseCDC(CompareDatabaseCDCSummary) + /// The data sets in the compare file type. + case datasets(CompareDataSetsSummary) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .databaseCDC: + let value = try container.decode(CompareDatabaseCDCSummary.self, forKey: .databaseCDC) + self = .databaseCDC(value) + case .datasets: + let value = try container.decode(CompareDataSetsSummary.self, forKey: .datasets) + self = .datasets(value) + } + } + + private enum CodingKeys: String, CodingKey { + case databaseCDC = "databaseCDC" + case datasets = "datasets" + } + } + + public enum FileMetadata: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The database CDC of the file metadata. + case databaseCDC(DatabaseCDC) + /// The data sets of the file metadata. 
+ case dataSets([DataSet]) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .databaseCDC: + let value = try container.decode(DatabaseCDC.self, forKey: .databaseCDC) + self = .databaseCDC(value) + case .dataSets: + let value = try container.decode([DataSet].self, forKey: .dataSets) + self = .dataSets(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .databaseCDC(let value): + try container.encode(value, forKey: .databaseCDC) + case .dataSets(let value): + try container.encode(value, forKey: .dataSets) + } + } + + public func validate(name: String) throws { + switch self { + case .dataSets(let value): + try value.forEach { + try $0.validate(name: "\(name).dataSets[]") + } + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case databaseCDC = "databaseCDC" + case dataSets = "dataSets" + } + } + + public enum MainframeActionSummary: AWSDecodableShape, Sendable { + /// The batch of the mainframe action summary. + case batch(BatchSummary) + /// The tn3270 port of the mainframe action summary. + case tn3270(TN3270Summary) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .batch: + let value = try container.decode(BatchSummary.self, forKey: .batch) + self = .batch(value) + case .tn3270: + let value = try container.decode(TN3270Summary.self, forKey: .tn3270) + self = .tn3270(value) + } + } + + private enum CodingKeys: String, CodingKey { + case batch = "batch" + case tn3270 = "tn3270" + } + } + + public enum MainframeActionType: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The batch of the mainframe action type. + case batch(Batch) + /// The tn3270 port of the mainframe action type. 
+ case tn3270(TN3270) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .batch: + let value = try container.decode(Batch.self, forKey: .batch) + self = .batch(value) + case .tn3270: + let value = try container.decode(TN3270.self, forKey: .tn3270) + self = .tn3270(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .batch(let value): + try container.encode(value, forKey: .batch) + case .tn3270(let value): + try container.encode(value, forKey: .tn3270) + } + } + + public func validate(name: String) throws { + switch self { + case .batch(let value): + try value.validate(name: "\(name).batch") + case .tn3270(let value): + try value.validate(name: "\(name).tn3270") + } + } + + private enum CodingKeys: String, CodingKey { + case batch = "batch" + case tn3270 = "tn3270" + } + } + + public enum MainframeResourceSummary: AWSDecodableShape, Sendable { + /// The AWS Mainframe Modernization managed application in the mainframe resource summary. + case m2ManagedApplication(M2ManagedApplicationSummary) + /// The AWS Mainframe Modernization non-managed application in the mainframe resource summary. + case m2NonManagedApplication(M2NonManagedApplicationSummary) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .m2ManagedApplication: + let value = try container.decode(M2ManagedApplicationSummary.self, forKey: .m2ManagedApplication) + self = .m2ManagedApplication(value) + case .m2NonManagedApplication: + let value = try container.decode(M2NonManagedApplicationSummary.self, forKey: .m2NonManagedApplication) + self = .m2NonManagedApplication(value) + } + } + + private enum CodingKeys: String, CodingKey { + case m2ManagedApplication = "m2ManagedApplication" + case m2NonManagedApplication = "m2NonManagedApplication" + } + } + + public enum ResourceAction: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The CloudFormation action of the resource action. + case cloudFormationAction(CloudFormationAction) + /// The AWS Mainframe Modernization managed application action of the resource action. + case m2ManagedApplicationAction(M2ManagedApplicationAction) + /// The AWS Mainframe Modernization non-managed application action of the resource action. 
+ case m2NonManagedApplicationAction(M2NonManagedApplicationAction) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .cloudFormationAction: + let value = try container.decode(CloudFormationAction.self, forKey: .cloudFormationAction) + self = .cloudFormationAction(value) + case .m2ManagedApplicationAction: + let value = try container.decode(M2ManagedApplicationAction.self, forKey: .m2ManagedApplicationAction) + self = .m2ManagedApplicationAction(value) + case .m2NonManagedApplicationAction: + let value = try container.decode(M2NonManagedApplicationAction.self, forKey: .m2NonManagedApplicationAction) + self = .m2NonManagedApplicationAction(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .cloudFormationAction(let value): + try container.encode(value, forKey: .cloudFormationAction) + case .m2ManagedApplicationAction(let value): + try container.encode(value, forKey: .m2ManagedApplicationAction) + case .m2NonManagedApplicationAction(let value): + try container.encode(value, forKey: .m2NonManagedApplicationAction) + } + } + + public func validate(name: String) throws { + switch self { + case .cloudFormationAction(let value): + try value.validate(name: "\(name).cloudFormationAction") + case .m2ManagedApplicationAction(let value): + try value.validate(name: "\(name).m2ManagedApplicationAction") + case .m2NonManagedApplicationAction(let value): + try value.validate(name: "\(name).m2NonManagedApplicationAction") + } + } + + private enum CodingKeys: String, CodingKey { + case cloudFormationAction = "cloudFormationAction" + case m2ManagedApplicationAction = "m2ManagedApplicationAction" + case m2NonManagedApplicationAction = "m2NonManagedApplicationAction" + } + } + + public enum ResourceActionSummary: AWSDecodableShape, Sendable { + /// The CloudFormation template of the resource action summary. + case cloudFormation(CloudFormationStepSummary) + /// The AWS Mainframe Modernization managed application of the resource action summary. + case m2ManagedApplication(M2ManagedApplicationStepSummary) + /// The AWS Mainframe Modernization non-managed application of the resource action summary. 
+ case m2NonManagedApplication(M2NonManagedApplicationStepSummary) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .cloudFormation: + let value = try container.decode(CloudFormationStepSummary.self, forKey: .cloudFormation) + self = .cloudFormation(value) + case .m2ManagedApplication: + let value = try container.decode(M2ManagedApplicationStepSummary.self, forKey: .m2ManagedApplication) + self = .m2ManagedApplication(value) + case .m2NonManagedApplication: + let value = try container.decode(M2NonManagedApplicationStepSummary.self, forKey: .m2NonManagedApplication) + self = .m2NonManagedApplication(value) + } + } + + private enum CodingKeys: String, CodingKey { + case cloudFormation = "cloudFormation" + case m2ManagedApplication = "m2ManagedApplication" + case m2NonManagedApplication = "m2NonManagedApplication" + } + } + + public enum ResourceType: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The CloudFormation template of the resource type. + case cloudFormation(CloudFormation) + /// The AWS Mainframe Modernization managed application of the resource type. + case m2ManagedApplication(M2ManagedApplication) + /// The AWS Mainframe Modernization non-managed application of the resource type. + case m2NonManagedApplication(M2NonManagedApplication) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .cloudFormation: + let value = try container.decode(CloudFormation.self, forKey: .cloudFormation) + self = .cloudFormation(value) + case .m2ManagedApplication: + let value = try container.decode(M2ManagedApplication.self, forKey: .m2ManagedApplication) + self = .m2ManagedApplication(value) + case .m2NonManagedApplication: + let value = try container.decode(M2NonManagedApplication.self, forKey: .m2NonManagedApplication) + self = .m2NonManagedApplication(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .cloudFormation(let value): + try container.encode(value, forKey: .cloudFormation) + case .m2ManagedApplication(let value): + try container.encode(value, forKey: .m2ManagedApplication) + case .m2NonManagedApplication(let value): + try container.encode(value, forKey: .m2NonManagedApplication) + } + } + + public func validate(name: String) throws { + switch self { + case .cloudFormation(let value): + try value.validate(name: "\(name).cloudFormation") + case .m2ManagedApplication(let value): + try value.validate(name: "\(name).m2ManagedApplication") + case .m2NonManagedApplication(let value): + try value.validate(name: "\(name).m2NonManagedApplication") + } + } + + private enum CodingKeys: String, CodingKey { + case cloudFormation = "cloudFormation" + case m2ManagedApplication = "m2ManagedApplication" + case m2NonManagedApplication = 
"m2NonManagedApplication" + } + } + + public enum StepAction: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The compare action of the step action. + case compareAction(CompareAction) + /// The mainframe action of the step action. + case mainframeAction(MainframeAction) + /// The resource action of the step action. + case resourceAction(ResourceAction) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .compareAction: + let value = try container.decode(CompareAction.self, forKey: .compareAction) + self = .compareAction(value) + case .mainframeAction: + let value = try container.decode(MainframeAction.self, forKey: .mainframeAction) + self = .mainframeAction(value) + case .resourceAction: + let value = try container.decode(ResourceAction.self, forKey: .resourceAction) + self = .resourceAction(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .compareAction(let value): + try container.encode(value, forKey: .compareAction) + case .mainframeAction(let value): + try container.encode(value, forKey: .mainframeAction) + case .resourceAction(let value): + try container.encode(value, forKey: .resourceAction) + } + } + + public func validate(name: String) throws { + switch self { + case .compareAction(let value): + try value.validate(name: "\(name).compareAction") + case .mainframeAction(let value): + try value.validate(name: "\(name).mainframeAction") + case .resourceAction(let value): + try value.validate(name: "\(name).resourceAction") + } + } + + private enum CodingKeys: String, CodingKey { + case compareAction = "compareAction" + case mainframeAction = "mainframeAction" + case resourceAction = "resourceAction" + } + } + + public enum StepRunSummary: AWSDecodableShape, Sendable { + /// The compare action of the step run summary. + case compareAction(CompareActionSummary) + /// The mainframe action of the step run summary. + case mainframeAction(MainframeActionSummary) + /// The resource action of the step run summary. 
+ case resourceAction(ResourceActionSummary) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .compareAction: + let value = try container.decode(CompareActionSummary.self, forKey: .compareAction) + self = .compareAction(value) + case .mainframeAction: + let value = try container.decode(MainframeActionSummary.self, forKey: .mainframeAction) + self = .mainframeAction(value) + case .resourceAction: + let value = try container.decode(ResourceActionSummary.self, forKey: .resourceAction) + self = .resourceAction(value) + } + } + + private enum CodingKeys: String, CodingKey { + case compareAction = "compareAction" + case mainframeAction = "mainframeAction" + case resourceAction = "resourceAction" + } + } + + // MARK: Shapes + + public struct Batch: AWSEncodableShape & AWSDecodableShape { + /// The job name of the batch. + public let batchJobName: String + /// The batch job parameters of the batch. + public let batchJobParameters: [String: String]? + /// The export data set names of the batch. + public let exportDataSetNames: [String]? + + public init(batchJobName: String, batchJobParameters: [String: String]? = nil, exportDataSetNames: [String]? = nil) { + self.batchJobName = batchJobName + self.batchJobParameters = batchJobParameters + self.exportDataSetNames = exportDataSetNames + } + + public func validate(name: String) throws { + try self.validate(self.batchJobName, name: "batchJobName", parent: name, pattern: "^\\S{1,1000}$") + try self.exportDataSetNames?.forEach { + try validate($0, name: "exportDataSetNames[]", parent: name, pattern: "^\\S{1,100}$") + } + } + + private enum CodingKeys: String, CodingKey { + case batchJobName = "batchJobName" + case batchJobParameters = "batchJobParameters" + case exportDataSetNames = "exportDataSetNames" + } + } + + public struct BatchStepInput: AWSDecodableShape { + /// The batch job name of the batch step input. + public let batchJobName: String + /// The batch job parameters of the batch step input. + public let batchJobParameters: [String: String]? + /// The export data set names of the batch step input. + public let exportDataSetNames: [String]? + /// The properties of the batch step input. + public let properties: MainframeActionProperties? + /// The resource of the batch step input. + public let resource: MainframeResourceSummary + + public init(batchJobName: String, batchJobParameters: [String: String]? = nil, exportDataSetNames: [String]? = nil, properties: MainframeActionProperties? = nil, resource: MainframeResourceSummary) { + self.batchJobName = batchJobName + self.batchJobParameters = batchJobParameters + self.exportDataSetNames = exportDataSetNames + self.properties = properties + self.resource = resource + } + + private enum CodingKeys: String, CodingKey { + case batchJobName = "batchJobName" + case batchJobParameters = "batchJobParameters" + case exportDataSetNames = "exportDataSetNames" + case properties = "properties" + case resource = "resource" + } + } + + public struct BatchStepOutput: AWSDecodableShape { + /// The data set details of the batch step output. + public let dataSetDetails: [DataSet]? + /// The data set export location of the batch step output. 
+ public let dataSetExportLocation: String? + /// The Database Migration Service (DMS) output location of the batch step output. + public let dmsOutputLocation: String? + + public init(dataSetDetails: [DataSet]? = nil, dataSetExportLocation: String? = nil, dmsOutputLocation: String? = nil) { + self.dataSetDetails = dataSetDetails + self.dataSetExportLocation = dataSetExportLocation + self.dmsOutputLocation = dmsOutputLocation + } + + private enum CodingKeys: String, CodingKey { + case dataSetDetails = "dataSetDetails" + case dataSetExportLocation = "dataSetExportLocation" + case dmsOutputLocation = "dmsOutputLocation" + } + } + + public struct BatchSummary: AWSDecodableShape { + /// The step input of the batch summary. + public let stepInput: BatchStepInput + /// The step output of the batch summary. + public let stepOutput: BatchStepOutput? + + public init(stepInput: BatchStepInput, stepOutput: BatchStepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct CloudFormation: AWSEncodableShape & AWSDecodableShape { + /// The CloudFormation properties in the CloudFormation template. + public let parameters: [String: String]? + /// The template location of the CloudFormation template. + public let templateLocation: String + + public init(parameters: [String: String]? = nil, templateLocation: String) { + self.parameters = parameters + self.templateLocation = templateLocation + } + + public func validate(name: String) throws { + try self.validate(self.templateLocation, name: "templateLocation", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case parameters = "parameters" + case templateLocation = "templateLocation" + } + } + + public struct CloudFormationAction: AWSEncodableShape & AWSDecodableShape { + /// The action type of the CloudFormation action. + public let actionType: CloudFormationActionType? + /// The resource of the CloudFormation action. + public let resource: String + + public init(actionType: CloudFormationActionType? = nil, resource: String) { + self.actionType = actionType + self.resource = resource + } + + public func validate(name: String) throws { + try self.validate(self.resource, name: "resource", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case actionType = "actionType" + case resource = "resource" + } + } + + public struct CompareAction: AWSEncodableShape & AWSDecodableShape { + /// The input of the compare action. + public let input: Input + /// The output of the compare action. + public let output: Output? + + public init(input: Input, output: Output? = nil) { + self.input = input + self.output = output + } + + public func validate(name: String) throws { + try self.input.validate(name: "\(name).input") + try self.output?.validate(name: "\(name).output") + } + + private enum CodingKeys: String, CodingKey { + case input = "input" + case output = "output" + } + } + + public struct CompareActionSummary: AWSDecodableShape { + /// The type of the compare action summary. + public let type: File + + public init(type: File) { + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case type = "type" + } + } + + public struct CompareDataSetsStepInput: AWSDecodableShape { + /// The source data sets of the compare data sets step input location. 
+ public let sourceDataSets: [DataSet] + /// The source location of the compare data sets step input location. + public let sourceLocation: String + /// The target data sets of the compare data sets step input location. + public let targetDataSets: [DataSet] + /// The target location of the compare data sets step input location. + public let targetLocation: String + + public init(sourceDataSets: [DataSet], sourceLocation: String, targetDataSets: [DataSet], targetLocation: String) { + self.sourceDataSets = sourceDataSets + self.sourceLocation = sourceLocation + self.targetDataSets = targetDataSets + self.targetLocation = targetLocation + } + + private enum CodingKeys: String, CodingKey { + case sourceDataSets = "sourceDataSets" + case sourceLocation = "sourceLocation" + case targetDataSets = "targetDataSets" + case targetLocation = "targetLocation" + } + } + + public struct CompareDataSetsStepOutput: AWSDecodableShape { + /// The comparison output location of the compare data sets step output. + public let comparisonOutputLocation: String + /// The comparison status of the compare data sets step output. + public let comparisonStatus: ComparisonStatusEnum + + public init(comparisonOutputLocation: String, comparisonStatus: ComparisonStatusEnum) { + self.comparisonOutputLocation = comparisonOutputLocation + self.comparisonStatus = comparisonStatus + } + + private enum CodingKeys: String, CodingKey { + case comparisonOutputLocation = "comparisonOutputLocation" + case comparisonStatus = "comparisonStatus" + } + } + + public struct CompareDataSetsSummary: AWSDecodableShape { + /// The step input of the compare data sets summary. + public let stepInput: CompareDataSetsStepInput + /// The step output of the compare data sets summary. + public let stepOutput: CompareDataSetsStepOutput? + + public init(stepInput: CompareDataSetsStepInput, stepOutput: CompareDataSetsStepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct CompareDatabaseCDCStepInput: AWSDecodableShape { + /// The output location of the compare database CDC step input. + public let outputLocation: String? + /// The source location of the compare database CDC step input. + public let sourceLocation: String + /// The source metadata of the compare database CDC step input. + public let sourceMetadata: SourceDatabaseMetadata + /// The target location of the compare database CDC step input. + public let targetLocation: String + /// The target metadata location of the compare database CDC step input. + public let targetMetadata: TargetDatabaseMetadata + + public init(outputLocation: String? = nil, sourceLocation: String, sourceMetadata: SourceDatabaseMetadata, targetLocation: String, targetMetadata: TargetDatabaseMetadata) { + self.outputLocation = outputLocation + self.sourceLocation = sourceLocation + self.sourceMetadata = sourceMetadata + self.targetLocation = targetLocation + self.targetMetadata = targetMetadata + } + + private enum CodingKeys: String, CodingKey { + case outputLocation = "outputLocation" + case sourceLocation = "sourceLocation" + case sourceMetadata = "sourceMetadata" + case targetLocation = "targetLocation" + case targetMetadata = "targetMetadata" + } + } + + public struct CompareDatabaseCDCStepOutput: AWSDecodableShape { + /// The comparison output of the compare database CDC step output. 
+ public let comparisonOutputLocation: String + /// The comparison status of the compare database CDC step output. + public let comparisonStatus: ComparisonStatusEnum + + public init(comparisonOutputLocation: String, comparisonStatus: ComparisonStatusEnum) { + self.comparisonOutputLocation = comparisonOutputLocation + self.comparisonStatus = comparisonStatus + } + + private enum CodingKeys: String, CodingKey { + case comparisonOutputLocation = "comparisonOutputLocation" + case comparisonStatus = "comparisonStatus" + } + } + + public struct CompareDatabaseCDCSummary: AWSDecodableShape { + /// The step input of the compare database CDC summary. + public let stepInput: CompareDatabaseCDCStepInput + /// The step output of the compare database CDC summary. + public let stepOutput: CompareDatabaseCDCStepOutput? + + public init(stepInput: CompareDatabaseCDCStepInput, stepOutput: CompareDatabaseCDCStepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct CreateCloudFormationStepInput: AWSDecodableShape { + /// The CloudFormation properties of the CloudFormation step input. + public let parameters: [String: String]? + /// The template location of the CloudFormation step input. + public let templateLocation: String + + public init(parameters: [String: String]? = nil, templateLocation: String) { + self.parameters = parameters + self.templateLocation = templateLocation + } + + private enum CodingKeys: String, CodingKey { + case parameters = "parameters" + case templateLocation = "templateLocation" + } + } + + public struct CreateCloudFormationStepOutput: AWSDecodableShape { + /// The exports of the CloudFormation step output. + public let exports: [String: String]? + /// The stack ID of the CloudFormation step output. + public let stackId: String + + public init(exports: [String: String]? = nil, stackId: String) { + self.exports = exports + self.stackId = stackId + } + + private enum CodingKeys: String, CodingKey { + case exports = "exports" + case stackId = "stackId" + } + } + + public struct CreateCloudFormationSummary: AWSDecodableShape { + /// The step input of the CloudFormation summary. + public let stepInput: CreateCloudFormationStepInput + /// The step output of the CloudFormation summary. + public let stepOutput: CreateCloudFormationStepOutput? + + public init(stepInput: CreateCloudFormationStepInput, stepOutput: CreateCloudFormationStepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct CreateTestCaseRequest: AWSEncodableShape { + /// The client token of the test case. + public let clientToken: String? + /// The description of the test case. + public let description: String? + /// The name of the test case. + public let name: String + /// The steps in the test case. + public let steps: [Step] + /// The specified tags of the test case. + public let tags: [String: String]? + + public init(clientToken: String? = CreateTestCaseRequest.idempotencyToken(), description: String? = nil, name: String, steps: [Step], tags: [String: String]? 
= nil) { + self.clientToken = clientToken + self.description = description + self.name = name + self.steps = steps + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[A-Za-z0-9\\-]{1,64}$") + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$") + try self.steps.forEach { + try $0.validate(name: "\(name).steps[]") + } + try self.validate(self.steps, name: "steps", parent: name, max: 20) + try self.validate(self.steps, name: "steps", parent: name, min: 1) + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:).+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case name = "name" + case steps = "steps" + case tags = "tags" + } + } + + public struct CreateTestCaseResponse: AWSDecodableShape { + /// The test case ID of the test case. + public let testCaseId: String + /// The test case version of the test case. + public let testCaseVersion: Int + + public init(testCaseId: String, testCaseVersion: Int) { + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + } + + private enum CodingKeys: String, CodingKey { + case testCaseId = "testCaseId" + case testCaseVersion = "testCaseVersion" + } + } + + public struct CreateTestConfigurationRequest: AWSEncodableShape { + /// The client token of the test configuration. + public let clientToken: String? + /// The description of the test configuration. + public let description: String? + /// The name of the test configuration. + public let name: String + /// The properties of the test configuration. + public let properties: [String: String]? + /// The defined resources of the test configuration. + public let resources: [Resource] + /// The service settings of the test configuration. + public let serviceSettings: ServiceSettings? + /// The tags of the test configuration. + public let tags: [String: String]? + + public init(clientToken: String? = CreateTestConfigurationRequest.idempotencyToken(), description: String? = nil, name: String, properties: [String: String]? = nil, resources: [Resource], serviceSettings: ServiceSettings? = nil, tags: [String: String]? 
= nil) { + self.clientToken = clientToken + self.description = description + self.name = name + self.properties = properties + self.resources = resources + self.serviceSettings = serviceSettings + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[A-Za-z0-9\\-]{1,64}$") + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$") + try self.resources.forEach { + try $0.validate(name: "\(name).resources[]") + } + try self.validate(self.resources, name: "resources", parent: name, max: 20) + try self.validate(self.resources, name: "resources", parent: name, min: 1) + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:).+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case description = "description" + case name = "name" + case properties = "properties" + case resources = "resources" + case serviceSettings = "serviceSettings" + case tags = "tags" + } + } + + public struct CreateTestConfigurationResponse: AWSDecodableShape { + /// The test configuration ID. + public let testConfigurationId: String + /// The test configuration version. + public let testConfigurationVersion: Int + + public init(testConfigurationId: String, testConfigurationVersion: Int) { + self.testConfigurationId = testConfigurationId + self.testConfigurationVersion = testConfigurationVersion + } + + private enum CodingKeys: String, CodingKey { + case testConfigurationId = "testConfigurationId" + case testConfigurationVersion = "testConfigurationVersion" + } + } + + public struct CreateTestSuiteRequest: AWSEncodableShape { + /// The after steps of the test suite. + public let afterSteps: [Step]? + /// The before steps of the test suite. + public let beforeSteps: [Step]? + /// The client token of the test suite. + public let clientToken: String? + /// The description of the test suite. + public let description: String? + /// The name of the test suite. + public let name: String + /// The tags of the test suite. + public let tags: [String: String]? + /// The test cases in the test suite. + public let testCases: TestCases + + public init(afterSteps: [Step]? = nil, beforeSteps: [Step]? = nil, clientToken: String? = CreateTestSuiteRequest.idempotencyToken(), description: String? = nil, name: String, tags: [String: String]? 
= nil, testCases: TestCases) { + self.afterSteps = afterSteps + self.beforeSteps = beforeSteps + self.clientToken = clientToken + self.description = description + self.name = name + self.tags = tags + self.testCases = testCases + } + + public func validate(name: String) throws { + try self.afterSteps?.forEach { + try $0.validate(name: "\(name).afterSteps[]") + } + try self.validate(self.afterSteps, name: "afterSteps", parent: name, max: 20) + try self.validate(self.afterSteps, name: "afterSteps", parent: name, min: 1) + try self.beforeSteps?.forEach { + try $0.validate(name: "\(name).beforeSteps[]") + } + try self.validate(self.beforeSteps, name: "beforeSteps", parent: name, max: 20) + try self.validate(self.beforeSteps, name: "beforeSteps", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[A-Za-z0-9\\-]{1,64}$") + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:).+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.testCases.validate(name: "\(name).testCases") + } + + private enum CodingKeys: String, CodingKey { + case afterSteps = "afterSteps" + case beforeSteps = "beforeSteps" + case clientToken = "clientToken" + case description = "description" + case name = "name" + case tags = "tags" + case testCases = "testCases" + } + } + + public struct CreateTestSuiteResponse: AWSDecodableShape { + /// The suite ID of the test suite. + public let testSuiteId: String + /// The suite version of the test suite. + public let testSuiteVersion: Int + + public init(testSuiteId: String, testSuiteVersion: Int) { + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + private enum CodingKeys: String, CodingKey { + case testSuiteId = "testSuiteId" + case testSuiteVersion = "testSuiteVersion" + } + } + + public struct DataSet: AWSEncodableShape & AWSDecodableShape { + /// The CCSID of the data set. + public let ccsid: String + /// The format of the data set. + public let format: Format + /// The length of the data set. + public let length: Int + /// The name of the data set. + public let name: String + /// The type of the data set. + public let type: DataSetType + + public init(ccsid: String, format: Format, length: Int, name: String, type: DataSetType) { + self.ccsid = ccsid + self.format = format + self.length = length + self.name = name + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.ccsid, name: "ccsid", parent: name, pattern: "^\\S{1,50}$") + try self.validate(self.name, name: "name", parent: name, pattern: "^\\S{1,100}$") + } + + private enum CodingKeys: String, CodingKey { + case ccsid = "ccsid" + case format = "format" + case length = "length" + case name = "name" + case type = "type" + } + } + + public struct DatabaseCDC: AWSEncodableShape & AWSDecodableShape { + /// The source metadata of the database CDC. + public let sourceMetadata: SourceDatabaseMetadata + /// The target metadata of the database CDC. 
+ public let targetMetadata: TargetDatabaseMetadata + + public init(sourceMetadata: SourceDatabaseMetadata, targetMetadata: TargetDatabaseMetadata) { + self.sourceMetadata = sourceMetadata + self.targetMetadata = targetMetadata + } + + private enum CodingKeys: String, CodingKey { + case sourceMetadata = "sourceMetadata" + case targetMetadata = "targetMetadata" + } + } + + public struct DeleteCloudFormationStepInput: AWSDecodableShape { + /// The stack ID of the deleted CloudFormation step input. + public let stackId: String + + public init(stackId: String) { + self.stackId = stackId + } + + private enum CodingKeys: String, CodingKey { + case stackId = "stackId" + } + } + + public struct DeleteCloudFormationStepOutput: AWSDecodableShape { + public init() {} + } + + public struct DeleteCloudFormationSummary: AWSDecodableShape { + /// The step input of the deleted CloudFormation summary. + public let stepInput: DeleteCloudFormationStepInput + /// The step output of the deleted CloudFormation summary. + public let stepOutput: DeleteCloudFormationStepOutput? + + public init(stepInput: DeleteCloudFormationStepInput, stepOutput: DeleteCloudFormationStepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct DeleteTestCaseRequest: AWSEncodableShape { + /// The test case ID of the test case. + public let testCaseId: String + + public init(testCaseId: String) { + self.testCaseId = testCaseId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testCaseId, key: "testCaseId") + } + + public func validate(name: String) throws { + try self.validate(self.testCaseId, name: "testCaseId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteTestCaseResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteTestConfigurationRequest: AWSEncodableShape { + /// The test ID of the test configuration. + public let testConfigurationId: String + + public init(testConfigurationId: String) { + self.testConfigurationId = testConfigurationId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testConfigurationId, key: "testConfigurationId") + } + + public func validate(name: String) throws { + try self.validate(self.testConfigurationId, name: "testConfigurationId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteTestConfigurationResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteTestRunRequest: AWSEncodableShape { + /// The run ID of the test run. + public let testRunId: String + + public init(testRunId: String) { + self.testRunId = testRunId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testRunId, key: "testRunId") + } + + public func validate(name: String) throws { + try self.validate(self.testRunId, name: "testRunId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteTestRunResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteTestSuiteRequest: AWSEncodableShape { + /// The test ID of the test suite. + public let testSuiteId: String + + public init(testSuiteId: String) { + self.testSuiteId = testSuiteId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testSuiteId, key: "testSuiteId") + } + + public func validate(name: String) throws { + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteTestSuiteResponse: AWSDecodableShape { + public init() {} + } + + public struct GetTestCaseRequest: AWSEncodableShape { + /// The request test ID of the test case. + public let testCaseId: String + /// The test case version of the test case. + public let testCaseVersion: Int? + + public init(testCaseId: String, testCaseVersion: Int? = nil) { + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testCaseId, key: "testCaseId") + request.encodeQuery(self.testCaseVersion, key: "testCaseVersion") + } + + public func validate(name: String) throws { + try self.validate(self.testCaseId, name: "testCaseId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetTestCaseResponse: AWSDecodableShape { + /// The creation time of the test case. + public let creationTime: Date + /// The description of the test case. + public let description: String? + /// The last update time of the test case. + public let lastUpdateTime: Date + /// The latest version of the test case. + public let latestVersion: TestCaseLatestVersion + /// The name of the test case. + public let name: String + /// The status of the test case. + public let status: TestCaseLifecycle + /// The status reason of the test case. + public let statusReason: String? + /// The steps of the test case. + public let steps: [Step] + /// The tags of the test case. + public let tags: [String: String]? + /// The Amazon Resource Name (ARN) of the test case. + public let testCaseArn: String + /// The response test ID of the test case. + public let testCaseId: String + /// The case version of the test case. + public let testCaseVersion: Int + + public init(creationTime: Date, description: String? = nil, lastUpdateTime: Date, latestVersion: TestCaseLatestVersion, name: String, status: TestCaseLifecycle, statusReason: String? = nil, steps: [Step], tags: [String: String]? 
= nil, testCaseArn: String, testCaseId: String, testCaseVersion: Int) { + self.creationTime = creationTime + self.description = description + self.lastUpdateTime = lastUpdateTime + self.latestVersion = latestVersion + self.name = name + self.status = status + self.statusReason = statusReason + self.steps = steps + self.tags = tags + self.testCaseArn = testCaseArn + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case description = "description" + case lastUpdateTime = "lastUpdateTime" + case latestVersion = "latestVersion" + case name = "name" + case status = "status" + case statusReason = "statusReason" + case steps = "steps" + case tags = "tags" + case testCaseArn = "testCaseArn" + case testCaseId = "testCaseId" + case testCaseVersion = "testCaseVersion" + } + } + + public struct GetTestConfigurationRequest: AWSEncodableShape { + /// The request test configuration ID. + public let testConfigurationId: String + /// The test configuration version. + public let testConfigurationVersion: Int? + + public init(testConfigurationId: String, testConfigurationVersion: Int? = nil) { + self.testConfigurationId = testConfigurationId + self.testConfigurationVersion = testConfigurationVersion + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testConfigurationId, key: "testConfigurationId") + request.encodeQuery(self.testConfigurationVersion, key: "testConfigurationVersion") + } + + public func validate(name: String) throws { + try self.validate(self.testConfigurationId, name: "testConfigurationId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetTestConfigurationResponse: AWSDecodableShape { + /// The creation time of the test configuration. + public let creationTime: Date + /// The description of the test configuration. + public let description: String? + /// The last update time of the test configuration. + public let lastUpdateTime: Date + /// The latest version of the test configuration. + public let latestVersion: TestConfigurationLatestVersion + /// The test configuration name + public let name: String + /// The properties of the test configuration. + public let properties: [String: String] + /// The resources of the test configuration. + public let resources: [Resource] + /// The service settings of the test configuration. + public let serviceSettings: ServiceSettings? + /// The status of the test configuration. + public let status: TestConfigurationLifecycle + /// The status reason of the test configuration. + public let statusReason: String? + /// The tags of the test configuration. + public let tags: [String: String]? + /// The test configuration Amazon Resource Name (ARN). + public let testConfigurationArn: String + /// The response test configuration ID. + public let testConfigurationId: String + /// The test configuration version. + public let testConfigurationVersion: Int + + public init(creationTime: Date, description: String? = nil, lastUpdateTime: Date, latestVersion: TestConfigurationLatestVersion, name: String, properties: [String: String], resources: [Resource], serviceSettings: ServiceSettings? = nil, status: TestConfigurationLifecycle, statusReason: String? = nil, tags: [String: String]? 
= nil, testConfigurationArn: String, testConfigurationId: String, testConfigurationVersion: Int) { + self.creationTime = creationTime + self.description = description + self.lastUpdateTime = lastUpdateTime + self.latestVersion = latestVersion + self.name = name + self.properties = properties + self.resources = resources + self.serviceSettings = serviceSettings + self.status = status + self.statusReason = statusReason + self.tags = tags + self.testConfigurationArn = testConfigurationArn + self.testConfigurationId = testConfigurationId + self.testConfigurationVersion = testConfigurationVersion + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case description = "description" + case lastUpdateTime = "lastUpdateTime" + case latestVersion = "latestVersion" + case name = "name" + case properties = "properties" + case resources = "resources" + case serviceSettings = "serviceSettings" + case status = "status" + case statusReason = "statusReason" + case tags = "tags" + case testConfigurationArn = "testConfigurationArn" + case testConfigurationId = "testConfigurationId" + case testConfigurationVersion = "testConfigurationVersion" + } + } + + public struct GetTestRunStepRequest: AWSEncodableShape { + /// The step name of the test run step. + public let stepName: String + /// The test case ID of a test run step. + public let testCaseId: String? + /// The test run ID of the test run step. + public let testRunId: String + /// The test suite ID of a test run step. + public let testSuiteId: String? + + public init(stepName: String, testCaseId: String? = nil, testRunId: String, testSuiteId: String? = nil) { + self.stepName = stepName + self.testCaseId = testCaseId + self.testRunId = testRunId + self.testSuiteId = testSuiteId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.stepName, key: "stepName") + request.encodeQuery(self.testCaseId, key: "testCaseId") + request.encodePath(self.testRunId, key: "testRunId") + request.encodeQuery(self.testSuiteId, key: "testSuiteId") + } + + public func validate(name: String) throws { + try self.validate(self.stepName, name: "stepName", parent: name, pattern: "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$") + try self.validate(self.testCaseId, name: "testCaseId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + try self.validate(self.testRunId, name: "testRunId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetTestRunStepResponse: AWSDecodableShape { + /// The after steps of the test run step. + public let afterStep: Bool? + /// The before steps of the test run step. + public let beforeStep: Bool? + /// The run end time of the test run step. + public let runEndTime: Date? + /// The run start time of the test run step. + public let runStartTime: Date + /// The status of the test run step. + public let status: StepRunStatus + /// The status reason of the test run step. + public let statusReason: String? + /// The step name of the test run step. + public let stepName: String + /// The step run summary of the test run step. + public let stepRunSummary: StepRunSummary? + /// The test case ID of the test run step. + public let testCaseId: String? + /// The test case version of the test run step. 
+ public let testCaseVersion: Int? + /// The test run ID of the test run step. + public let testRunId: String + /// The test suite ID of the test run step. + public let testSuiteId: String? + /// The test suite version of the test run step. + public let testSuiteVersion: Int? + + public init(afterStep: Bool? = nil, beforeStep: Bool? = nil, runEndTime: Date? = nil, runStartTime: Date, status: StepRunStatus, statusReason: String? = nil, stepName: String, stepRunSummary: StepRunSummary? = nil, testCaseId: String? = nil, testCaseVersion: Int? = nil, testRunId: String, testSuiteId: String? = nil, testSuiteVersion: Int? = nil) { + self.afterStep = afterStep + self.beforeStep = beforeStep + self.runEndTime = runEndTime + self.runStartTime = runStartTime + self.status = status + self.statusReason = statusReason + self.stepName = stepName + self.stepRunSummary = stepRunSummary + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + self.testRunId = testRunId + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + private enum CodingKeys: String, CodingKey { + case afterStep = "afterStep" + case beforeStep = "beforeStep" + case runEndTime = "runEndTime" + case runStartTime = "runStartTime" + case status = "status" + case statusReason = "statusReason" + case stepName = "stepName" + case stepRunSummary = "stepRunSummary" + case testCaseId = "testCaseId" + case testCaseVersion = "testCaseVersion" + case testRunId = "testRunId" + case testSuiteId = "testSuiteId" + case testSuiteVersion = "testSuiteVersion" + } + } + + public struct GetTestSuiteRequest: AWSEncodableShape { + /// The ID of the test suite. + public let testSuiteId: String + /// The version of the test suite. + public let testSuiteVersion: Int? + + public init(testSuiteId: String, testSuiteVersion: Int? = nil) { + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.testSuiteId, key: "testSuiteId") + request.encodeQuery(self.testSuiteVersion, key: "testSuiteVersion") + } + + public func validate(name: String) throws { + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetTestSuiteResponse: AWSDecodableShape { + /// The after steps of the test suite. + public let afterSteps: [Step] + /// The before steps of the test suite. + public let beforeSteps: [Step] + /// The creation time of the test suite. + public let creationTime: Date + /// The description of the test suite. + public let description: String? + /// The last update time of the test suite. + public let lastUpdateTime: Date + /// The latest version of the test suite. + public let latestVersion: TestSuiteLatestVersion + /// The name of the test suite. + public let name: String + /// The status of the test suite. + public let status: TestSuiteLifecycle? + /// The status reason of the test suite. + public let statusReason: String? + /// The tags of the test suite. + public let tags: [String: String]? + /// The test cases of the test suite. + public let testCases: TestCases + /// The test suite Amazon Resource Name (ARN). + public let testSuiteArn: String + /// The response ID of the test suite. + public let testSuiteId: String + /// The version of the test suite. 
+ public let testSuiteVersion: Int + + public init(afterSteps: [Step], beforeSteps: [Step], creationTime: Date, description: String? = nil, lastUpdateTime: Date, latestVersion: TestSuiteLatestVersion, name: String, status: TestSuiteLifecycle? = nil, statusReason: String? = nil, tags: [String: String]? = nil, testCases: TestCases, testSuiteArn: String, testSuiteId: String, testSuiteVersion: Int) { + self.afterSteps = afterSteps + self.beforeSteps = beforeSteps + self.creationTime = creationTime + self.description = description + self.lastUpdateTime = lastUpdateTime + self.latestVersion = latestVersion + self.name = name + self.status = status + self.statusReason = statusReason + self.tags = tags + self.testCases = testCases + self.testSuiteArn = testSuiteArn + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + private enum CodingKeys: String, CodingKey { + case afterSteps = "afterSteps" + case beforeSteps = "beforeSteps" + case creationTime = "creationTime" + case description = "description" + case lastUpdateTime = "lastUpdateTime" + case latestVersion = "latestVersion" + case name = "name" + case status = "status" + case statusReason = "statusReason" + case tags = "tags" + case testCases = "testCases" + case testSuiteArn = "testSuiteArn" + case testSuiteId = "testSuiteId" + case testSuiteVersion = "testSuiteVersion" + } + } + + public struct InputFile: AWSEncodableShape & AWSDecodableShape { + /// The file metadata of the input file. + public let fileMetadata: FileMetadata + /// The source location of the input file. + public let sourceLocation: String + /// The target location of the input file. + public let targetLocation: String + + public init(fileMetadata: FileMetadata, sourceLocation: String, targetLocation: String) { + self.fileMetadata = fileMetadata + self.sourceLocation = sourceLocation + self.targetLocation = targetLocation + } + + public func validate(name: String) throws { + try self.fileMetadata.validate(name: "\(name).fileMetadata") + try self.validate(self.sourceLocation, name: "sourceLocation", parent: name, pattern: "^\\S{1,1000}$") + try self.validate(self.targetLocation, name: "targetLocation", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case fileMetadata = "fileMetadata" + case sourceLocation = "sourceLocation" + case targetLocation = "targetLocation" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The tags of the resource. 
+ public let tags: [String: String] + + public init(tags: [String: String]) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct ListTestCasesRequest: AWSEncodableShape { + /// The maximum results of the test case. + public let maxResults: Int? + /// The next token of the test cases. + public let nextToken: String? + /// The IDs of the test cases. + public let testCaseIds: [String]? + + public init(maxResults: Int? = nil, nextToken: String? = nil, testCaseIds: [String]? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.testCaseIds = testCaseIds + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.testCaseIds, key: "testCaseIds") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + try self.testCaseIds?.forEach { + try validate($0, name: "testCaseIds[]", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTestCasesResponse: AWSDecodableShape { + /// The next token in test cases. + public let nextToken: String? + /// The test cases in an application. + public let testCases: [TestCaseSummary] + + public init(nextToken: String? = nil, testCases: [TestCaseSummary]) { + self.nextToken = nextToken + self.testCases = testCases + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case testCases = "testCases" + } + } + + public struct ListTestConfigurationsRequest: AWSEncodableShape { + /// The maximum results of the test configuration. + public let maxResults: Int? + /// The next token for the test configurations. + public let nextToken: String? + /// The configuration IDs of the test configurations. + public let testConfigurationIds: [String]? + + public init(maxResults: Int? = nil, nextToken: String? = nil, testConfigurationIds: [String]? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.testConfigurationIds = testConfigurationIds + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.testConfigurationIds, key: "testConfigurationIds") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + try self.testConfigurationIds?.forEach { + try validate($0, name: "testConfigurationIds[]", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTestConfigurationsResponse: AWSDecodableShape { + /// The next token in the test configurations. + public let nextToken: String? 
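+ // Pagination sketch (assumes the generated AppTest client exposes listTestConfigurations(_:logger:),
+ // following the same pattern as the other Soto services in this release):
+ //   var token: String? = nil
+ //   repeat {
+ //       let page = try await appTest.listTestConfigurations(.init(nextToken: token))
+ //       // work with page.testConfigurations; a nil nextToken means this was the last page
+ //       token = page.nextToken
+ //   } while token != nil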
+ /// The test configurations. + public let testConfigurations: [TestConfigurationSummary] + + public init(nextToken: String? = nil, testConfigurations: [TestConfigurationSummary]) { + self.nextToken = nextToken + self.testConfigurations = testConfigurations + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case testConfigurations = "testConfigurations" + } + } + + public struct ListTestRunStepsRequest: AWSEncodableShape { + /// The maximum number of test run steps to return in one page of results. + public let maxResults: Int? + /// The token from a previous step to retrieve the next page of results. + public let nextToken: String? + /// The test case ID of the test run steps. + public let testCaseId: String? + /// The test run ID of the test run steps. + public let testRunId: String + /// The test suite ID of the test run steps. + public let testSuiteId: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil, testCaseId: String? = nil, testRunId: String, testSuiteId: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.testCaseId = testCaseId + self.testRunId = testRunId + self.testSuiteId = testSuiteId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.testCaseId, key: "testCaseId") + request.encodePath(self.testRunId, key: "testRunId") + request.encodeQuery(self.testSuiteId, key: "testSuiteId") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + try self.validate(self.testCaseId, name: "testCaseId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + try self.validate(self.testRunId, name: "testRunId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTestRunStepsResponse: AWSDecodableShape { + /// The token from a previous request to retrieve the next page of results. + public let nextToken: String? + /// The test run steps of the response query. + public let testRunSteps: [TestRunStepSummary] + + public init(nextToken: String? = nil, testRunSteps: [TestRunStepSummary]) { + self.nextToken = nextToken + self.testRunSteps = testRunSteps + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case testRunSteps = "testRunSteps" + } + } + + public struct ListTestRunTestCasesRequest: AWSEncodableShape { + /// The maximum number of test run test cases to return in one page of results. + public let maxResults: Int? + /// The token from a previous request to retrieve the next page of results. + public let nextToken: String? + /// The test run ID of the test cases. + public let testRunId: String + + public init(maxResults: Int? = nil, nextToken: String? = nil, testRunId: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.testRunId = testRunId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.testRunId, key: "testRunId") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + try self.validate(self.testRunId, name: "testRunId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTestRunTestCasesResponse: AWSDecodableShape { + /// The token from a previous request to retrieve the next page of results. + public let nextToken: String? + /// The test run of the test cases. + public let testRunTestCases: [TestCaseRunSummary] + + public init(nextToken: String? = nil, testRunTestCases: [TestCaseRunSummary]) { + self.nextToken = nextToken + self.testRunTestCases = testRunTestCases + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case testRunTestCases = "testRunTestCases" + } + } + + public struct ListTestRunsRequest: AWSEncodableShape { + /// The maximum number of test runs to return in one page of results. + public let maxResults: Int? + /// The token from the previous request to retrieve the next page of test run results. + public let nextToken: String? + /// The test run IDs of the test runs. + public let testRunIds: [String]? + /// The test suite ID of the test runs. + public let testSuiteId: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil, testRunIds: [String]? = nil, testSuiteId: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.testRunIds = testRunIds + self.testSuiteId = testSuiteId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.testRunIds, key: "testrunIds") + request.encodeQuery(self.testSuiteId, key: "testSuiteId") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + try self.testRunIds?.forEach { + try validate($0, name: "testRunIds[]", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTestRunsResponse: AWSDecodableShape { + /// The token from the previous request to retrieve the next page of results. + public let nextToken: String? + /// The test runs of the response query. + public let testRuns: [TestRunSummary] + + public init(nextToken: String? 
= nil, testRuns: [TestRunSummary]) { + self.nextToken = nextToken + self.testRuns = testRuns + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case testRuns = "testRuns" + } + } + + public struct ListTestSuitesRequest: AWSEncodableShape { + /// The maximum number of test suites to return in one page of results. + public let maxResults: Int? + /// The token from a previous request to retrieve the next page of results. + public let nextToken: String? + /// The suite ID of the test suites. + public let testSuiteIds: [String]? + + public init(maxResults: Int? = nil, nextToken: String? = nil, testSuiteIds: [String]? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.testSuiteIds = testSuiteIds + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.testSuiteIds, key: "testSuiteIds") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + try self.testSuiteIds?.forEach { + try validate($0, name: "testSuiteIds[]", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTestSuitesResponse: AWSDecodableShape { + /// The token from a previous request to retrieve the next page of test suites results. + public let nextToken: String? + /// The test suites returned with the response query. + public let testSuites: [TestSuiteSummary] + + public init(nextToken: String? = nil, testSuites: [TestSuiteSummary]) { + self.nextToken = nextToken + self.testSuites = testSuites + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case testSuites = "testSuites" + } + } + + public struct M2ManagedActionProperties: AWSEncodableShape & AWSDecodableShape { + /// Force stops the AWS Mainframe Modernization managed action properties. + public let forceStop: Bool? + /// The import data set location of the AWS Mainframe Modernization managed action properties. + public let importDataSetLocation: String? + + public init(forceStop: Bool? = nil, importDataSetLocation: String? = nil) { + self.forceStop = forceStop + self.importDataSetLocation = importDataSetLocation + } + + public func validate(name: String) throws { + try self.validate(self.importDataSetLocation, name: "importDataSetLocation", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case forceStop = "forceStop" + case importDataSetLocation = "importDataSetLocation" + } + } + + public struct M2ManagedApplication: AWSEncodableShape & AWSDecodableShape { + /// The application ID of the AWS Mainframe Modernization managed application. + public let applicationId: String + /// The listener port of the AWS Mainframe Modernization managed application. + public let listenerPort: String? + /// The runtime of the AWS Mainframe Modernization managed application. + public let runtime: M2ManagedRuntime + /// The VPC endpoint service name of the AWS Mainframe Modernization managed application. + public let vpcEndpointServiceName: String? 
+ + public init(applicationId: String, listenerPort: String? = nil, runtime: M2ManagedRuntime, vpcEndpointServiceName: String? = nil) { + self.applicationId = applicationId + self.listenerPort = listenerPort + self.runtime = runtime + self.vpcEndpointServiceName = vpcEndpointServiceName + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^\\S{1,1000}$") + try self.validate(self.listenerPort, name: "listenerPort", parent: name, pattern: "^\\S{1,1000}$") + try self.validate(self.vpcEndpointServiceName, name: "vpcEndpointServiceName", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "applicationId" + case listenerPort = "listenerPort" + case runtime = "runtime" + case vpcEndpointServiceName = "vpcEndpointServiceName" + } + } + + public struct M2ManagedApplicationAction: AWSEncodableShape & AWSDecodableShape { + /// The action type of the AWS Mainframe Modernization managed application action. + public let actionType: M2ManagedActionType + /// The properties of the AWS Mainframe Modernization managed application action. + public let properties: M2ManagedActionProperties? + /// The resource of the AWS Mainframe Modernization managed application action. + public let resource: String + + public init(actionType: M2ManagedActionType, properties: M2ManagedActionProperties? = nil, resource: String) { + self.actionType = actionType + self.properties = properties + self.resource = resource + } + + public func validate(name: String) throws { + try self.properties?.validate(name: "\(name).properties") + try self.validate(self.resource, name: "resource", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case actionType = "actionType" + case properties = "properties" + case resource = "resource" + } + } + + public struct M2ManagedApplicationStepInput: AWSDecodableShape { + /// The action type of the AWS Mainframe Modernization managed application step input. + public let actionType: M2ManagedActionType + /// The application ID of the AWS Mainframe Modernization managed application step input. + public let applicationId: String + /// The listener port of the AWS Mainframe Modernization managed application step input. + public let listenerPort: Int? + /// The properties of the AWS Mainframe Modernization managed application step input. + public let properties: M2ManagedActionProperties? + /// The runtime of the AWS Mainframe Modernization managed application step input. + public let runtime: String + /// The VPC endpoint service name of the AWS Mainframe Modernization managed application step input. + public let vpcEndpointServiceName: String? + + public init(actionType: M2ManagedActionType, applicationId: String, listenerPort: Int? = nil, properties: M2ManagedActionProperties? = nil, runtime: String, vpcEndpointServiceName: String? 
= nil) { + self.actionType = actionType + self.applicationId = applicationId + self.listenerPort = listenerPort + self.properties = properties + self.runtime = runtime + self.vpcEndpointServiceName = vpcEndpointServiceName + } + + private enum CodingKeys: String, CodingKey { + case actionType = "actionType" + case applicationId = "applicationId" + case listenerPort = "listenerPort" + case properties = "properties" + case runtime = "runtime" + case vpcEndpointServiceName = "vpcEndpointServiceName" + } + } + + public struct M2ManagedApplicationStepOutput: AWSDecodableShape { + /// The import data set summary of the AWS Mainframe Modernization managed application step output. + public let importDataSetSummary: [String: String]? + + public init(importDataSetSummary: [String: String]? = nil) { + self.importDataSetSummary = importDataSetSummary + } + + private enum CodingKeys: String, CodingKey { + case importDataSetSummary = "importDataSetSummary" + } + } + + public struct M2ManagedApplicationStepSummary: AWSDecodableShape { + /// The step input of the AWS Mainframe Modernization managed application step summary. + public let stepInput: M2ManagedApplicationStepInput + /// The step output of the AWS Mainframe Modernization managed application step summary. + public let stepOutput: M2ManagedApplicationStepOutput? + + public init(stepInput: M2ManagedApplicationStepInput, stepOutput: M2ManagedApplicationStepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct M2ManagedApplicationSummary: AWSDecodableShape { + /// The application ID of the AWS Mainframe Modernization managed application summary. + public let applicationId: String + /// The listener port of the AWS Mainframe Modernization managed application summary. + public let listenerPort: Int? + /// The runtime of the AWS Mainframe Modernization managed application summary. + public let runtime: M2ManagedRuntime + + public init(applicationId: String, listenerPort: Int? = nil, runtime: M2ManagedRuntime) { + self.applicationId = applicationId + self.listenerPort = listenerPort + self.runtime = runtime + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "applicationId" + case listenerPort = "listenerPort" + case runtime = "runtime" + } + } + + public struct M2NonManagedApplication: AWSEncodableShape & AWSDecodableShape { + /// The listener port of the AWS Mainframe Modernization non-managed application. + public let listenerPort: String + /// The runtime of the AWS Mainframe Modernization non-managed application. + public let runtime: M2NonManagedRuntime + /// The VPC endpoint service name of the AWS Mainframe Modernization non-managed application. + public let vpcEndpointServiceName: String + /// The web application name of the AWS Mainframe Modernization non-managed application. + public let webAppName: String? + + public init(listenerPort: String, runtime: M2NonManagedRuntime, vpcEndpointServiceName: String, webAppName: String? 
= nil) { + self.listenerPort = listenerPort + self.runtime = runtime + self.vpcEndpointServiceName = vpcEndpointServiceName + self.webAppName = webAppName + } + + public func validate(name: String) throws { + try self.validate(self.listenerPort, name: "listenerPort", parent: name, pattern: "^\\S{1,1000}$") + try self.validate(self.vpcEndpointServiceName, name: "vpcEndpointServiceName", parent: name, pattern: "^\\S{1,1000}$") + try self.validate(self.webAppName, name: "webAppName", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case listenerPort = "listenerPort" + case runtime = "runtime" + case vpcEndpointServiceName = "vpcEndpointServiceName" + case webAppName = "webAppName" + } + } + + public struct M2NonManagedApplicationAction: AWSEncodableShape & AWSDecodableShape { + /// The action type of the AWS Mainframe Modernization non-managed application action. + public let actionType: M2NonManagedActionType + /// The resource of the AWS Mainframe Modernization non-managed application action. + public let resource: String + + public init(actionType: M2NonManagedActionType, resource: String) { + self.actionType = actionType + self.resource = resource + } + + public func validate(name: String) throws { + try self.validate(self.resource, name: "resource", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case actionType = "actionType" + case resource = "resource" + } + } + + public struct M2NonManagedApplicationStepInput: AWSDecodableShape { + /// The action type of the AWS Mainframe Modernization non-managed application step input. + public let actionType: M2NonManagedActionType + /// The listener port of the AWS Mainframe Modernization non-managed application step input. + public let listenerPort: Int + /// The runtime of the AWS Mainframe Modernization non-managed application step input. + public let runtime: M2NonManagedRuntime + /// The VPC endpoint service name of the AWS Mainframe Modernization non-managed application step input. + public let vpcEndpointServiceName: String + /// The web app name of the AWS Mainframe Modernization non-managed application step input. + public let webAppName: String? + + public init(actionType: M2NonManagedActionType, listenerPort: Int, runtime: M2NonManagedRuntime, vpcEndpointServiceName: String, webAppName: String? = nil) { + self.actionType = actionType + self.listenerPort = listenerPort + self.runtime = runtime + self.vpcEndpointServiceName = vpcEndpointServiceName + self.webAppName = webAppName + } + + private enum CodingKeys: String, CodingKey { + case actionType = "actionType" + case listenerPort = "listenerPort" + case runtime = "runtime" + case vpcEndpointServiceName = "vpcEndpointServiceName" + case webAppName = "webAppName" + } + } + + public struct M2NonManagedApplicationStepOutput: AWSDecodableShape { + public init() {} + } + + public struct M2NonManagedApplicationStepSummary: AWSDecodableShape { + /// The step input of the AWS Mainframe Modernization non-managed application step summary. + public let stepInput: M2NonManagedApplicationStepInput + /// The step output of the AWS Mainframe Modernization non-managed application step summary. + public let stepOutput: M2NonManagedApplicationStepOutput? + + public init(stepInput: M2NonManagedApplicationStepInput, stepOutput: M2NonManagedApplicationStepOutput? 
= nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct M2NonManagedApplicationSummary: AWSDecodableShape { + /// The listener port of the AWS Mainframe Modernization non-managed application summary. + public let listenerPort: Int + /// The runtime of the AWS Mainframe Modernization non-managed application summary. + public let runtime: M2NonManagedRuntime + /// The VPC endpoint service name of the AWS Mainframe Modernization non-managed application summary. + public let vpcEndpointServiceName: String + /// The web application name of the AWS Mainframe Modernization non-managed application summary. + public let webAppName: String? + + public init(listenerPort: Int, runtime: M2NonManagedRuntime, vpcEndpointServiceName: String, webAppName: String? = nil) { + self.listenerPort = listenerPort + self.runtime = runtime + self.vpcEndpointServiceName = vpcEndpointServiceName + self.webAppName = webAppName + } + + private enum CodingKeys: String, CodingKey { + case listenerPort = "listenerPort" + case runtime = "runtime" + case vpcEndpointServiceName = "vpcEndpointServiceName" + case webAppName = "webAppName" + } + } + + public struct MainframeAction: AWSEncodableShape & AWSDecodableShape { + /// The action type of the mainframe action. + public let actionType: MainframeActionType + /// The properties of the mainframe action. + public let properties: MainframeActionProperties? + /// The resource of the mainframe action. + public let resource: String + + public init(actionType: MainframeActionType, properties: MainframeActionProperties? = nil, resource: String) { + self.actionType = actionType + self.properties = properties + self.resource = resource + } + + public func validate(name: String) throws { + try self.actionType.validate(name: "\(name).actionType") + try self.properties?.validate(name: "\(name).properties") + try self.validate(self.resource, name: "resource", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case actionType = "actionType" + case properties = "properties" + case resource = "resource" + } + } + + public struct MainframeActionProperties: AWSEncodableShape & AWSDecodableShape { + /// The DMS task ARN of the mainframe action properties. + public let dmsTaskArn: String? + + public init(dmsTaskArn: String? = nil) { + self.dmsTaskArn = dmsTaskArn + } + + public func validate(name: String) throws { + try self.validate(self.dmsTaskArn, name: "dmsTaskArn", parent: name, pattern: "^\\S{1,1000}$") + } + + private enum CodingKeys: String, CodingKey { + case dmsTaskArn = "dmsTaskArn" + } + } + + public struct OutputFile: AWSEncodableShape & AWSDecodableShape { + /// The file location of the output file. + public let fileLocation: String? + + public init(fileLocation: String? = nil) { + self.fileLocation = fileLocation + } + + public func validate(name: String) throws { + try self.validate(self.fileLocation, name: "fileLocation", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case fileLocation = "fileLocation" + } + } + + public struct Resource: AWSEncodableShape & AWSDecodableShape { + /// The name of the resource. + public let name: String + /// The type of the resource. 
+ public let type: ResourceType + + public init(name: String, type: ResourceType) { + self.name = name + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$") + try self.type.validate(name: "\(name).type") + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case type = "type" + } + } + + public struct Script: AWSEncodableShape & AWSDecodableShape { + /// The script location of the scripts. + public let scriptLocation: String + /// The type of the scripts. + public let type: ScriptType + + public init(scriptLocation: String, type: ScriptType) { + self.scriptLocation = scriptLocation + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.scriptLocation, name: "scriptLocation", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case scriptLocation = "scriptLocation" + case type = "type" + } + } + + public struct ScriptSummary: AWSDecodableShape { + /// The script location of the script summary. + public let scriptLocation: String + /// The type of the script summary. + public let type: ScriptType + + public init(scriptLocation: String, type: ScriptType) { + self.scriptLocation = scriptLocation + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case scriptLocation = "scriptLocation" + case type = "type" + } + } + + public struct ServiceSettings: AWSEncodableShape & AWSDecodableShape { + /// The KMS key ID of the service settings. + public let kmsKeyId: String? + + public init(kmsKeyId: String? = nil) { + self.kmsKeyId = kmsKeyId + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "kmsKeyId" + } + } + + public struct SourceDatabaseMetadata: AWSEncodableShape & AWSDecodableShape { + /// The capture tool of the source database metadata. + public let captureTool: CaptureTool + /// The type of the source database metadata. + public let type: SourceDatabase + + public init(captureTool: CaptureTool, type: SourceDatabase) { + self.captureTool = captureTool + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case captureTool = "captureTool" + case type = "type" + } + } + + public struct StartTestRunRequest: AWSEncodableShape { + /// The client token of the test run. + public let clientToken: String? + /// The tags of the test run. + public let tags: [String: String]? + /// The configuration ID of the test run. + public let testConfigurationId: String? + /// The test suite ID of the test run. + public let testSuiteId: String + + public init(clientToken: String? = StartTestRunRequest.idempotencyToken(), tags: [String: String]? = nil, testConfigurationId: String? 
= nil, testSuiteId: String) { + self.clientToken = clientToken + self.tags = tags + self.testConfigurationId = testConfigurationId + self.testSuiteId = testSuiteId + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[A-Za-z0-9\\-]{1,64}$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:).+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.testConfigurationId, name: "testConfigurationId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case tags = "tags" + case testConfigurationId = "testConfigurationId" + case testSuiteId = "testSuiteId" + } + } + + public struct StartTestRunResponse: AWSDecodableShape { + /// The test run ID of the test run. + public let testRunId: String + /// The test run status of the test run. + public let testRunStatus: TestRunStatus + + public init(testRunId: String, testRunStatus: TestRunStatus) { + self.testRunId = testRunId + self.testRunStatus = testRunStatus + } + + private enum CodingKeys: String, CodingKey { + case testRunId = "testRunId" + case testRunStatus = "testRunStatus" + } + } + + public struct Step: AWSEncodableShape & AWSDecodableShape { + /// The action of the step. + public let action: StepAction + /// The description of the step. + public let description: String? + /// The name of the step. + public let name: String + + public init(action: StepAction, description: String? = nil, name: String) { + self.action = action + self.description = description + self.name = name + } + + public func validate(name: String) throws { + try self.action.validate(name: "\(name).action") + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$") + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case description = "description" + case name = "name" + } + } + + public struct TN3270: AWSEncodableShape & AWSDecodableShape { + /// The data set names of the TN3270 protocol. + public let exportDataSetNames: [String]? + /// The script of the TN3270 protocol. + public let script: Script + + public init(exportDataSetNames: [String]? = nil, script: Script) { + self.exportDataSetNames = exportDataSetNames + self.script = script + } + + public func validate(name: String) throws { + try self.exportDataSetNames?.forEach { + try validate($0, name: "exportDataSetNames[]", parent: name, pattern: "^\\S{1,100}$") + } + try self.script.validate(name: "\(name).script") + } + + private enum CodingKeys: String, CodingKey { + case exportDataSetNames = "exportDataSetNames" + case script = "script" + } + } + + public struct TN3270StepInput: AWSDecodableShape { + /// The export data set names of the TN3270 step input. + public let exportDataSetNames: [String]? + /// The properties of the TN3270 step input. + public let properties: MainframeActionProperties? + /// The resource of the TN3270 step input. 
+ public let resource: MainframeResourceSummary + /// The script of the TN3270 step input. + public let script: ScriptSummary + + public init(exportDataSetNames: [String]? = nil, properties: MainframeActionProperties? = nil, resource: MainframeResourceSummary, script: ScriptSummary) { + self.exportDataSetNames = exportDataSetNames + self.properties = properties + self.resource = resource + self.script = script + } + + private enum CodingKeys: String, CodingKey { + case exportDataSetNames = "exportDataSetNames" + case properties = "properties" + case resource = "resource" + case script = "script" + } + } + + public struct TN3270StepOutput: AWSDecodableShape { + /// The data set details of the TN3270 step output. + public let dataSetDetails: [DataSet]? + /// The data set export location of the TN3270 step output. + public let dataSetExportLocation: String? + /// The output location of the TN3270 step output. + public let dmsOutputLocation: String? + /// The script output location of the TN3270 step output. + public let scriptOutputLocation: String + + public init(dataSetDetails: [DataSet]? = nil, dataSetExportLocation: String? = nil, dmsOutputLocation: String? = nil, scriptOutputLocation: String) { + self.dataSetDetails = dataSetDetails + self.dataSetExportLocation = dataSetExportLocation + self.dmsOutputLocation = dmsOutputLocation + self.scriptOutputLocation = scriptOutputLocation + } + + private enum CodingKeys: String, CodingKey { + case dataSetDetails = "dataSetDetails" + case dataSetExportLocation = "dataSetExportLocation" + case dmsOutputLocation = "dmsOutputLocation" + case scriptOutputLocation = "scriptOutputLocation" + } + } + + public struct TN3270Summary: AWSDecodableShape { + /// The step input of the TN3270 summary. + public let stepInput: TN3270StepInput + /// The step output of the TN3270 summary. + public let stepOutput: TN3270StepOutput? + + public init(stepInput: TN3270StepInput, stepOutput: TN3270StepOutput? = nil) { + self.stepInput = stepInput + self.stepOutput = stepOutput + } + + private enum CodingKeys: String, CodingKey { + case stepInput = "stepInput" + case stepOutput = "stepOutput" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the tag resource. + public let resourceArn: String + /// The tags of the resource. + public let tags: [String: String] + + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + try container.encode(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}$") + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:).+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TargetDatabaseMetadata: AWSEncodableShape & AWSDecodableShape { + /// The capture tool of the target database metadata. + public let captureTool: CaptureTool + /// The type of the target database metadata. + public let type: TargetDatabase + + public init(captureTool: CaptureTool, type: TargetDatabase) { + self.captureTool = captureTool + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case captureTool = "captureTool" + case type = "type" + } + } + + public struct TestCaseLatestVersion: AWSDecodableShape { + /// The status of the test case latest version. + public let status: TestCaseLifecycle + /// The status reason of the test case latest version. + public let statusReason: String? + /// The version of the test case latest version. + public let version: Int + + public init(status: TestCaseLifecycle, statusReason: String? = nil, version: Int) { + self.status = status + self.statusReason = statusReason + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + case statusReason = "statusReason" + case version = "version" + } + } + + public struct TestCaseRunSummary: AWSDecodableShape { + /// The run end time of the test case run summary. + public let runEndTime: Date? + /// The run start time of the test case run summary. + public let runStartTime: Date + /// The status of the test case run summary. + public let status: TestCaseRunStatus + /// The status reason of the test case run summary. + public let statusReason: String? + /// The test case id of the test case run summary. + public let testCaseId: String + /// The test case version of the test case run summary. + public let testCaseVersion: Int + /// The test run id of the test case run summary. + public let testRunId: String + + public init(runEndTime: Date? = nil, runStartTime: Date, status: TestCaseRunStatus, statusReason: String? 
= nil, testCaseId: String, testCaseVersion: Int, testRunId: String) { + self.runEndTime = runEndTime + self.runStartTime = runStartTime + self.status = status + self.statusReason = statusReason + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + self.testRunId = testRunId + } + + private enum CodingKeys: String, CodingKey { + case runEndTime = "runEndTime" + case runStartTime = "runStartTime" + case status = "status" + case statusReason = "statusReason" + case testCaseId = "testCaseId" + case testCaseVersion = "testCaseVersion" + case testRunId = "testRunId" + } + } + + public struct TestCaseSummary: AWSDecodableShape { + /// The creation time of the test case summary. + public let creationTime: Date + /// The last update time of the test case summary. + public let lastUpdateTime: Date + /// The latest version of the test case summary. + public let latestVersion: Int + /// The name of the test case summary. + public let name: String + /// The status of the test case summary. + public let status: TestCaseLifecycle + /// The status reason of the test case summary. + public let statusReason: String? + /// The test case Amazon Resource Name (ARN) of the test case summary. + public let testCaseArn: String + /// The test case ID of the test case summary. + public let testCaseId: String + + public init(creationTime: Date, lastUpdateTime: Date, latestVersion: Int, name: String, status: TestCaseLifecycle, statusReason: String? = nil, testCaseArn: String, testCaseId: String) { + self.creationTime = creationTime + self.lastUpdateTime = lastUpdateTime + self.latestVersion = latestVersion + self.name = name + self.status = status + self.statusReason = statusReason + self.testCaseArn = testCaseArn + self.testCaseId = testCaseId + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case lastUpdateTime = "lastUpdateTime" + case latestVersion = "latestVersion" + case name = "name" + case status = "status" + case statusReason = "statusReason" + case testCaseArn = "testCaseArn" + case testCaseId = "testCaseId" + } + } + + public struct TestConfigurationLatestVersion: AWSDecodableShape { + /// The status of the test configuration latest version. + public let status: TestConfigurationLifecycle + /// The status reason of the test configuration latest version. + public let statusReason: String? + /// The version of the test configuration latest version. + public let version: Int + + public init(status: TestConfigurationLifecycle, statusReason: String? = nil, version: Int) { + self.status = status + self.statusReason = statusReason + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + case statusReason = "statusReason" + case version = "version" + } + } + + public struct TestConfigurationSummary: AWSDecodableShape { + /// The creation time of the test configuration summary. + public let creationTime: Date + /// The last update time of the test configuration summary. + public let lastUpdateTime: Date + /// The latest version of the test configuration summary. + public let latestVersion: Int + /// The name of the test configuration summary. + public let name: String + /// The status of the test configuration summary. + public let status: TestConfigurationLifecycle + /// The status reason of the test configuration summary. + public let statusReason: String? + /// The test configuration ARN of the test configuration summary. 
+ public let testConfigurationArn: String + /// The test configuration ID of the test configuration summary. + public let testConfigurationId: String + + public init(creationTime: Date, lastUpdateTime: Date, latestVersion: Int, name: String, status: TestConfigurationLifecycle, statusReason: String? = nil, testConfigurationArn: String, testConfigurationId: String) { + self.creationTime = creationTime + self.lastUpdateTime = lastUpdateTime + self.latestVersion = latestVersion + self.name = name + self.status = status + self.statusReason = statusReason + self.testConfigurationArn = testConfigurationArn + self.testConfigurationId = testConfigurationId + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case lastUpdateTime = "lastUpdateTime" + case latestVersion = "latestVersion" + case name = "name" + case status = "status" + case statusReason = "statusReason" + case testConfigurationArn = "testConfigurationArn" + case testConfigurationId = "testConfigurationId" + } + } + + public struct TestRunStepSummary: AWSDecodableShape { + /// The after step of the test run step summary. + public let afterStep: Bool? + /// The before step of the test run step summary. + public let beforeStep: Bool? + /// The run end time of the test run step summary. + public let runEndTime: Date? + /// The run start time of the test run step summary. + public let runStartTime: Date + /// The status of the test run step summary. + public let status: StepRunStatus + /// The status reason of the test run step summary. + public let statusReason: String? + /// The step name of the test run step summary. + public let stepName: String + /// The test case ID of the test run step summary. + public let testCaseId: String? + /// The test case version of the test run step summary. + public let testCaseVersion: Int? + /// The test run ID of the test run step summary. + public let testRunId: String + /// The test suite ID of the test run step summary. + public let testSuiteId: String? + /// The test suite version of the test run step summary. + public let testSuiteVersion: Int? + + public init(afterStep: Bool? = nil, beforeStep: Bool? = nil, runEndTime: Date? = nil, runStartTime: Date, status: StepRunStatus, statusReason: String? = nil, stepName: String, testCaseId: String? = nil, testCaseVersion: Int? = nil, testRunId: String, testSuiteId: String? = nil, testSuiteVersion: Int? = nil) { + self.afterStep = afterStep + self.beforeStep = beforeStep + self.runEndTime = runEndTime + self.runStartTime = runStartTime + self.status = status + self.statusReason = statusReason + self.stepName = stepName + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + self.testRunId = testRunId + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + private enum CodingKeys: String, CodingKey { + case afterStep = "afterStep" + case beforeStep = "beforeStep" + case runEndTime = "runEndTime" + case runStartTime = "runStartTime" + case status = "status" + case statusReason = "statusReason" + case stepName = "stepName" + case testCaseId = "testCaseId" + case testCaseVersion = "testCaseVersion" + case testRunId = "testRunId" + case testSuiteId = "testSuiteId" + case testSuiteVersion = "testSuiteVersion" + } + } + + public struct TestRunSummary: AWSDecodableShape { + /// The run end time of the test run summary. + public let runEndTime: Date? + /// The run start time of the test run summary. + public let runStartTime: Date + /// The status of the test run summary. 
+ public let status: TestRunStatus + /// The status reason of the test run summary. + public let statusReason: String? + /// The test configuration ID of the test run summary. + public let testConfigurationId: String? + /// The test configuration version of the test run summary. + public let testConfigurationVersion: Int? + /// The test run ARN of the test run summary. + public let testRunArn: String + /// The test run ID of the test run summary. + public let testRunId: String + /// The test suite ID of the test run summary. + public let testSuiteId: String + /// The test suite version of the test run summary. + public let testSuiteVersion: Int + + public init(runEndTime: Date? = nil, runStartTime: Date, status: TestRunStatus, statusReason: String? = nil, testConfigurationId: String? = nil, testConfigurationVersion: Int? = nil, testRunArn: String, testRunId: String, testSuiteId: String, testSuiteVersion: Int) { + self.runEndTime = runEndTime + self.runStartTime = runStartTime + self.status = status + self.statusReason = statusReason + self.testConfigurationId = testConfigurationId + self.testConfigurationVersion = testConfigurationVersion + self.testRunArn = testRunArn + self.testRunId = testRunId + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + private enum CodingKeys: String, CodingKey { + case runEndTime = "runEndTime" + case runStartTime = "runStartTime" + case status = "status" + case statusReason = "statusReason" + case testConfigurationId = "testConfigurationId" + case testConfigurationVersion = "testConfigurationVersion" + case testRunArn = "testRunArn" + case testRunId = "testRunId" + case testSuiteId = "testSuiteId" + case testSuiteVersion = "testSuiteVersion" + } + } + + public struct TestSuiteLatestVersion: AWSDecodableShape { + /// The status of the test suite latest version. + public let status: TestSuiteLifecycle + /// The status reason of the test suite latest version. + public let statusReason: String? + /// The version of the test suite latest version. + public let version: Int + + public init(status: TestSuiteLifecycle, statusReason: String? = nil, version: Int) { + self.status = status + self.statusReason = statusReason + self.version = version + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + case statusReason = "statusReason" + case version = "version" + } + } + + public struct TestSuiteSummary: AWSDecodableShape { + /// The creation time of the test suite summary. + public let creationTime: Date + /// The last update time of the test suite summary. + public let lastUpdateTime: Date + /// The latest version of the test suite summary. + public let latestVersion: Int + /// The name of the test suite summary. + public let name: String + /// The status of the test suite summary. + public let status: TestSuiteLifecycle + /// The status reason of the test suite summary. + public let statusReason: String? + /// The test suite Amazon Resource Name (ARN) of the test suite summary. + public let testSuiteArn: String + /// The test suite ID of the test suite summary. + public let testSuiteId: String + + public init(creationTime: Date, lastUpdateTime: Date, latestVersion: Int, name: String, status: TestSuiteLifecycle, statusReason: String? 
= nil, testSuiteArn: String, testSuiteId: String) { + self.creationTime = creationTime + self.lastUpdateTime = lastUpdateTime + self.latestVersion = latestVersion + self.name = name + self.status = status + self.statusReason = statusReason + self.testSuiteArn = testSuiteArn + self.testSuiteId = testSuiteId + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "creationTime" + case lastUpdateTime = "lastUpdateTime" + case latestVersion = "latestVersion" + case name = "name" + case status = "status" + case statusReason = "statusReason" + case testSuiteArn = "testSuiteArn" + case testSuiteId = "testSuiteId" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// The tag keys of the resource. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}$") + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:).+$") + } + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateTestCaseRequest: AWSEncodableShape { + /// The description of the test case. + public let description: String? + /// The steps of the test case. + public let steps: [Step]? + /// The test case ID of the test case. + public let testCaseId: String + + public init(description: String? = nil, steps: [Step]? = nil, testCaseId: String) { + self.description = description + self.steps = steps + self.testCaseId = testCaseId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.steps, forKey: .steps) + request.encodePath(self.testCaseId, key: "testCaseId") + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.steps?.forEach { + try $0.validate(name: "\(name).steps[]") + } + try self.validate(self.steps, name: "steps", parent: name, max: 20) + try self.validate(self.steps, name: "steps", parent: name, min: 1) + try self.validate(self.testCaseId, name: "testCaseId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case steps = "steps" + } + } + + public struct UpdateTestCaseResponse: AWSDecodableShape { + /// The test case ID of the test case. + public let testCaseId: String + /// The test case version of the test case. 
+ public let testCaseVersion: Int + + public init(testCaseId: String, testCaseVersion: Int) { + self.testCaseId = testCaseId + self.testCaseVersion = testCaseVersion + } + + private enum CodingKeys: String, CodingKey { + case testCaseId = "testCaseId" + case testCaseVersion = "testCaseVersion" + } + } + + public struct UpdateTestConfigurationRequest: AWSEncodableShape { + /// The description of the test configuration. + public let description: String? + /// The properties of the test configuration. + public let properties: [String: String]? + /// The resources of the test configuration. + public let resources: [Resource]? + /// The service settings of the test configuration. + public let serviceSettings: ServiceSettings? + /// The test configuration ID of the test configuration. + public let testConfigurationId: String + + public init(description: String? = nil, properties: [String: String]? = nil, resources: [Resource]? = nil, serviceSettings: ServiceSettings? = nil, testConfigurationId: String) { + self.description = description + self.properties = properties + self.resources = resources + self.serviceSettings = serviceSettings + self.testConfigurationId = testConfigurationId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.properties, forKey: .properties) + try container.encodeIfPresent(self.resources, forKey: .resources) + try container.encodeIfPresent(self.serviceSettings, forKey: .serviceSettings) + request.encodePath(self.testConfigurationId, key: "testConfigurationId") + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.resources?.forEach { + try $0.validate(name: "\(name).resources[]") + } + try self.validate(self.resources, name: "resources", parent: name, max: 20) + try self.validate(self.resources, name: "resources", parent: name, min: 1) + try self.validate(self.testConfigurationId, name: "testConfigurationId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case properties = "properties" + case resources = "resources" + case serviceSettings = "serviceSettings" + } + } + + public struct UpdateTestConfigurationResponse: AWSDecodableShape { + /// The configuration ID of the test configuration. + public let testConfigurationId: String + /// The configuration version of the test configuration. + public let testConfigurationVersion: Int + + public init(testConfigurationId: String, testConfigurationVersion: Int) { + self.testConfigurationId = testConfigurationId + self.testConfigurationVersion = testConfigurationVersion + } + + private enum CodingKeys: String, CodingKey { + case testConfigurationId = "testConfigurationId" + case testConfigurationVersion = "testConfigurationVersion" + } + } + + public struct UpdateTestSuiteRequest: AWSEncodableShape { + /// The after steps of the test suite. + public let afterSteps: [Step]? + /// The before steps for the test suite. + public let beforeSteps: [Step]? + /// The description of the test suite. + public let description: String? + /// The test cases in the test suite. + public let testCases: TestCases? + /// The test suite ID of the test suite. 
+ public let testSuiteId: String + + public init(afterSteps: [Step]? = nil, beforeSteps: [Step]? = nil, description: String? = nil, testCases: TestCases? = nil, testSuiteId: String) { + self.afterSteps = afterSteps + self.beforeSteps = beforeSteps + self.description = description + self.testCases = testCases + self.testSuiteId = testSuiteId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.afterSteps, forKey: .afterSteps) + try container.encodeIfPresent(self.beforeSteps, forKey: .beforeSteps) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.testCases, forKey: .testCases) + request.encodePath(self.testSuiteId, key: "testSuiteId") + } + + public func validate(name: String) throws { + try self.afterSteps?.forEach { + try $0.validate(name: "\(name).afterSteps[]") + } + try self.validate(self.afterSteps, name: "afterSteps", parent: name, max: 20) + try self.validate(self.afterSteps, name: "afterSteps", parent: name, min: 1) + try self.beforeSteps?.forEach { + try $0.validate(name: "\(name).beforeSteps[]") + } + try self.validate(self.beforeSteps, name: "beforeSteps", parent: name, max: 20) + try self.validate(self.beforeSteps, name: "beforeSteps", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.testCases?.validate(name: "\(name).testCases") + try self.validate(self.testSuiteId, name: "testSuiteId", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + + private enum CodingKeys: String, CodingKey { + case afterSteps = "afterSteps" + case beforeSteps = "beforeSteps" + case description = "description" + case testCases = "testCases" + } + } + + public struct UpdateTestSuiteResponse: AWSDecodableShape { + /// The test suite ID of the test suite. + public let testSuiteId: String + /// The test suite version of the test suite. + public let testSuiteVersion: Int? + + public init(testSuiteId: String, testSuiteVersion: Int? = nil) { + self.testSuiteId = testSuiteId + self.testSuiteVersion = testSuiteVersion + } + + private enum CodingKeys: String, CodingKey { + case testSuiteId = "testSuiteId" + case testSuiteVersion = "testSuiteVersion" + } + } + + public struct File: AWSDecodableShape { + /// The file type of the file. + public let fileType: CompareFileType? + + public init(fileType: CompareFileType? = nil) { + self.fileType = fileType + } + + private enum CodingKeys: String, CodingKey { + case fileType = "fileType" + } + } + + public struct Input: AWSEncodableShape & AWSDecodableShape { + /// The file in the input. + public let file: InputFile? + + public init(file: InputFile? = nil) { + self.file = file + } + + public func validate(name: String) throws { + try self.file?.validate(name: "\(name).file") + } + + private enum CodingKeys: String, CodingKey { + case file = "file" + } + } + + public struct Output: AWSEncodableShape & AWSDecodableShape { + /// The file of the output. + public let file: OutputFile? + + public init(file: OutputFile? = nil) { + self.file = file + } + + public func validate(name: String) throws { + try self.file?.validate(name: "\(name).file") + } + + private enum CodingKeys: String, CodingKey { + case file = "file" + } + } + + public struct TestCases: AWSEncodableShape & AWSDecodableShape { + /// The sequential of the test case. + public let sequential: [String]? 
+ + public init(sequential: [String]? = nil) { + self.sequential = sequential + } + + public func validate(name: String) throws { + try self.sequential?.forEach { + try validate($0, name: "sequential[]", parent: name, pattern: "^[A-Za-z0-9:/\\-]{1,100}$") + } + } + + private enum CodingKeys: String, CodingKey { + case sequential = "sequential" + } + } +} + +// MARK: - Errors + +/// Error enum for AppTest +public struct AppTestErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize AppTest + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// The account or role doesn't have the right permissions to make the request. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The parameters provided in the request conflict with existing resources. + public static var conflictException: Self { .init(.conflictException) } + /// An unexpected error occurred during the processing of the request. + public static var internalServerException: Self { .init(.internalServerException) } + /// The specified resource was not found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// One or more quotas for AWS Application Testing exceed the limit. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// The number of requests made exceeds the limit. + public static var throttlingException: Self { .init(.throttlingException) } + /// One or more parameters provided in the request are not valid. + public static var validationException: Self { .init(.validationException) } +} + +extension AppTestErrorType: Equatable { + public static func == (lhs: AppTestErrorType, rhs: AppTestErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension AppTestErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? 
"")" + } +} diff --git a/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift new file mode 100644 index 0000000000..1181293b97 --- /dev/null +++ b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_api.swift @@ -0,0 +1,437 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS ApplicationSignals service. +/// +/// This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability release. Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. The application-centric view provides you with unified visibility across your applications, services, and dependencies, so you can proactively monitor and efficiently triage any issues that may arise, ensuring optimal customer experience. Application Signals provides the following benefits: Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors. Create and monitor service level objectives (SLOs). See a map of your application topology that Application Signals automatically discovers, that gives you a visual representation of your applications, dependencies, and their connectivity. +public struct ApplicationSignals: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the ApplicationSignals client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? 
partition, + serviceName: "ApplicationSignals", + serviceIdentifier: "application-signals", + serviceProtocol: .restjson, + apiVersion: "2024-04-15", + endpoint: endpoint, + errorType: ApplicationSignalsErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Use this operation to retrieve one or more service level objective (SLO) budget reports. An error budget is the amount of time in unhealthy periods that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly interval translates to an error budget of 21.9 minutes of downtime in a 30-day month. Budget reports include a health indicator, the attainment value, and remaining budget. For more information about SLO error budgets, see SLO concepts. + @Sendable + public func batchGetServiceLevelObjectiveBudgetReport(_ input: BatchGetServiceLevelObjectiveBudgetReportInput, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchGetServiceLevelObjectiveBudgetReportOutput { + return try await self.client.execute( + operation: "BatchGetServiceLevelObjectiveBudgetReport", + path: "/budget-report", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a service level objective (SLO), which can help you ensure that your critical business operations are meeting customer expectations. Use SLOs to set and track specific target levels for the reliability and availability of your applications and services. SLOs use service level indicators (SLIs) to calculate whether the application is performing at the level that you want. Create an SLO to set a target for a service or operation’s availability or latency. CloudWatch measures this target frequently you can find whether it has been breached. When you create an SLO, you set an attainment goal for it. An attainment goal is the ratio of good periods that meet the threshold requirements to the total periods within the interval. For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state. After you have created an SLO, you can retrieve error budget reports for it. An error budget is the number of periods or amount of time that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. for example, an SLO with a threshold that 99.95% of requests must be completed under 2000ms every month translates to an error budget of 21.9 minutes of downtime per month. When you call this operation, Application Signals creates the AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already exist in your account. This service- linked role has the following permissions: xray:GetServiceGraph logs:StartQuery logs:GetQueryResults cloudwatch:GetMetricData cloudwatch:ListMetrics tag:GetResources autoscaling:DescribeAutoScalingGroups You can easily set SLO targets for your applications that are discovered by Application Signals, using critical metrics such as latency and availability. You can also set SLOs against any CloudWatch metric or math expression that produces a time series. For more information about SLOs, see Service level objectives (SLOs). 
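// Illustrative arithmetic only, not part of the generated client: the error budget
// described above is the amount of unhealthy time an SLO tolerates per interval.
// For the 99.95% monthly example, assuming an average month of roughly 30.44 days,
// this works out to about 21.9 minutes of budget.
func errorBudgetMinutes(attainmentGoalPercent: Double, intervalDays: Double) -> Double {
    let totalMinutes = intervalDays * 24 * 60
    return totalMinutes * (1 - attainmentGoalPercent / 100)
}
// errorBudgetMinutes(attainmentGoalPercent: 99.95, intervalDays: 30.44) ≈ 21.9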
+ @Sendable + public func createServiceLevelObjective(_ input: CreateServiceLevelObjectiveInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServiceLevelObjectiveOutput { + return try await self.client.execute( + operation: "CreateServiceLevelObjective", + path: "/slo", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes the specified service level objective. + @Sendable + public func deleteServiceLevelObjective(_ input: DeleteServiceLevelObjectiveInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServiceLevelObjectiveOutput { + return try await self.client.execute( + operation: "DeleteServiceLevelObjective", + path: "/slo/{Id}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns information about a service discovered by Application Signals. + @Sendable + public func getService(_ input: GetServiceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetServiceOutput { + return try await self.client.execute( + operation: "GetService", + path: "/service", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns information about one SLO created in the account. + @Sendable + public func getServiceLevelObjective(_ input: GetServiceLevelObjectiveInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetServiceLevelObjectiveOutput { + return try await self.client.execute( + operation: "GetServiceLevelObjective", + path: "/slo/{Id}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of service dependencies of the service that you specify. A dependency is an infrastructure component that an operation of this service connects with. Dependencies can include Amazon Web Services services, Amazon Web Services resources, and third-party services. + @Sendable + public func listServiceDependencies(_ input: ListServiceDependenciesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServiceDependenciesOutput { + return try await self.client.execute( + operation: "ListServiceDependencies", + path: "/service-dependencies", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns the list of dependents that invoked the specified service during the provided time range. Dependents include other services, CloudWatch Synthetics canaries, and clients that are instrumented with CloudWatch RUM app monitors. + @Sendable + public func listServiceDependents(_ input: ListServiceDependentsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServiceDependentsOutput { + return try await self.client.execute( + operation: "ListServiceDependents", + path: "/service-dependents", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of SLOs created in this account. + @Sendable + public func listServiceLevelObjectives(_ input: ListServiceLevelObjectivesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServiceLevelObjectivesOutput { + return try await self.client.execute( + operation: "ListServiceLevelObjectives", + path: "/slos", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of the operations of this service that have been discovered by Application Signals. 
Only the operations that were invoked during the specified time range are returned. + @Sendable + public func listServiceOperations(_ input: ListServiceOperationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServiceOperationsOutput { + return try await self.client.execute( + operation: "ListServiceOperations", + path: "/service-operations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of services that have been discovered by Application Signals. A service represents a minimum logical and transactional unit that completes a business function. Services are discovered through Application Signals instrumentation. + @Sendable + public func listServices(_ input: ListServicesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServicesOutput { + return try await self.client.execute( + operation: "ListServices", + path: "/services", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Displays the tags associated with a CloudWatch resource. Tags can be assigned to service level objectives. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Enables this Amazon Web Services account to be able to use CloudWatch Application Signals by creating the AWSServiceRoleForCloudWatchApplicationSignals service-linked role. This service- linked role has the following permissions: xray:GetServiceGraph logs:StartQuery logs:GetQueryResults cloudwatch:GetMetricData cloudwatch:ListMetrics tag:GetResources autoscaling:DescribeAutoScalingGroups After completing this step, you still need to instrument your Java and Python applications to send data to Application Signals. For more information, see Enabling Application Signals. + @Sendable + public func startDiscovery(_ input: StartDiscoveryInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartDiscoveryOutput { + return try await self.client.execute( + operation: "StartDiscovery", + path: "/start-discovery", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Assigns one or more tags (key-value pairs) to the specified CloudWatch resource, such as a service level objective. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters. You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag. You can associate as many as 50 tags with a CloudWatch resource. 
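// Hedged usage sketch, not part of the generated file: tagging an SLO as described above.
// It assumes the generated TagResourceRequest and Tag shapes (not shown in this hunk)
// expose resourceArn/tags and key/value members, and that an `applicationSignals` client
// already exists; the ARN is a placeholder.
let sloArn = "<slo-arn>" // e.g. the ARN returned when the SLO was created
_ = try await applicationSignals.tagResource(
    ApplicationSignals.TagResourceRequest(
        resourceArn: sloArn,
        tags: [ApplicationSignals.Tag(key: "team", value: "payments")]
    )
)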
+ @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + return try await self.client.execute( + operation: "TagResource", + path: "/tag-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Removes one or more tags from the specified resource. + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + return try await self.client.execute( + operation: "UntagResource", + path: "/untag-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates an existing service level objective (SLO). If you omit parameters, the previous values of those parameters are retained. + @Sendable + public func updateServiceLevelObjective(_ input: UpdateServiceLevelObjectiveInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateServiceLevelObjectiveOutput { + return try await self.client.execute( + operation: "UpdateServiceLevelObjective", + path: "/slo/{Id}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +} + +extension ApplicationSignals { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: ApplicationSignals, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension ApplicationSignals { + /// Returns a list of service dependencies of the service that you specify. A dependency is an infrastructure component that an operation of this service connects with. Dependencies can include Amazon Web Services services, Amazon Web Services resources, and third-party services. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listServiceDependenciesPaginator( + _ input: ListServiceDependenciesInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listServiceDependencies, + inputKey: \ListServiceDependenciesInput.nextToken, + outputKey: \ListServiceDependenciesOutput.nextToken, + logger: logger + ) + } + + /// Returns the list of dependents that invoked the specified service during the provided time range. Dependents include other services, CloudWatch Synthetics canaries, and clients that are instrumented with CloudWatch RUM app monitors. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listServiceDependentsPaginator( + _ input: ListServiceDependentsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listServiceDependents, + inputKey: \ListServiceDependentsInput.nextToken, + outputKey: \ListServiceDependentsOutput.nextToken, + logger: logger + ) + } + + /// Returns a list of SLOs created in this account. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listServiceLevelObjectivesPaginator( + _ input: ListServiceLevelObjectivesInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listServiceLevelObjectives, + inputKey: \ListServiceLevelObjectivesInput.nextToken, + outputKey: \ListServiceLevelObjectivesOutput.nextToken, + logger: logger + ) + } + + /// Returns a list of the operations of this service that have been discovered by Application Signals. Only the operations that were invoked during the specified time range are returned. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listServiceOperationsPaginator( + _ input: ListServiceOperationsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listServiceOperations, + inputKey: \ListServiceOperationsInput.nextToken, + outputKey: \ListServiceOperationsOutput.nextToken, + logger: logger + ) + } + + /// Returns a list of services that have been discovered by Application Signals. A service represents a minimum logical and transactional unit that completes a business function. Services are discovered through Application Signals instrumentation. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used flot logging + public func listServicesPaginator( + _ input: ListServicesInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listServices, + inputKey: \ListServicesInput.nextToken, + outputKey: \ListServicesOutput.nextToken, + logger: logger + ) + } +} + +extension ApplicationSignals.ListServiceDependenciesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ApplicationSignals.ListServiceDependenciesInput { + return .init( + endTime: self.endTime, + keyAttributes: self.keyAttributes, + maxResults: self.maxResults, + nextToken: token, + startTime: self.startTime + ) + } +} + +extension ApplicationSignals.ListServiceDependentsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ApplicationSignals.ListServiceDependentsInput { + return .init( + endTime: self.endTime, + keyAttributes: self.keyAttributes, + maxResults: self.maxResults, + nextToken: token, + startTime: self.startTime + ) + } +} + +extension ApplicationSignals.ListServiceLevelObjectivesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ApplicationSignals.ListServiceLevelObjectivesInput { + return .init( + keyAttributes: self.keyAttributes, + maxResults: self.maxResults, + nextToken: token, + operationName: self.operationName + ) + } +} + +extension ApplicationSignals.ListServiceOperationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ApplicationSignals.ListServiceOperationsInput { + return .init( + endTime: self.endTime, + keyAttributes: self.keyAttributes, + maxResults: self.maxResults, + nextToken: token, + startTime: self.startTime + ) + } +} + +extension ApplicationSignals.ListServicesInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ApplicationSignals.ListServicesInput { + return .init( + endTime: self.endTime, + maxResults: self.maxResults, + 
nextToken: token, + startTime: self.startTime + ) + } +} diff --git a/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift new file mode 100644 index 0000000000..3fd287620a --- /dev/null +++ b/Sources/Soto/Services/ApplicationSignals/ApplicationSignals_shapes.swift @@ -0,0 +1,1528 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension ApplicationSignals { + // MARK: Enums + + public enum DurationUnit: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case day = "DAY" + case month = "MONTH" + public var description: String { return self.rawValue } + } + + public enum ServiceLevelIndicatorComparisonOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case greaterThan = "GreaterThan" + case greaterThanOrEqualTo = "GreaterThanOrEqualTo" + case lessThan = "LessThan" + case lessThanOrEqualTo = "LessThanOrEqualTo" + public var description: String { return self.rawValue } + } + + public enum ServiceLevelIndicatorMetricType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case availability = "AVAILABILITY" + case latency = "LATENCY" + public var description: String { return self.rawValue } + } + + public enum ServiceLevelObjectiveBudgetStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case breached = "BREACHED" + case insufficientData = "INSUFFICIENT_DATA" + case ok = "OK" + case warning = "WARNING" + public var description: String { return self.rawValue } + } + + public enum StandardUnit: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bits = "Bits" + case bitsSecond = "Bits/Second" + case bytes = "Bytes" + case bytesSecond = "Bytes/Second" + case count = "Count" + case countSecond = "Count/Second" + case gigabits = "Gigabits" + case gigabitsSecond = "Gigabits/Second" + case gigabytes = "Gigabytes" + case gigabytesSecond = "Gigabytes/Second" + case kilobits = "Kilobits" + case kilobitsSecond = "Kilobits/Second" + case kilobytes = "Kilobytes" + case kilobytesSecond = "Kilobytes/Second" + case megabits = "Megabits" + case megabitsSecond = "Megabits/Second" + case megabytes = "Megabytes" + case megabytesSecond = "Megabytes/Second" + case microseconds = "Microseconds" + case milliseconds = "Milliseconds" + case none = "None" + case percent = "Percent" + case seconds = "Seconds" + case terabits = "Terabits" + case terabitsSecond = "Terabits/Second" + case terabytes = "Terabytes" + case terabytesSecond = "Terabytes/Second" + public var description: String { return self.rawValue } + } + + public enum Interval: AWSEncodableShape & AWSDecodableShape, Sendable { + /// If the 
interval is a calendar interval, this structure contains the interval specifications. + case calendarInterval(CalendarInterval) + /// If the interval is a rolling interval, this structure contains the interval specifications. + case rollingInterval(RollingInterval) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .calendarInterval: + let value = try container.decode(CalendarInterval.self, forKey: .calendarInterval) + self = .calendarInterval(value) + case .rollingInterval: + let value = try container.decode(RollingInterval.self, forKey: .rollingInterval) + self = .rollingInterval(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .calendarInterval(let value): + try container.encode(value, forKey: .calendarInterval) + case .rollingInterval(let value): + try container.encode(value, forKey: .rollingInterval) + } + } + + public func validate(name: String) throws { + switch self { + case .calendarInterval(let value): + try value.validate(name: "\(name).calendarInterval") + case .rollingInterval(let value): + try value.validate(name: "\(name).rollingInterval") + } + } + + private enum CodingKeys: String, CodingKey { + case calendarInterval = "CalendarInterval" + case rollingInterval = "RollingInterval" + } + } + + // MARK: Shapes + + public struct BatchGetServiceLevelObjectiveBudgetReportInput: AWSEncodableShape { + /// An array containing the IDs of the service level objectives that you want to include in the report. + public let sloIds: [String] + /// The date and time that you want the report to be for. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. + public let timestamp: Date + + public init(sloIds: [String], timestamp: Date) { + self.sloIds = sloIds + self.timestamp = timestamp + } + + public func validate(name: String) throws { + try self.validate(self.sloIds, name: "sloIds", parent: name, max: 50) + try self.validate(self.sloIds, name: "sloIds", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case sloIds = "SloIds" + case timestamp = "Timestamp" + } + } + + public struct BatchGetServiceLevelObjectiveBudgetReportOutput: AWSDecodableShape { + /// An array of structures, where each structure includes an error indicating that one of the requests in the array was not valid. + public let errors: [ServiceLevelObjectiveBudgetReportError] + /// An array of structures, where each structure is one budget report. + public let reports: [ServiceLevelObjectiveBudgetReport] + /// The date and time that the report is for. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. 
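// Hedged usage sketch, not part of the generated file: calling the budget-report operation
// with the input shape defined above. The SLO ID is a placeholder and the
// `applicationSignals` client is assumed to exist already.
let budgetReports = try await applicationSignals.batchGetServiceLevelObjectiveBudgetReport(
    ApplicationSignals.BatchGetServiceLevelObjectiveBudgetReportInput(
        sloIds: ["<slo-id>"],   // up to 50 IDs per request
        timestamp: Date()       // the point in time the report should cover
    )
)
// budgetReports.reports holds one budget report per SLO; budgetReports.errors lists failed requests.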
+ public let timestamp: Date + + public init(errors: [ServiceLevelObjectiveBudgetReportError], reports: [ServiceLevelObjectiveBudgetReport], timestamp: Date) { + self.errors = errors + self.reports = reports + self.timestamp = timestamp + } + + private enum CodingKeys: String, CodingKey { + case errors = "Errors" + case reports = "Reports" + case timestamp = "Timestamp" + } + } + + public struct CalendarInterval: AWSEncodableShape & AWSDecodableShape { + /// Specifies the duration of each calendar interval. For example, if Duration is 1 and DurationUnit is MONTH, each interval is one month, aligned with the calendar. + public let duration: Int + /// Specifies the calendar interval unit. + public let durationUnit: DurationUnit + /// The date and time when you want the first interval to start. Be sure to choose a time that configures the intervals the way that you want. For example, if you want weekly intervals starting on Mondays at 6 a.m., be sure to specify a start time that is a Monday at 6 a.m. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 As soon as one calendar interval ends, another automatically begins. + public let startTime: Date + + public init(duration: Int, durationUnit: DurationUnit, startTime: Date) { + self.duration = duration + self.durationUnit = durationUnit + self.startTime = startTime + } + + public func validate(name: String) throws { + try self.validate(self.duration, name: "duration", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case duration = "Duration" + case durationUnit = "DurationUnit" + case startTime = "StartTime" + } + } + + public struct CreateServiceLevelObjectiveInput: AWSEncodableShape { + /// An optional description for this SLO. + public let description: String? + /// A structure that contains the attributes that determine the goal of the SLO. This includes the time period for evaluation and the attainment threshold. + public let goal: Goal? + /// A name for this SLO. + public let name: String + /// A structure that contains information about what service and what performance metric that this SLO will monitor. + public let sliConfig: ServiceLevelIndicatorConfig + /// A list of key-value pairs to associate with the SLO. You can associate as many as 50 tags with an SLO. To be able to associate tags with the SLO when you create the SLO, you must have the cloudwatch:TagResource permission. Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values. + public let tags: [Tag]? + + public init(description: String? = nil, goal: Goal? = nil, name: String, sliConfig: ServiceLevelIndicatorConfig, tags: [Tag]? 
= nil) { + self.description = description + self.goal = goal + self.name = name + self.sliConfig = sliConfig + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.goal?.validate(name: "\(name).goal") + try self.validate(self.name, name: "name", parent: name, pattern: "^[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$") + try self.sliConfig.validate(name: "\(name).sliConfig") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case goal = "Goal" + case name = "Name" + case sliConfig = "SliConfig" + case tags = "Tags" + } + } + + public struct CreateServiceLevelObjectiveOutput: AWSDecodableShape { + /// A structure that contains information about the SLO that you just created. + public let slo: ServiceLevelObjective + + public init(slo: ServiceLevelObjective) { + self.slo = slo + } + + private enum CodingKeys: String, CodingKey { + case slo = "Slo" + } + } + + public struct DeleteServiceLevelObjectiveInput: AWSEncodableShape { + /// The ARN or name of the service level objective to delete. + public let id: String + + public init(id: String) { + self.id = id + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.id, key: "Id") + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, pattern: "^[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$|^arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteServiceLevelObjectiveOutput: AWSDecodableShape { + public init() {} + } + + public struct Dimension: AWSEncodableShape & AWSDecodableShape { + /// The name of the dimension. Dimension names must contain only ASCII characters, must include at least one non-whitespace character, and cannot start with a colon (:). ASCII control characters are not supported as part of dimension names. + public let name: String + /// The value of the dimension. Dimension values must contain only ASCII characters and must include at least one non-whitespace character. ASCII control characters are not supported as part of dimension values. + public let value: String + + public init(name: String, value: String) { + self.name = name + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.value, name: "value", parent: name, max: 1024) + try self.validate(self.value, name: "value", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + case value = "Value" + } + } + + public struct GetServiceInput: AWSEncodableShape { + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Use this field to specify which service you want to retrieve information for. 
You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String] + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, keyAttributes: [String: String], startTime: Date) { + self.endTime = endTime + self.keyAttributes = keyAttributes + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.endTime, key: "EndTime") + try container.encode(self.keyAttributes, forKey: .keyAttributes) + request.encodeQuery(self.startTime, key: "StartTime") + } + + public func validate(name: String) throws { + try self.keyAttributes.forEach { + try validate($0.key, name: "keyAttributes.key", parent: name, pattern: "^[a-zA-Z]{1,50}$") + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, max: 1024) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, pattern: "^[ -~]*[!-~]+[ -~]*$") + } + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, max: 3) + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + } + } + + public struct GetServiceLevelObjectiveInput: AWSEncodableShape { + /// The ARN or name of the SLO that you want to retrieve information about. You can find the ARNs of SLOs by using the ListServiceLevelObjectives operation. + public let id: String + + public init(id: String) { + self.id = id + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.id, key: "Id") + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, pattern: "^[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$|^arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetServiceLevelObjectiveOutput: AWSDecodableShape { + /// A structure containing the information about the SLO. + public let slo: ServiceLevelObjective + + public init(slo: ServiceLevelObjective) { + self.slo = slo + } + + private enum CodingKeys: String, CodingKey { + case slo = "Slo" + } + } + + public struct GetServiceOutput: AWSDecodableShape { + /// The end time of the data included in the response. In a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057. 
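// Hedged usage sketch, not part of the generated file: looking up one discovered service
// with the GetServiceInput shape defined above. Only the Type/Name/Environment keys come
// from the documentation; the values and the `applicationSignals` client are illustrative.
let now = Date()
let serviceInfo = try await applicationSignals.getService(
    ApplicationSignals.GetServiceInput(
        endTime: now,
        keyAttributes: [
            "Type": "Service",
            "Name": "checkout-service",
            "Environment": "eks:demo-cluster/default"
        ],
        startTime: now.addingTimeInterval(-3600) // previous hour
    )
)
// serviceInfo.service describes the service; endTime/startTime echo the evaluated window.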
+ public let endTime: Date + /// A structure containing information about the service. + public let service: Service + /// The start time of the data included in the response. In a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057. + public let startTime: Date + + public init(endTime: Date, service: Service, startTime: Date) { + self.endTime = endTime + self.service = service + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case service = "Service" + case startTime = "StartTime" + } + } + + public struct Goal: AWSEncodableShape & AWSDecodableShape { + /// The threshold that determines if the goal is being met. An attainment goal is the ratio of good periods that meet the threshold requirements to the total periods within the interval. For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state. If you omit this parameter, 99 is used to represent 99% as the attainment goal. + public let attainmentGoal: Double? + /// The time period used to evaluate the SLO. It can be either a calendar interval or rolling interval. If you omit this parameter, a rolling interval of 7 days is used. + public let interval: Interval? + /// The percentage of remaining budget over total budget that you want to get warnings for. If you omit this parameter, the default of 50.0 is used. + public let warningThreshold: Double? + + public init(attainmentGoal: Double? = nil, interval: Interval? = nil, warningThreshold: Double? = nil) { + self.attainmentGoal = attainmentGoal + self.interval = interval + self.warningThreshold = warningThreshold + } + + public func validate(name: String) throws { + try self.interval?.validate(name: "\(name).interval") + } + + private enum CodingKeys: String, CodingKey { + case attainmentGoal = "AttainmentGoal" + case interval = "Interval" + case warningThreshold = "WarningThreshold" + } + } + + public struct ListServiceDependenciesInput: AWSEncodableShape { + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String] + /// The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used. + public let maxResults: Int? + /// Include this value, if it was returned by the previous operation, to get the next set of service dependencies. + public let nextToken: String? + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. 
For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, keyAttributes: [String: String], maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { + self.endTime = endTime + self.keyAttributes = keyAttributes + self.maxResults = maxResults + self.nextToken = nextToken + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.endTime, key: "EndTime") + try container.encode(self.keyAttributes, forKey: .keyAttributes) + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.startTime, key: "StartTime") + } + + public func validate(name: String) throws { + try self.keyAttributes.forEach { + try validate($0.key, name: "keyAttributes.key", parent: name, pattern: "^[a-zA-Z]{1,50}$") + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, max: 1024) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, pattern: "^[ -~]*[!-~]+[ -~]*$") + } + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, max: 3) + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + } + } + + public struct ListServiceDependenciesOutput: AWSDecodableShape { + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Include this value in your next use of this API to get next set of service dependencies. + public let nextToken: String? + /// An array, where each object in the array contains information about one of the dependencies of this service. + public let serviceDependencies: [ServiceDependency] + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, nextToken: String? = nil, serviceDependencies: [ServiceDependency], startTime: Date) { + self.endTime = endTime + self.nextToken = nextToken + self.serviceDependencies = serviceDependencies + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case nextToken = "NextToken" + case serviceDependencies = "ServiceDependencies" + case startTime = "StartTime" + } + } + + public struct ListServiceDependentsInput: AWSEncodableShape { + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. 
This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String] + /// The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used. + public let maxResults: Int? + /// Include this value, if it was returned by the previous operation, to get the next set of service dependents. + public let nextToken: String? + /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, keyAttributes: [String: String], maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { + self.endTime = endTime + self.keyAttributes = keyAttributes + self.maxResults = maxResults + self.nextToken = nextToken + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.endTime, key: "EndTime") + try container.encode(self.keyAttributes, forKey: .keyAttributes) + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.startTime, key: "StartTime") + } + + public func validate(name: String) throws { + try self.keyAttributes.forEach { + try validate($0.key, name: "keyAttributes.key", parent: name, pattern: "^[a-zA-Z]{1,50}$") + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, max: 1024) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, pattern: "^[ -~]*[!-~]+[ -~]*$") + } + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, max: 3) + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + } + } + + public struct ListServiceDependentsOutput: AWSDecodableShape { + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Include this value in your next use of this API to get next set of service dependents. + public let nextToken: String? + /// An array, where each object in the array contains information about one of the dependents of this service. + public let serviceDependents: [ServiceDependent] + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, nextToken: String? 
= nil, serviceDependents: [ServiceDependent], startTime: Date) { + self.endTime = endTime + self.nextToken = nextToken + self.serviceDependents = serviceDependents + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case nextToken = "NextToken" + case serviceDependents = "ServiceDependents" + case startTime = "StartTime" + } + } + + public struct ListServiceLevelObjectivesInput: AWSEncodableShape { + /// You can use this optional field to specify which services you want to retrieve SLO information for. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String]? + /// The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used. + public let maxResults: Int? + /// Include this value, if it was returned by the previous operation, to get the next set of service level objectives. + public let nextToken: String? + /// The name of the operation that this SLO is associated with. + public let operationName: String? + + public init(keyAttributes: [String: String]? = nil, maxResults: Int? = nil, nextToken: String? = nil, operationName: String? = nil) { + self.keyAttributes = keyAttributes + self.maxResults = maxResults + self.nextToken = nextToken + self.operationName = operationName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.keyAttributes, forKey: .keyAttributes) + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.operationName, key: "OperationName") + } + + public func validate(name: String) throws { + try self.keyAttributes?.forEach { + try validate($0.key, name: "keyAttributes.key", parent: name, pattern: "^[a-zA-Z]{1,50}$") + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, max: 1024) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, pattern: "^[ -~]*[!-~]+[ -~]*$") + } + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, max: 3) + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.operationName, name: "operationName", parent: name, max: 255) + try self.validate(self.operationName, name: "operationName", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + } + } + + public struct ListServiceLevelObjectivesOutput: AWSDecodableShape { + /// Include this value in your next use of this API to get next set of service level objectives. + public let nextToken: String? + /// An array of structures, where each structure contains information about one SLO. + public let sloSummaries: [ServiceLevelObjectiveSummary]? + + public init(nextToken: String? = nil, sloSummaries: [ServiceLevelObjectiveSummary]? = nil) { + self.nextToken = nextToken + self.sloSummaries = sloSummaries + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case sloSummaries = "SloSummaries" + } + } + + public struct ListServiceOperationsInput: AWSEncodableShape { + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String] + /// The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used. + public let maxResults: Int? + /// Include this value, if it was returned by the previous operation, to get the next set of service operations. + public let nextToken: String? + /// The start of the time period to retrieve information about. 
When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, keyAttributes: [String: String], maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { + self.endTime = endTime + self.keyAttributes = keyAttributes + self.maxResults = maxResults + self.nextToken = nextToken + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.endTime, key: "EndTime") + try container.encode(self.keyAttributes, forKey: .keyAttributes) + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.startTime, key: "StartTime") + } + + public func validate(name: String) throws { + try self.keyAttributes.forEach { + try validate($0.key, name: "keyAttributes.key", parent: name, pattern: "^[a-zA-Z]{1,50}$") + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, max: 1024) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, pattern: "^[ -~]*[!-~]+[ -~]*$") + } + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, max: 3) + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + } + } + + public struct ListServiceOperationsOutput: AWSDecodableShape { + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Include this value in your next use of this API to get next set of service operations. + public let nextToken: String? + /// An array of structures that each contain information about one operation of this service. + public let serviceOperations: [ServiceOperation] + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, nextToken: String? = nil, serviceOperations: [ServiceOperation], startTime: Date) { + self.endTime = endTime + self.nextToken = nextToken + self.serviceOperations = serviceOperations + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case nextToken = "NextToken" + case serviceOperations = "ServiceOperations" + case startTime = "StartTime" + } + } + + public struct ListServicesInput: AWSEncodableShape { + /// The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used. + public let maxResults: Int? + /// Include this value, if it was returned by the previous operation, to get the next set of services. + public let nextToken: String? 
+ /// The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, maxResults: Int? = nil, nextToken: String? = nil, startTime: Date) { + self.endTime = endTime + self.maxResults = maxResults + self.nextToken = nextToken + self.startTime = startTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.endTime, key: "EndTime") + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + request.encodeQuery(self.startTime, key: "StartTime") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListServicesOutput: AWSDecodableShape { + /// The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let endTime: Date + /// Include this value in your next use of this API to get next set of services. + public let nextToken: String? + /// An array of structures, where each structure contains some information about a service. To get complete information about a service, use GetService. + public let serviceSummaries: [ServiceSummary] + /// The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + public let startTime: Date + + public init(endTime: Date, nextToken: String? = nil, serviceSummaries: [ServiceSummary], startTime: Date) { + self.endTime = endTime + self.nextToken = nextToken + self.serviceSummaries = serviceSummaries + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case nextToken = "NextToken" + case serviceSummaries = "ServiceSummaries" + case startTime = "StartTime" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the CloudWatch resource that you want to view tags for. The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.resourceArn, key: "ResourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1024) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The list of tag keys and values associated with the resource you specified. + public let tags: [Tag]? + + public init(tags: [Tag]? 
= nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct Metric: AWSEncodableShape & AWSDecodableShape { + /// An array of one or more dimensions to use to define the metric that you want to use. For more information, see Dimensions. + public let dimensions: [Dimension]? + /// The name of the metric to use. + public let metricName: String? + /// The namespace of the metric. For more information, see Namespaces. + public let namespace: String? + + public init(dimensions: [Dimension]? = nil, metricName: String? = nil, namespace: String? = nil) { + self.dimensions = dimensions + self.metricName = metricName + self.namespace = namespace + } + + public func validate(name: String) throws { + try self.dimensions?.forEach { + try $0.validate(name: "\(name).dimensions[]") + } + try self.validate(self.dimensions, name: "dimensions", parent: name, max: 30) + try self.validate(self.metricName, name: "metricName", parent: name, max: 255) + try self.validate(self.metricName, name: "metricName", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, max: 255) + try self.validate(self.namespace, name: "namespace", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, pattern: "[^:].*") + } + + private enum CodingKeys: String, CodingKey { + case dimensions = "Dimensions" + case metricName = "MetricName" + case namespace = "Namespace" + } + } + + public struct MetricDataQuery: AWSEncodableShape & AWSDecodableShape { + /// The ID of the account where this metric is located. If you are performing this operatiion in a monitoring account, use this to specify which source account to retrieve this metric from. + public let accountId: String? + /// This field can contain a metric math expression to be performed on the other metrics that you are retrieving within this MetricDataQueries structure. A math expression can use the Id of the other metrics or queries to refer to those metrics, and can also use the Id of other expressions to use the result of those expressions. For more information about metric math expressions, see Metric Math Syntax and Functions in the Amazon CloudWatch User Guide. Within each MetricDataQuery object, you must specify either Expression or MetricStat but not both. + public let expression: String? + /// A short name used to tie this object to the results in the response. This Id must be unique within a MetricDataQueries array. If you are performing math expressions on this set of data, this name represents that data and can serve as a variable in the metric math expression. The valid characters are letters, numbers, and underscore. The first character must be a lowercase letter. + public let id: String + /// A human-readable label for this metric or expression. This is especially useful if this is an expression, so that you know what the value represents. If the metric or expression is shown in a CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch generates a default. You can put dynamic expressions into a label, so that it is more descriptive. For more information, see Using Dynamic Labels. + public let label: String? + /// A metric to be used directly for the SLO, or to be used in the math expression that will be used for the SLO. Within one MetricDataQuery object, you must specify either Expression or MetricStat but not both. + public let metricStat: MetricStat? 
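// Hedged sketch, not part of the generated file: composing the Metric, MetricStat, and
// MetricDataQuery shapes defined here to describe a latency indicator. The namespace,
// metric name, dimension, and statistic values are illustrative, not prescribed by the service.
let latencyQuery = ApplicationSignals.MetricDataQuery(
    id: "latency",
    metricStat: ApplicationSignals.MetricStat(
        metric: ApplicationSignals.Metric(
            dimensions: [ApplicationSignals.Dimension(name: "Operation", value: "GET /items")],
            metricName: "Latency",
            namespace: "MyApp/Frontend"
        ),
        period: 60,  // one-minute periods; regular-resolution metrics require a multiple of 60
        stat: "p99"
    ),
    returnData: true // this query's result is the one the SLO evaluates
)
_ = latencyQuery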
+ /// The granularity, in seconds, of the returned data points for this metric. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second. If the StartTime parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range are returned: Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute). Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes). Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour). + public let period: Int? + /// Use this only if you are using a metric math expression for the SLO. Specify true for ReturnData for only the one expression result to use as the alarm. For all other metrics and expressions in the same CreateServiceLevelObjective operation, specify ReturnData as false. + public let returnData: Bool? + + public init(accountId: String? = nil, expression: String? = nil, id: String, label: String? = nil, metricStat: MetricStat? = nil, period: Int? = nil, returnData: Bool? = nil) { + self.accountId = accountId + self.expression = expression + self.id = id + self.label = label + self.metricStat = metricStat + self.period = period + self.returnData = returnData + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, max: 255) + try self.validate(self.accountId, name: "accountId", parent: name, min: 1) + try self.validate(self.expression, name: "expression", parent: name, max: 2048) + try self.validate(self.expression, name: "expression", parent: name, min: 1) + try self.validate(self.id, name: "id", parent: name, max: 255) + try self.validate(self.id, name: "id", parent: name, min: 1) + try self.metricStat?.validate(name: "\(name).metricStat") + try self.validate(self.period, name: "period", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case accountId = "AccountId" + case expression = "Expression" + case id = "Id" + case label = "Label" + case metricStat = "MetricStat" + case period = "Period" + case returnData = "ReturnData" + } + } + + public struct MetricReference: AWSDecodableShape { + /// An array of one or more dimensions that further define the metric. For more information, see CloudWatchDimensions. + public let dimensions: [Dimension]? + /// The name of the metric. + public let metricName: String + /// Used to display the appropriate statistics in the CloudWatch console. + public let metricType: String + /// The namespace of the metric. For more information, see CloudWatchNamespaces. + public let namespace: String + + public init(dimensions: [Dimension]?
= nil, metricName: String, metricType: String, namespace: String) { + self.dimensions = dimensions + self.metricName = metricName + self.metricType = metricType + self.namespace = namespace + } + + private enum CodingKeys: String, CodingKey { + case dimensions = "Dimensions" + case metricName = "MetricName" + case metricType = "MetricType" + case namespace = "Namespace" + } + } + + public struct MetricStat: AWSEncodableShape & AWSDecodableShape { + /// The metric to use as the service level indicator, including the metric name, namespace, and dimensions. + public let metric: Metric + /// The granularity, in seconds, to be used for the metric. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second. + public let period: Int + /// The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, see CloudWatch statistics definitions. + public let stat: String + /// If you omit Unit then all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions. + public let unit: StandardUnit? + + public init(metric: Metric, period: Int, stat: String, unit: StandardUnit? = nil) { + self.metric = metric + self.period = period + self.stat = stat + self.unit = unit + } + + public func validate(name: String) throws { + try self.metric.validate(name: "\(name).metric") + try self.validate(self.period, name: "period", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case metric = "Metric" + case period = "Period" + case stat = "Stat" + case unit = "Unit" + } + } + + public struct RollingInterval: AWSEncodableShape & AWSDecodableShape { + /// Specifies the duration of each rolling interval. For example, if Duration is 7 and DurationUnit is DAY, each rolling interval is seven days. + public let duration: Int + /// Specifies the rolling interval unit. + public let durationUnit: DurationUnit + + public init(duration: Int, durationUnit: DurationUnit) { + self.duration = duration + self.durationUnit = durationUnit + } + + public func validate(name: String) throws { + try self.validate(self.duration, name: "duration", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case duration = "Duration" + case durationUnit = "DurationUnit" + } + } + + public struct Service: AWSDecodableShape { + /// This structure contains one or more string-to-string maps that help identify this service. It can include platform attributes, application attributes, and telemetry attributes. Platform attributes contain information about the service's platform. PlatformType defines the hosted-in platform. EKS.Cluster is the name of the Amazon EKS cluster. K8s.Cluster is the name of the self-hosted Kubernetes cluster. K8s.Namespace is the name of the Kubernetes namespace in either Amazon EKS or Kubernetes clusters.
K8s.Workload is the name of the Kubernetes workload in either Amazon EKS or Kubernetes clusters. K8s.Node is the name of the Kubernetes node in either Amazon EKS or Kubernetes clusters. K8s.Pod is the name of the Kubernetes pod in either Amazon EKS or Kubernetes clusters. EC2.AutoScalingGroup is the name of the Amazon EC2 Auto Scaling group. EC2.InstanceId is the ID of the Amazon EC2 instance. Host is the name of the host, for all platform types. Application attributes contain information about the application. AWS.Application is the application's name in Amazon Web Services Service Catalog AppRegistry. AWS.Application.ARN is the application's ARN in Amazon Web Services Service Catalog AppRegistry. Telemetry attributes contain telemetry information. Telemetry.SDK is the fingerprint of the OpenTelemetry SDK version for instrumented services. Telemetry.Agent is the fingerprint of the agent used to collect and send telemetry data. Telemetry.Source Specifies the point of application where the telemetry was collected or specifies what was used for the source of telemetry data. + public let attributeMaps: [[String: String]]? + /// This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String] + /// An array of structures that each contain information about one metric associated with this service. + public let metricReferences: [MetricReference] + + public init(attributeMaps: [[String: String]]? = nil, keyAttributes: [String: String], metricReferences: [MetricReference]) { + self.attributeMaps = attributeMaps + self.keyAttributes = keyAttributes + self.metricReferences = metricReferences + } + + private enum CodingKeys: String, CodingKey { + case attributeMaps = "AttributeMaps" + case keyAttributes = "KeyAttributes" + case metricReferences = "MetricReferences" + } + } + + public struct ServiceDependency: AWSDecodableShape { + /// This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let dependencyKeyAttributes: [String: String] + /// The name of the called operation in the dependency. + public let dependencyOperationName: String + /// An array of structures that each contain information about one metric associated with this service dependency that was discovered by Application Signals. + public let metricReferences: [MetricReference] + /// The name of the operation in this service that calls the dependency.
+ public let operationName: String + + public init(dependencyKeyAttributes: [String: String], dependencyOperationName: String, metricReferences: [MetricReference], operationName: String) { + self.dependencyKeyAttributes = dependencyKeyAttributes + self.dependencyOperationName = dependencyOperationName + self.metricReferences = metricReferences + self.operationName = operationName + } + + private enum CodingKeys: String, CodingKey { + case dependencyKeyAttributes = "DependencyKeyAttributes" + case dependencyOperationName = "DependencyOperationName" + case metricReferences = "MetricReferences" + case operationName = "OperationName" + } + } + + public struct ServiceDependent: AWSDecodableShape { + /// This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let dependentKeyAttributes: [String: String] + /// If the dependent invoker was a service that invoked it from an operation, the name of that dependent operation is displayed here. + public let dependentOperationName: String? + /// An array of structures that each contain information about one metric associated with this service dependent that was discovered by Application Signals. + public let metricReferences: [MetricReference] + /// If the invoked entity is an operation on an entity, the name of that dependent operation is displayed here. + public let operationName: String? + + public init(dependentKeyAttributes: [String: String], dependentOperationName: String? = nil, metricReferences: [MetricReference], operationName: String? = nil) { + self.dependentKeyAttributes = dependentKeyAttributes + self.dependentOperationName = dependentOperationName + self.metricReferences = metricReferences + self.operationName = operationName + } + + private enum CodingKeys: String, CodingKey { + case dependentKeyAttributes = "DependentKeyAttributes" + case dependentOperationName = "DependentOperationName" + case metricReferences = "MetricReferences" + case operationName = "OperationName" + } + } + + public struct ServiceLevelIndicator: AWSDecodableShape { + /// The arithmetic operation used when comparing the specified metric to the threshold. + public let comparisonOperator: ServiceLevelIndicatorComparisonOperator + /// The value that the SLI metric is compared to. + public let metricThreshold: Double + /// A structure that contains information about the metric that the SLO monitors. 
+ public let sliMetric: ServiceLevelIndicatorMetric + + public init(comparisonOperator: ServiceLevelIndicatorComparisonOperator, metricThreshold: Double, sliMetric: ServiceLevelIndicatorMetric) { + self.comparisonOperator = comparisonOperator + self.metricThreshold = metricThreshold + self.sliMetric = sliMetric + } + + private enum CodingKeys: String, CodingKey { + case comparisonOperator = "ComparisonOperator" + case metricThreshold = "MetricThreshold" + case sliMetric = "SliMetric" + } + } + + public struct ServiceLevelIndicatorConfig: AWSEncodableShape { + /// The arithmetic operation to use when comparing the specified metric to the threshold. + public let comparisonOperator: ServiceLevelIndicatorComparisonOperator + /// The value that the SLI metric is compared to. + public let metricThreshold: Double + /// Use this structure to specify the metric to be used for the SLO. + public let sliMetricConfig: ServiceLevelIndicatorMetricConfig + + public init(comparisonOperator: ServiceLevelIndicatorComparisonOperator, metricThreshold: Double, sliMetricConfig: ServiceLevelIndicatorMetricConfig) { + self.comparisonOperator = comparisonOperator + self.metricThreshold = metricThreshold + self.sliMetricConfig = sliMetricConfig + } + + public func validate(name: String) throws { + try self.sliMetricConfig.validate(name: "\(name).sliMetricConfig") + } + + private enum CodingKeys: String, CodingKey { + case comparisonOperator = "ComparisonOperator" + case metricThreshold = "MetricThreshold" + case sliMetricConfig = "SliMetricConfig" + } + } + + public struct ServiceLevelIndicatorMetric: AWSDecodableShape { + /// This is a string-to-string map that contains information about the type of object that this SLO is related to. It can include the following fields. Type designates the type of object that this SLO is related to. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String]? + /// If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, this structure includes the information about that metric or expression. + public let metricDataQueries: [MetricDataQuery] + /// If the SLO monitors either the LATENCY or AVAILABILITY metric that Application Signals collects, this field displays which of those metrics is used. + public let metricType: ServiceLevelIndicatorMetricType? + /// If the SLO monitors a specific operation of the service, this field displays that operation name. + public let operationName: String? + + public init(keyAttributes: [String: String]? = nil, metricDataQueries: [MetricDataQuery], metricType: ServiceLevelIndicatorMetricType? = nil, operationName: String? 
= nil) { + self.keyAttributes = keyAttributes + self.metricDataQueries = metricDataQueries + self.metricType = metricType + self.operationName = operationName + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + case metricDataQueries = "MetricDataQueries" + case metricType = "MetricType" + case operationName = "OperationName" + } + } + + public struct ServiceLevelIndicatorMetricConfig: AWSEncodableShape { + /// If this SLO is related to a metric collected by Application Signals, you must use this field to specify which service the SLO metric is related to. To do so, you must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String]? + /// If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, use this structure to specify that metric or expression. + public let metricDataQueries: [MetricDataQuery]? + /// If the SLO is to monitor either the LATENCY or AVAILABILITY metric that Application Signals collects, use this field to specify which of those metrics is used. + public let metricType: ServiceLevelIndicatorMetricType? + /// If the SLO is to monitor a specific operation of the service, use this field to specify the name of that operation. + public let operationName: String? + /// The number of seconds to use as the period for SLO evaluation. Your application's performance is compared to the SLI during each period. For each period, the application is determined to have either achieved or not achieved the necessary performance. + public let periodSeconds: Int? + /// The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, see CloudWatch statistics definitions. + public let statistic: String? + + public init(keyAttributes: [String: String]? = nil, metricDataQueries: [MetricDataQuery]? = nil, metricType: ServiceLevelIndicatorMetricType? = nil, operationName: String? = nil, periodSeconds: Int? = nil, statistic: String? 
= nil) { + self.keyAttributes = keyAttributes + self.metricDataQueries = metricDataQueries + self.metricType = metricType + self.operationName = operationName + self.periodSeconds = periodSeconds + self.statistic = statistic + } + + public func validate(name: String) throws { + try self.keyAttributes?.forEach { + try validate($0.key, name: "keyAttributes.key", parent: name, pattern: "^[a-zA-Z]{1,50}$") + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, max: 1024) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, min: 1) + try validate($0.value, name: "keyAttributes[\"\($0.key)\"]", parent: name, pattern: "^[ -~]*[!-~]+[ -~]*$") + } + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, max: 3) + try self.validate(self.keyAttributes, name: "keyAttributes", parent: name, min: 1) + try self.metricDataQueries?.forEach { + try $0.validate(name: "\(name).metricDataQueries[]") + } + try self.validate(self.operationName, name: "operationName", parent: name, max: 255) + try self.validate(self.operationName, name: "operationName", parent: name, min: 1) + try self.validate(self.periodSeconds, name: "periodSeconds", parent: name, max: 900) + try self.validate(self.periodSeconds, name: "periodSeconds", parent: name, min: 60) + try self.validate(self.statistic, name: "statistic", parent: name, max: 20) + try self.validate(self.statistic, name: "statistic", parent: name, min: 1) + try self.validate(self.statistic, name: "statistic", parent: name, pattern: "^[a-zA-Z0-9.]+$") + } + + private enum CodingKeys: String, CodingKey { + case keyAttributes = "KeyAttributes" + case metricDataQueries = "MetricDataQueries" + case metricType = "MetricType" + case operationName = "OperationName" + case periodSeconds = "PeriodSeconds" + case statistic = "Statistic" + } + } + + public struct ServiceLevelObjective: AWSDecodableShape { + /// The ARN of this SLO. + public let arn: String + /// The date and time that this SLO was created. When used in a raw HTTP Query API, it is formatted as yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59. + public let createdTime: Date + /// The description that you created for this SLO. + public let description: String? + public let goal: Goal + /// The time that this SLO was most recently updated. When used in a raw HTTP Query API, it is formatted as yyyy-MM-dd'T'HH:mm:ss. For example, 2019-07-01T23:59:59. + public let lastUpdatedTime: Date + /// The name of this SLO. + public let name: String + /// A structure containing information about the performance metric that this SLO monitors. + public let sli: ServiceLevelIndicator + + public init(arn: String, createdTime: Date, description: String? = nil, goal: Goal, lastUpdatedTime: Date, name: String, sli: ServiceLevelIndicator) { + self.arn = arn + self.createdTime = createdTime + self.description = description + self.goal = goal + self.lastUpdatedTime = lastUpdatedTime + self.name = name + self.sli = sli + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case createdTime = "CreatedTime" + case description = "Description" + case goal = "Goal" + case lastUpdatedTime = "LastUpdatedTime" + case name = "Name" + case sli = "Sli" + } + } + + public struct ServiceLevelObjectiveBudgetReport: AWSDecodableShape { + /// The ARN of the SLO that this report is for. 
+ public let arn: String + /// A number between 0 and 100 that represents the percentage of time periods that the service has attained the SLO's attainment goal, as of the time of the request. + public let attainment: Double? + /// The budget amount remaining before the SLO status becomes BREACHING, at the time specified in the Timestamp parameter of the request. If this value is negative, then the SLO is already in BREACHING status. + public let budgetSecondsRemaining: Int? + /// The status of this SLO, as it relates to the error budget for the entire time interval. OK means that the SLO had remaining budget above the warning threshold, as of the time that you specified in TimeStamp. WARNING means that the SLO's remaining budget was below the warning threshold, as of the time that you specified in TimeStamp. BREACHED means that the SLO's budget was exhausted, as of the time that you specified in TimeStamp. INSUFFICIENT_DATA means that the specified start and end times were before the SLO was created, or that attainment data is missing. + public let budgetStatus: ServiceLevelObjectiveBudgetStatus + public let goal: Goal? + /// The name of the SLO that this report is for. + public let name: String + /// A structure that contains information about the performance metric that this SLO monitors. + public let sli: ServiceLevelIndicator? + /// The total number of seconds in the error budget for the interval. + public let totalBudgetSeconds: Int? + + public init(arn: String, attainment: Double? = nil, budgetSecondsRemaining: Int? = nil, budgetStatus: ServiceLevelObjectiveBudgetStatus, goal: Goal? = nil, name: String, sli: ServiceLevelIndicator? = nil, totalBudgetSeconds: Int? = nil) { + self.arn = arn + self.attainment = attainment + self.budgetSecondsRemaining = budgetSecondsRemaining + self.budgetStatus = budgetStatus + self.goal = goal + self.name = name + self.sli = sli + self.totalBudgetSeconds = totalBudgetSeconds + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case attainment = "Attainment" + case budgetSecondsRemaining = "BudgetSecondsRemaining" + case budgetStatus = "BudgetStatus" + case goal = "Goal" + case name = "Name" + case sli = "Sli" + case totalBudgetSeconds = "TotalBudgetSeconds" + } + } + + public struct ServiceLevelObjectiveBudgetReportError: AWSDecodableShape { + /// The ARN of the SLO that this error is related to. + public let arn: String + /// The error code for this error. + public let errorCode: String + /// The message for this error. + public let errorMessage: String + /// The name of the SLO that this error is related to. + public let name: String + + public init(arn: String, errorCode: String, errorMessage: String, name: String) { + self.arn = arn + self.errorCode = errorCode + self.errorMessage = errorMessage + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case errorCode = "ErrorCode" + case errorMessage = "ErrorMessage" + case name = "Name" + } + } + + public struct ServiceLevelObjectiveSummary: AWSDecodableShape { + /// The ARN of this service level objective. + public let arn: String + /// The date and time that this service level objective was created. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC. + public let createdTime: Date? + /// This is a string-to-string map. It can include the following fields. Type designates the type of object this service level objective is for. ResourceType specifies the type of the resource.
This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String]? + /// The name of the service level objective. + public let name: String + /// If this service level objective is specific to a single operation, this field displays the name of that operation. + public let operationName: String? + + public init(arn: String, createdTime: Date? = nil, keyAttributes: [String: String]? = nil, name: String, operationName: String? = nil) { + self.arn = arn + self.createdTime = createdTime + self.keyAttributes = keyAttributes + self.name = name + self.operationName = operationName + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case createdTime = "CreatedTime" + case keyAttributes = "KeyAttributes" + case name = "Name" + case operationName = "OperationName" + } + } + + public struct ServiceOperation: AWSDecodableShape { + /// An array of structures that each contain information about one metric associated with this service operation that was discovered by Application Signals. + public let metricReferences: [MetricReference] + /// The name of the operation, discovered by Application Signals. + public let name: String + + public init(metricReferences: [MetricReference], name: String) { + self.metricReferences = metricReferences + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case metricReferences = "MetricReferences" + case name = "Name" + } + } + + public struct ServiceSummary: AWSDecodableShape { + /// This structure contains one or more string-to-string maps that help identify this service. It can include platform attributes, application attributes, and telemetry attributes. Platform attributes contain information about the service's platform. PlatformType defines the hosted-in platform. EKS.Cluster is the name of the Amazon EKS cluster. K8s.Cluster is the name of the self-hosted Kubernetes cluster. K8s.Namespace is the name of the Kubernetes namespace in either Amazon EKS or Kubernetes clusters. K8s.Workload is the name of the Kubernetes workload in either Amazon EKS or Kubernetes clusters. K8s.Node is the name of the Kubernetes node in either Amazon EKS or Kubernetes clusters. K8s.Pod is the name of the Kubernetes pod in either Amazon EKS or Kubernetes clusters. EC2.AutoScalingGroup is the name of the Amazon EC2 Auto Scaling group. EC2.InstanceId is the ID of the Amazon EC2 instance. Host is the name of the host, for all platform types. Application attributes contain information about the application. AWS.Application is the application's name in Amazon Web Services Service Catalog AppRegistry. AWS.Application.ARN is the application's ARN in Amazon Web Services Service Catalog AppRegistry. Telemetry attributes contain telemetry information. Telemetry.SDK is the fingerprint of the OpenTelemetry SDK version for instrumented services. Telemetry.Agent is the fingerprint of the agent used to collect and send telemetry data. Telemetry.Source Specifies the point of application where the telemetry was collected or specifies what was used for the source of telemetry data. + public let attributeMaps: [[String: String]]?
+ /// This is a string-to-string map that helps identify the objects discovered by Application Signals. It can include the following fields. Type designates the type of object this is. ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource. Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. Environment specifies the location where this object is hosted, or what it belongs to. + public let keyAttributes: [String: String] + /// An array of structures that each contain information about one metric associated with this service. + public let metricReferences: [MetricReference] + + public init(attributeMaps: [[String: String]]? = nil, keyAttributes: [String: String], metricReferences: [MetricReference]) { + self.attributeMaps = attributeMaps + self.keyAttributes = keyAttributes + self.metricReferences = metricReferences + } + + private enum CodingKeys: String, CodingKey { + case attributeMaps = "AttributeMaps" + case keyAttributes = "KeyAttributes" + case metricReferences = "MetricReferences" + } + } + + public struct StartDiscoveryInput: AWSEncodableShape { + public init() {} + } + + public struct StartDiscoveryOutput: AWSDecodableShape { + public init() {} + } + + public struct Tag: AWSEncodableShape & AWSDecodableShape { + /// A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources. + public let key: String + /// The value for the specified tag key. + public let value: String + + public init(key: String, value: String) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.value, name: "value", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case key = "Key" + case value = "Value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the CloudWatch resource that you want to set tags for. The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference. + public let resourceArn: String + /// The list of key-value pairs to associate with the alarm. + public let tags: [Tag] + + public init(resourceArn: String, tags: [Tag]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1024) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) + try self.tags.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case tags = "Tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the CloudWatch resource that you want to delete tags from.
The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference. + public let resourceArn: String + /// The list of tag keys to remove from the resource. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1024) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case tagKeys = "TagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateServiceLevelObjectiveInput: AWSEncodableShape { + /// An optional description for the SLO. + public let description: String? + /// A structure that contains the attributes that determine the goal of the SLO. This includes the time period for evaluation and the attainment threshold. + public let goal: Goal? + /// The Amazon Resource Name (ARN) or name of the service level objective that you want to update. + public let id: String + /// A structure that contains information about what performance metric this SLO will monitor. + public let sliConfig: ServiceLevelIndicatorConfig? + + public init(description: String? = nil, goal: Goal? = nil, id: String, sliConfig: ServiceLevelIndicatorConfig? = nil) { + self.description = description + self.goal = goal + self.id = id + self.sliConfig = sliConfig + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.goal, forKey: .goal) + request.encodePath(self.id, key: "Id") + try container.encodeIfPresent(self.sliConfig, forKey: .sliConfig) + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 1024) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.goal?.validate(name: "\(name).goal") + try self.validate(self.id, name: "id", parent: name, pattern: "^[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$|^arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$") + try self.sliConfig?.validate(name: "\(name).sliConfig") + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case goal = "Goal" + case sliConfig = "SliConfig" + } + } + + public struct UpdateServiceLevelObjectiveOutput: AWSDecodableShape { + /// A structure that contains information about the SLO that you just updated. 
+ public let slo: ServiceLevelObjective + + public init(slo: ServiceLevelObjective) { + self.slo = slo + } + + private enum CodingKeys: String, CodingKey { + case slo = "Slo" + } + } +} + +// MARK: - Errors + +/// Error enum for ApplicationSignals +public struct ApplicationSignalsErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize ApplicationSignals + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You don't have sufficient permissions to perform this action. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// This operation attempted to create a resource that already exists. + public static var conflictException: Self { .init(.conflictException) } + /// Resource not found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// This request exceeds a service quota. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// The request was throttled because of quota limits. + public static var throttlingException: Self { .init(.throttlingException) } + /// The resource is not valid. + public static var validationException: Self { .init(.validationException) } +} + +extension ApplicationSignalsErrorType: Equatable { + public static func == (lhs: ApplicationSignalsErrorType, rhs: ApplicationSignalsErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension ApplicationSignalsErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? "")" + } +} diff --git a/Sources/Soto/Services/Artifact/Artifact_shapes.swift b/Sources/Soto/Services/Artifact/Artifact_shapes.swift index 2fb2110b07..7109b4c0cb 100644 --- a/Sources/Soto/Services/Artifact/Artifact_shapes.swift +++ b/Sources/Soto/Services/Artifact/Artifact_shapes.swift @@ -385,6 +385,8 @@ extension Artifact { } public struct ReportSummary: AWSDecodableShape { + /// Acceptance type for report. + public let acceptanceType: AcceptanceType? /// ARN for the report resource. public let arn: String? /// Category for the report resource. @@ -416,7 +418,8 @@ extension Artifact { /// Version for the report resource. public let version: Int64? - public init(arn: String? = nil, category: String? = nil, companyName: String? = nil, description: String? = nil, id: String? = nil, name: String? = nil, periodEnd: Date? = nil, periodStart: Date? = nil, productName: String? = nil, series: String? = nil, state: PublishedState? = nil, statusMessage: String? = nil, uploadState: UploadState? = nil, version: Int64? = nil) { + public init(acceptanceType: AcceptanceType? = nil, arn: String? = nil, category: String? = nil, companyName: String? = nil, description: String? = nil, id: String? = nil, name: String? = nil, periodEnd: Date? = nil, periodStart: Date? 
= nil, productName: String? = nil, series: String? = nil, state: PublishedState? = nil, statusMessage: String? = nil, uploadState: UploadState? = nil, version: Int64? = nil) { + self.acceptanceType = acceptanceType self.arn = arn self.category = category self.companyName = companyName @@ -434,6 +437,7 @@ extension Artifact { } private enum CodingKeys: String, CodingKey { + case acceptanceType = "acceptanceType" case arn = "arn" case category = "category" case companyName = "companyName" diff --git a/Sources/Soto/Services/Athena/Athena_api.swift b/Sources/Soto/Services/Athena/Athena_api.swift index 3ab84a68fa..f78ff9e2fe 100644 --- a/Sources/Soto/Services/Athena/Athena_api.swift +++ b/Sources/Soto/Services/Athena/Athena_api.swift @@ -519,7 +519,7 @@ public struct Athena: AWSService { ) } - /// Returns query execution runtime statistics related to a single execution of a query if you have access to the workgroup in which the query ran. Query execution runtime statistics are returned only when QueryExecutionStatus$State is in a SUCCEEDED or FAILED state. Stage-level input and output row count and data size statistics are not shown when a query has row-level filters defined in Lake Formation. + /// Returns query execution runtime statistics related to a single execution of a query if you have access to the workgroup in which the query ran. Statistics from the Timeline section of the response object are available as soon as QueryExecutionStatus$State is in a SUCCEEDED or FAILED state. The remaining non-timeline statistics in the response (like stage-level input and output row count and data size) are updated asynchronously and may not be available immediately after a query completes. The non-timeline statistics are also not included when a query has row-level filters defined in Lake Formation. 
@Sendable public func getQueryRuntimeStatistics(_ input: GetQueryRuntimeStatisticsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetQueryRuntimeStatisticsOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/Athena/Athena_shapes.swift b/Sources/Soto/Services/Athena/Athena_shapes.swift index 826264787d..6b685fca15 100644 --- a/Sources/Soto/Services/Athena/Athena_shapes.swift +++ b/Sources/Soto/Services/Athena/Athena_shapes.swift @@ -782,7 +782,7 @@ extension Athena { try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") try self.validate(self.name, name: "name", parent: name, max: 255) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") + try self.validate(self.name, name: "name", parent: name, pattern: "^(?!.*[/:\\\\])[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") try self.validate(self.workGroup, name: "workGroup", parent: name, pattern: "^[a-zA-Z0-9._-]{1,128}$") } @@ -1334,7 +1334,7 @@ extension Athena { public func validate(name: String) throws { try self.validate(self.name, name: "name", parent: name, max: 255) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") + try self.validate(self.name, name: "name", parent: name, pattern: "^(?!.*[/:\\\\])[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") } private enum CodingKeys: String, CodingKey { @@ -2052,7 +2052,7 @@ extension Athena { try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") try self.validate(self.name, name: "name", parent: name, max: 255) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") + try self.validate(self.name, name: "name", parent: name, pattern: "^(?!.*[/:\\\\])[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") try self.validate(self.notebookS3LocationUri, name: "notebookS3LocationUri", parent: name, max: 1024) try self.validate(self.notebookS3LocationUri, name: "notebookS3LocationUri", parent: name, pattern: "^(https|s3|S3)://([^/]+)/?(.*)$") try self.validate(self.payload, name: "payload", parent: name, max: 10485760) @@ -3340,7 +3340,7 @@ extension Athena { public let encryptionConfiguration: EncryptionConfiguration? /// The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for ExpectedBucketOwner when it makes Amazon S3 calls to your specified output location. If the ExpectedBucketOwner Amazon Web Services account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the ExpectedBucketOwner setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. 
See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings. public let expectedBucketOwner: String? - /// The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. For more information, see Working with query results, recent queries, and output files. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + /// The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. To run the query, you must specify the query results location using one of the ways: either for individual queries using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena issues an error that no output location is provided. If workgroup settings override client-side settings, then the query uses the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. public let outputLocation: String? public init(aclConfiguration: AclConfiguration? = nil, encryptionConfiguration: EncryptionConfiguration? = nil, expectedBucketOwner: String? = nil, outputLocation: String? = nil) { @@ -3371,7 +3371,7 @@ extension Athena { public let encryptionConfiguration: EncryptionConfiguration? /// The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for ExpectedBucketOwner when it makes Amazon S3 calls to your specified output location. If the ExpectedBucketOwner Amazon Web Services account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error. If workgroup settings override client-side settings, then the query uses the ExpectedBucketOwner setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings. public let expectedBucketOwner: String? - /// The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. For more information, see Working with query results, recent queries, and output files. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The "workgroup settings override" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + /// The location in Amazon S3 where your query and calculation results are stored, such as s3://path/to/query/bucket/. If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The "workgroup settings override" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. 
See WorkGroupConfiguration$EnforceWorkGroupConfiguration. public let outputLocation: String? /// If set to true, indicates that the previously-specified ACL configuration for queries in this workgroup should be ignored and set to null. If set to false or not set, and a value is present in the AclConfiguration of ResultConfigurationUpdates, the AclConfiguration in the workgroup's ResultConfiguration is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings. public let removeAclConfiguration: Bool? @@ -4240,7 +4240,7 @@ extension Athena { try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") try self.validate(self.name, name: "name", parent: name, max: 255) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") + try self.validate(self.name, name: "name", parent: name, pattern: "^(?!.*[/:\\\\])[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$") try self.validate(self.notebookId, name: "notebookId", parent: name, max: 36) try self.validate(self.notebookId, name: "notebookId", parent: name, min: 1) try self.validate(self.notebookId, name: "notebookId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") @@ -4392,7 +4392,7 @@ extension Athena { public let queryResultsS3AccessGrantsConfiguration: QueryResultsS3AccessGrantsConfiguration? /// If set to true, allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to false, workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is false. For more information about Requester Pays buckets, see Requester Pays Buckets in the Amazon Simple Storage Service Developer Guide. public let requesterPaysEnabled: Bool? - /// The configuration for the workgroup, which includes the location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided. For more information, see Working with query results, recent queries, and output files. + /// The configuration for the workgroup, which includes the location in Amazon S3 where query and calculation results are stored and the encryption option, if any, used for query and calculation results. To run the query, you must specify the query results location using one of the ways: either in the workgroup using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided. public let resultConfiguration: ResultConfiguration? public init(additionalConfiguration: String? = nil, bytesScannedCutoffPerQuery: Int64? = nil, customerContentEncryptionConfiguration: CustomerContentEncryptionConfiguration? = nil, enableMinimumEncryptionConfiguration: Bool? = nil, enforceWorkGroupConfiguration: Bool? = nil, engineVersion: EngineVersion? 
= nil, executionRole: String? = nil, identityCenterConfiguration: IdentityCenterConfiguration? = nil, publishCloudWatchMetricsEnabled: Bool? = nil, queryResultsS3AccessGrantsConfiguration: QueryResultsS3AccessGrantsConfiguration? = nil, requesterPaysEnabled: Bool? = nil, resultConfiguration: ResultConfiguration? = nil) { diff --git a/Sources/Soto/Services/AuditManager/AuditManager_api.swift b/Sources/Soto/Services/AuditManager/AuditManager_api.swift index 1b609f207e..881f86362d 100644 --- a/Sources/Soto/Services/AuditManager/AuditManager_api.swift +++ b/Sources/Soto/Services/AuditManager/AuditManager_api.swift @@ -525,7 +525,7 @@ public struct AuditManager: AWSService { ) } - /// Gets a list of all of the Amazon Web Services that you can choose to include in your assessment. When you create an assessment, specify which of these services you want to include to narrow the assessment's scope. + /// Gets a list of the Amazon Web Services from which Audit Manager can collect evidence. Audit Manager defines which Amazon Web Services are in scope for an assessment. Audit Manager infers this scope by examining the assessment’s controls and their data sources, and then mapping this information to one or more of the corresponding Amazon Web Services that are in this list. For information about why it's no longer possible to specify services in scope manually, see I can't edit the services in scope for my assessment in the Troubleshooting section of the Audit Manager user guide. @Sendable public func getServicesInScope(_ input: GetServicesInScopeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetServicesInScopeResponse { return try await self.client.execute( @@ -616,7 +616,7 @@ public struct AuditManager: AWSService { ) } - /// Lists the latest analytics data for control domains across all of your active assessments. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that control domain. + /// Lists the latest analytics data for control domains across all of your active assessments. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that control domain. @Sendable public func listControlDomainInsights(_ input: ListControlDomainInsightsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListControlDomainInsightsResponse { return try await self.client.execute( @@ -629,7 +629,7 @@ public struct AuditManager: AWSService { ) } - /// Lists analytics data for control domains within a specified active assessment. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that domain. + /// Lists analytics data for control domains within a specified active assessment. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. 
For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that domain. @Sendable public func listControlDomainInsightsByAssessment(_ input: ListControlDomainInsightsByAssessmentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListControlDomainInsightsByAssessmentResponse { return try await self.client.execute( @@ -668,7 +668,7 @@ public struct AuditManager: AWSService { ) } - /// Returns a list of keywords that are pre-mapped to the specified control data source. + /// Returns a list of keywords that are pre-mapped to the specified control data source. @Sendable public func listKeywordsForDataSource(_ input: ListKeywordsForDataSourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListKeywordsForDataSourceResponse { return try await self.client.execute( @@ -1093,7 +1093,7 @@ extension AuditManager { ) } - /// Lists the latest analytics data for control domains across all of your active assessments. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that control domain. + /// Lists the latest analytics data for control domains across all of your active assessments. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that control domain. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -1112,7 +1112,7 @@ extension AuditManager { ) } - /// Lists analytics data for control domains within a specified active assessment. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that domain. + /// Lists analytics data for control domains within a specified active assessment. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. A control domain is listed only if at least one of the controls within that domain collected evidence on the lastUpdated date of controlDomainInsights. If this condition isn’t met, no data is listed for that domain. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -1169,7 +1169,7 @@ extension AuditManager { ) } - /// Returns a list of keywords that are pre-mapped to the specified control data source. + /// Returns a list of keywords that are pre-mapped to the specified control data source. /// Return PaginatorSequence for operation. 
/// /// - Parameters: @@ -1345,6 +1345,7 @@ extension AuditManager.ListControlInsightsByControlDomainRequest: AWSPaginateTok extension AuditManager.ListControlsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> AuditManager.ListControlsRequest { return .init( + controlCatalogId: self.controlCatalogId, controlType: self.controlType, maxResults: self.maxResults, nextToken: token diff --git a/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift b/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift index 7c054af1fd..b2443752f1 100644 --- a/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift +++ b/Sources/Soto/Services/AuditManager/AuditManager_shapes.swift @@ -78,6 +78,12 @@ extension AuditManager { public var description: String { return self.rawValue } } + public enum ControlState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case endOfSupport = "END_OF_SUPPORT" + public var description: String { return self.rawValue } + } + public enum ControlStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case inactive = "INACTIVE" case reviewed = "REVIEWED" @@ -86,11 +92,21 @@ extension AuditManager { } public enum ControlType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case core = "Core" case custom = "Custom" case standard = "Standard" public var description: String { return self.rawValue } } + public enum DataSourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case awsApiCall = "AWS_API_Call" + case awsCloudtrail = "AWS_Cloudtrail" + case awsConfig = "AWS_Config" + case awsSecurityHub = "AWS_Security_Hub" + case manual = "MANUAL" + public var description: String { return self.rawValue } + } + public enum DelegationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case complete = "COMPLETE" case inProgress = "IN_PROGRESS" @@ -207,6 +223,8 @@ extension AuditManager { case awsCloudtrail = "AWS_Cloudtrail" case awsConfig = "AWS_Config" case awsSecurityHub = "AWS_Security_Hub" + case commonControl = "Common_Control" + case coreControl = "Core_Control" case manual = "MANUAL" public var description: String { return self.rawValue } } @@ -1265,6 +1283,8 @@ extension AuditManager { public let lastUpdatedBy: String? /// The name of the control. public let name: String? + /// The state of the control. The END_OF_SUPPORT state is applicable to standard controls only. This state indicates that the standard control can still be used to collect evidence, but Audit Manager is no longer updating or maintaining that control. + public let state: ControlState? /// The tags associated with the control. public let tags: [String: String]? /// The steps that you should follow to determine if the control has been satisfied. @@ -1272,7 +1292,7 @@ extension AuditManager { /// Specifies whether the control is a standard control or a custom control. public let type: ControlType? - public init(actionPlanInstructions: String? = nil, actionPlanTitle: String? = nil, arn: String? = nil, controlMappingSources: [ControlMappingSource]? = nil, controlSources: String? = nil, createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, id: String? = nil, lastUpdatedAt: Date? = nil, lastUpdatedBy: String? = nil, name: String? = nil, tags: [String: String]? = nil, testingInformation: String? = nil, type: ControlType? = nil) { + public init(actionPlanInstructions: String? 
= nil, actionPlanTitle: String? = nil, arn: String? = nil, controlMappingSources: [ControlMappingSource]? = nil, controlSources: String? = nil, createdAt: Date? = nil, createdBy: String? = nil, description: String? = nil, id: String? = nil, lastUpdatedAt: Date? = nil, lastUpdatedBy: String? = nil, name: String? = nil, state: ControlState? = nil, tags: [String: String]? = nil, testingInformation: String? = nil, type: ControlType? = nil) { self.actionPlanInstructions = actionPlanInstructions self.actionPlanTitle = actionPlanTitle self.arn = arn @@ -1285,6 +1305,7 @@ extension AuditManager { self.lastUpdatedAt = lastUpdatedAt self.lastUpdatedBy = lastUpdatedBy self.name = name + self.state = state self.tags = tags self.testingInformation = testingInformation self.type = type @@ -1303,6 +1324,7 @@ extension AuditManager { case lastUpdatedAt = "lastUpdatedAt" case lastUpdatedBy = "lastUpdatedBy" case name = "name" + case state = "state" case tags = "tags" case testingInformation = "testingInformation" case type = "type" @@ -1335,7 +1357,7 @@ extension AuditManager { public let controlsCountByNoncompliantEvidence: Int? /// A breakdown of the compliance check status for the evidence that’s associated with the control domain. public let evidenceInsights: EvidenceInsights? - /// The unique identifier for the control domain. + /// The unique identifier for the control domain. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. public let id: String? /// The time when the control domain insights were last updated. public let lastUpdated: Date? @@ -1427,9 +1449,9 @@ extension AuditManager { public let sourceKeyword: SourceKeyword? /// The name of the source. public let sourceName: String? - /// The setup option for the data source. This option reflects if the evidence collection is automated or manual. + /// The setup option for the data source. This option reflects if the evidence collection method is automated or manual. If you don’t provide a value for sourceSetUpOption, Audit Manager automatically infers and populates the correct value based on the sourceType that you specify. public let sourceSetUpOption: SourceSetUpOption? - /// Specifies one of the five data source types for evidence collection. + /// Specifies which type of data source is used to collect evidence. The source can be an individual data source type, such as AWS_Cloudtrail, AWS_Config, AWS_Security_Hub, AWS_API_Call, or MANUAL. The source can also be a managed grouping of data sources, such as a Core_Control or a Common_Control. public let sourceType: SourceType? /// The instructions for troubleshooting the control. public let troubleshootingText: String? 
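Note (not part of the generated patch): the hunks above add the ControlState enum and the new `state` member on the Control shape. A minimal, hypothetical sketch of how a consumer of these Soto shapes might read the new field — the helper name is made up, and the `control` value is assumed to come from an existing GetControl/ListControls response elsewhere in an application:

    import SotoAuditManager

    // Hypothetical helper, illustration only.
    func isStillMaintained(_ control: AuditManager.Control) -> Bool {
        // END_OF_SUPPORT controls can still collect evidence, but Audit Manager
        // no longer updates or maintains them.
        return control.state != .endOfSupport
    }
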
@@ -1452,7 +1474,7 @@ extension AuditManager { try self.validate(self.sourceId, name: "sourceId", parent: name, min: 36) try self.validate(self.sourceId, name: "sourceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") try self.sourceKeyword?.validate(name: "\(name).sourceKeyword") - try self.validate(self.sourceName, name: "sourceName", parent: name, max: 100) + try self.validate(self.sourceName, name: "sourceName", parent: name, max: 300) try self.validate(self.sourceName, name: "sourceName", parent: name, min: 1) try self.validate(self.troubleshootingText, name: "troubleshootingText", parent: name, max: 1000) try self.validate(self.troubleshootingText, name: "troubleshootingText", parent: name, pattern: "^[\\w\\W\\s\\S]*$") @@ -1775,9 +1797,9 @@ extension AuditManager { public let sourceKeyword: SourceKeyword? /// The name of the control mapping data source. public let sourceName: String? - /// The setup option for the data source, which reflects if the evidence collection is automated or manual. + /// The setup option for the data source. This option reflects if the evidence collection method is automated or manual. If you don’t provide a value for sourceSetUpOption, Audit Manager automatically infers and populates the correct value based on the sourceType that you specify. public let sourceSetUpOption: SourceSetUpOption? - /// Specifies one of the five types of data sources for evidence collection. + /// Specifies which type of data source is used to collect evidence. The source can be an individual data source type, such as AWS_Cloudtrail, AWS_Config, AWS_Security_Hub, AWS_API_Call, or MANUAL. The source can also be a managed grouping of data sources, such as a Core_Control or a Common_Control. public let sourceType: SourceType? /// The instructions for troubleshooting the control. public let troubleshootingText: String? @@ -1796,7 +1818,7 @@ extension AuditManager { try self.validate(self.sourceDescription, name: "sourceDescription", parent: name, max: 1000) try self.validate(self.sourceDescription, name: "sourceDescription", parent: name, pattern: "^[\\w\\W\\s\\S]*$") try self.sourceKeyword?.validate(name: "\(name).sourceKeyword") - try self.validate(self.sourceName, name: "sourceName", parent: name, max: 100) + try self.validate(self.sourceName, name: "sourceName", parent: name, max: 300) try self.validate(self.sourceName, name: "sourceName", parent: name, min: 1) try self.validate(self.troubleshootingText, name: "troubleshootingText", parent: name, max: 1000) try self.validate(self.troubleshootingText, name: "troubleshootingText", parent: name, pattern: "^[\\w\\W\\s\\S]*$") @@ -3288,7 +3310,7 @@ extension AuditManager { public struct ListAssessmentControlInsightsByControlDomainRequest: AWSEncodableShape { /// The unique identifier for the active assessment. public let assessmentId: String - /// The unique identifier for the control domain. + /// The unique identifier for the control domain. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. public let controlDomainId: String /// Represents the maximum number of results on a page or for an API request call. public let maxResults: Int? 
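Note (not part of the generated patch): together with the relaxed validation in the next hunk, `controlDomainId` now accepts a Control Catalog domain ARN or the literal UNCATEGORIZED in addition to the legacy UUID form. A rough sketch, assuming the generated memberwise initializer and using placeholder identifiers:

    import SotoAuditManager

    // Placeholder IDs for illustration only.
    let request = AuditManager.ListAssessmentControlInsightsByControlDomainRequest(
        assessmentId: "11111111-2222-3333-4444-555555555555",
        controlDomainId: "arn:aws:controlcatalog:::domain/EXAMPLEdomain1234" // or "UNCATEGORIZED", or a UUID
    )
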
@@ -3315,9 +3337,9 @@ extension AuditManager { try self.validate(self.assessmentId, name: "assessmentId", parent: name, max: 36) try self.validate(self.assessmentId, name: "assessmentId", parent: name, min: 36) try self.validate(self.assessmentId, name: "assessmentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") - try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, max: 36) - try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, min: 36) - try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, max: 2048) + try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, min: 13) + try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, pattern: "^arn:.*:controlcatalog:.*:.*:domain/.*|UNCATEGORIZED|^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1000) @@ -3641,7 +3663,7 @@ extension AuditManager { } public struct ListControlInsightsByControlDomainRequest: AWSEncodableShape { - /// The unique identifier for the control domain. + /// The unique identifier for the control domain. Audit Manager supports the control domains that are provided by Amazon Web Services Control Catalog. For information about how to find a list of available control domains, see ListDomains in the Amazon Web Services Control Catalog API Reference. public let controlDomainId: String /// Represents the maximum number of results on a page or for an API request call. public let maxResults: Int? @@ -3663,9 +3685,9 @@ extension AuditManager { } public func validate(name: String) throws { - try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, max: 36) - try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, min: 36) - try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, max: 2048) + try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, min: 13) + try self.validate(self.controlDomainId, name: "controlDomainId", parent: name, pattern: "^arn:.*:controlcatalog:.*:.*:domain/.*|UNCATEGORIZED|^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1000) @@ -3694,14 +3716,17 @@ extension AuditManager { } public struct ListControlsRequest: AWSEncodableShape { - /// The type of control, such as a standard control or a custom control. + /// A filter that narrows the list of controls to a specific resource from the Amazon Web Services Control Catalog. To use this parameter, specify the ARN of the Control Catalog resource. You can specify either a control domain, a control objective, or a common control. 
For information about how to find the ARNs for these resources, see ListDomains , ListObjectives , and ListCommonControls . You can only filter by one Control Catalog resource at a time. Specifying multiple resource ARNs isn’t currently supported. If you want to filter by more than one ARN, we recommend that you run the ListControls operation separately for each ARN. Alternatively, specify UNCATEGORIZED to list controls that aren't mapped to a Control Catalog resource. For example, this operation might return a list of custom controls that don't belong to any control domain or control objective. + public let controlCatalogId: String? + /// A filter that narrows the list of controls to a specific type. public let controlType: ControlType - /// Represents the maximum number of results on a page or for an API request call. + /// The maximum number of results on a page or for an API request call. public let maxResults: Int? - /// The pagination token that's used to fetch the next set of results. + /// The pagination token that's used to fetch the next set of results. public let nextToken: String? - public init(controlType: ControlType, maxResults: Int? = nil, nextToken: String? = nil) { + public init(controlCatalogId: String? = nil, controlType: ControlType, maxResults: Int? = nil, nextToken: String? = nil) { + self.controlCatalogId = controlCatalogId self.controlType = controlType self.maxResults = maxResults self.nextToken = nextToken @@ -3710,12 +3735,16 @@ extension AuditManager { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.controlCatalogId, key: "controlCatalogId") request.encodeQuery(self.controlType, key: "controlType") request.encodeQuery(self.maxResults, key: "maxResults") request.encodeQuery(self.nextToken, key: "nextToken") } public func validate(name: String) throws { + try self.validate(self.controlCatalogId, name: "controlCatalogId", parent: name, max: 2048) + try self.validate(self.controlCatalogId, name: "controlCatalogId", parent: name, min: 13) + try self.validate(self.controlCatalogId, name: "controlCatalogId", parent: name, pattern: "^arn:.*:controlcatalog:.*|UNCATEGORIZED$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1000) @@ -3729,7 +3758,7 @@ extension AuditManager { public struct ListControlsResponse: AWSDecodableShape { /// A list of metadata that the ListControls API returns for each control. public let controlMetadataList: [ControlMetadata]? - /// The pagination token that's used to fetch the next set of results. + /// The pagination token that's used to fetch the next set of results. public let nextToken: String? public init(controlMetadataList: [ControlMetadata]? = nil, nextToken: String? = nil) { @@ -3748,10 +3777,10 @@ extension AuditManager { public let maxResults: Int? /// The pagination token that's used to fetch the next set of results. public let nextToken: String? - /// The control mapping data source that the keywords apply to. - public let source: SourceType + /// The control mapping data source that the keywords apply to. + public let source: DataSourceType - public init(maxResults: Int? = nil, nextToken: String? = nil, source: SourceType) { + public init(maxResults: Int? = nil, nextToken: String? 
= nil, source: DataSourceType) { self.maxResults = maxResults self.nextToken = nextToken self.source = source @@ -3777,7 +3806,7 @@ extension AuditManager { } public struct ListKeywordsForDataSourceResponse: AWSDecodableShape { - /// The list of keywords for the event mapping source. + /// The list of keywords for the control mapping source. public let keywords: [String]? /// The pagination token that's used to fetch the next set of results. public let nextToken: String? @@ -4071,9 +4100,15 @@ extension AuditManager { public struct Scope: AWSEncodableShape & AWSDecodableShape { /// The Amazon Web Services accounts that are included in the scope of the assessment. public let awsAccounts: [AWSAccount]? - /// The Amazon Web Services services that are included in the scope of the assessment. + /// The Amazon Web Services services that are included in the scope of the assessment. This API parameter is no longer supported. If you use this parameter to specify one or more Amazon Web Services, Audit Manager ignores this input. Instead, the value for awsServices will show as empty. public let awsServices: [AWSService]? + public init(awsAccounts: [AWSAccount]? = nil) { + self.awsAccounts = awsAccounts + self.awsServices = nil + } + + @available(*, deprecated, message: "Members awsServices have been deprecated") public init(awsAccounts: [AWSAccount]? = nil, awsServices: [AWSService]? = nil) { self.awsAccounts = awsAccounts self.awsServices = awsServices @@ -4176,7 +4211,7 @@ extension AuditManager { public func validate(name: String) throws { try self.validate(self.keywordValue, name: "keywordValue", parent: name, max: 100) try self.validate(self.keywordValue, name: "keywordValue", parent: name, min: 1) - try self.validate(self.keywordValue, name: "keywordValue", parent: name, pattern: "^[a-zA-Z_0-9-\\s().]+$") + try self.validate(self.keywordValue, name: "keywordValue", parent: name, pattern: "^[a-zA-Z_0-9-\\s().:\\/]+$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift b/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift index 44d4cfda46..6d27e381ce 100644 --- a/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift +++ b/Sources/Soto/Services/AutoScaling/AutoScaling_api.swift @@ -88,7 +88,7 @@ public struct AutoScaling: AWSService { // MARK: API Calls - /// Attaches one or more EC2 instances to the specified Auto Scaling group. When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails. If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups. For more information, see Attach EC2 instances to your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. + /// Attaches one or more EC2 instances to the specified Auto Scaling group. When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails. 
If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups. For more information, see Detach or attach instances in the Amazon EC2 Auto Scaling User Guide. @Sendable public func attachInstances(_ input: AttachInstancesQuery, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -283,7 +283,7 @@ public struct AutoScaling: AWSService { ) } - /// Deletes the specified scaling policy. Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action. For more information, see Deleting a scaling policy in the Amazon EC2 Auto Scaling User Guide. + /// Deletes the specified scaling policy. Deleting either a step scaling policy or a simple scaling policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action. For more information, see Delete a scaling policy in the Amazon EC2 Auto Scaling User Guide. @Sendable public func deletePolicy(_ input: DeletePolicyType, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -512,7 +512,7 @@ public struct AutoScaling: AWSService { ) } - /// Gets information about the scaling activities in the account and Region. When scaling events occur, you see a record of the scaling activity in the scaling activities. For more information, see Verifying a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. + /// Gets information about the scaling activities in the account and Region. When scaling events occur, you see a record of the scaling activity in the scaling activities. For more information, see Verify a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. @Sendable public func describeScalingActivities(_ input: DescribeScalingActivitiesType, logger: Logger = AWSClient.loggingDisabled) async throws -> ActivitiesType { return try await self.client.execute( @@ -563,7 +563,7 @@ public struct AutoScaling: AWSService { ) } - /// Describes the termination policies supported by Amazon EC2 Auto Scaling. For more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto Scaling User Guide. + /// Describes the termination policies supported by Amazon EC2 Auto Scaling. For more information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. 
@Sendable public func describeTerminationPolicyTypes(logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTerminationPolicyTypesAnswer { return try await self.client.execute( @@ -601,7 +601,7 @@ public struct AutoScaling: AWSService { ) } - /// Removes one or more instances from the specified Auto Scaling group. After the instances are detached, you can manage them independent of the Auto Scaling group. If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached. If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups. For more information, see Detach EC2 instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. + /// Removes one or more instances from the specified Auto Scaling group. After the instances are detached, you can manage them independent of the Auto Scaling group. If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached. If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups. For more information, see Detach or attach instances in the Amazon EC2 Auto Scaling User Guide. @Sendable public func detachInstances(_ input: DetachInstancesQuery, logger: Logger = AWSClient.loggingDisabled) async throws -> DetachInstancesAnswer { return try await self.client.execute( @@ -744,7 +744,7 @@ public struct AutoScaling: AWSService { ) } - /// Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address. This configuration overwrites any existing configuration. For more information, see Getting Amazon SNS notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails. + /// Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address. This configuration overwrites any existing configuration. For more information, see Amazon SNS notification options for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails. @Sendable public func putNotificationConfiguration(_ input: PutNotificationConfigurationType, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -783,7 +783,7 @@ public struct AutoScaling: AWSService { ) } - /// Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity. 
For more information and example configurations, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. This operation must be called from the Region in which the Auto Scaling group was created. This operation cannot be called on an Auto Scaling group that has a mixed instances policy or a launch template or launch configuration that requests Spot Instances. You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API. + /// Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity. This operation must be called from the Region in which the Auto Scaling group was created. You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API. For more information, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. @Sendable public func putWarmPool(_ input: PutWarmPoolType, logger: Logger = AWSClient.loggingDisabled) async throws -> PutWarmPoolAnswer { return try await self.client.execute( @@ -809,7 +809,7 @@ public struct AutoScaling: AWSService { ) } - /// Resumes the specified suspended auto scaling processes, or all suspended process, for the specified Auto Scaling group. For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide. + /// Resumes the specified suspended auto scaling processes, or all suspended process, for the specified Auto Scaling group. For more information, see Suspend and resume Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide. @Sendable public func resumeProcesses(_ input: ScalingProcessQuery, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -848,7 +848,7 @@ public struct AutoScaling: AWSService { ) } - /// Sets the health status of the specified instance. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide. + /// Sets the health status of the specified instance. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. @Sendable public func setInstanceHealth(_ input: SetInstanceHealthQuery, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -861,7 +861,7 @@ public struct AutoScaling: AWSService { ) } - /// Updates the instance protection settings of the specified instances. This operation cannot be called on instances in a warm pool. For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Using instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails. + /// Updates the instance protection settings of the specified instances. This operation cannot be called on instances in a warm pool. For more information, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call fails. 
@Sendable public func setInstanceProtection(_ input: SetInstanceProtectionQuery, logger: Logger = AWSClient.loggingDisabled) async throws -> SetInstanceProtectionAnswer { return try await self.client.execute( @@ -887,7 +887,7 @@ public struct AutoScaling: AWSService { ) } - /// Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group. If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide. To resume processes that have been suspended, call the ResumeProcesses API. + /// Suspends the specified auto scaling processes, or all processes, for the specified Auto Scaling group. If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly. For more information, see Suspend and resume Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide. To resume processes that have been suspended, call the ResumeProcesses API. @Sendable public func suspendProcesses(_ input: ScalingProcessQuery, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -900,7 +900,7 @@ public struct AutoScaling: AWSService { ) } - /// Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called on instances in a warm pool. This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it. If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated. By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide. + /// Terminates the specified instance and optionally adjusts the desired group size. This operation cannot be called on instances in a warm pool. This call simply makes a termination request. The instance is not terminated immediately. When an instance is terminated, the instance status changes to terminated. You can't connect to or start an instance after you've terminated it. If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are terminated. By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you decrement the desired capacity, your Auto Scaling group can become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might terminate instances in other zones. For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide. 
@Sendable public func terminateInstanceInAutoScalingGroup(_ input: TerminateInstanceInAutoScalingGroupType, logger: Logger = AWSClient.loggingDisabled) async throws -> ActivityType { return try await self.client.execute( @@ -1092,7 +1092,7 @@ extension AutoScaling { ) } - /// Gets information about the scaling activities in the account and Region. When scaling events occur, you see a record of the scaling activity in the scaling activities. For more information, see Verifying a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. + /// Gets information about the scaling activities in the account and Region. When scaling events occur, you see a record of the scaling activity in the scaling activities. For more information, see Verify a scaling activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. If the scaling event succeeds, the value of the StatusCode element in the response is Successful. If an attempt to launch instances failed, the StatusCode value is Failed or Cancelled and the StatusMessage element in the response indicates the cause of the failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift b/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift index b689843349..2d48d91d5a 100644 --- a/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift +++ b/Sources/Soto/Services/AutoScaling/AutoScaling_shapes.swift @@ -787,7 +787,7 @@ extension AutoScaling { public let launchConfigurationName: String? /// The launch template for the instance. public let launchTemplate: LaunchTemplateSpecification? - /// The lifecycle state for the instance. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide. Valid values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait | Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running + /// The lifecycle state for the instance. The Quarantined state is not used. For more information, see Amazon EC2 Auto Scaling instance lifecycle in the Amazon EC2 Auto Scaling User Guide. Valid values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService | Terminating | Terminating:Wait | Terminating:Proceed | Terminated | Detaching | Detached | EnteringStandby | Standby | Warmed:Pending | Warmed:Pending:Wait | Warmed:Pending:Proceed | Warmed:Terminating | Warmed:Terminating:Wait | Warmed:Terminating:Proceed | Warmed:Terminated | Warmed:Stopped | Warmed:Running public let lifecycleState: String? /// Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in. public let protectedFromScaleIn: Bool? 
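Note (not part of the generated patch): the paginator doc above describes the StatusCode/StatusMessage behaviour for scaling activities. A non-normative sketch of consuming that paginator, assuming Soto's generated `describeScalingActivitiesPaginator` method, an already-configured AutoScaling client, and a placeholder group name:

    import SotoAutoScaling

    // Sketch only; "my-asg" is a placeholder group name.
    func printScalingActivities(using autoScaling: AutoScaling) async throws {
        let input = AutoScaling.DescribeScalingActivitiesType(autoScalingGroupName: "my-asg")
        for try await page in autoScaling.describeScalingActivitiesPaginator(input) {
            for activity in page.activities {
                // A Failed or Cancelled status code carries the failure cause in statusMessage.
                print(activity.statusCode, activity.statusMessage ?? "")
            }
        }
    }
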
@@ -1094,19 +1094,19 @@ extension AutoScaling { public let defaultInstanceWarmup: Int? /// The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure auto scaling. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity, the default is the minimum size of the group. public let desiredCapacity: Int? - /// The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances. Valid values: units | vcpu | memory-mib + /// The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances. Valid values: units | vcpu | memory-mib public let desiredCapacityType: String? /// The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. This is useful if your instances do not immediately pass their health checks after they enter the InService state. For more information, see Set the health check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Default: 0 seconds public let healthCheckGracePeriod: Int? - /// A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set. + /// A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set. public let healthCheckType: String? - /// The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. For more information, see Creating an Auto Scaling group using an EC2 instance in the Amazon EC2 Auto Scaling User Guide. + /// The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. 
For more information, see Create an Auto Scaling group using parameters from an existing instance in the Amazon EC2 Auto Scaling User Guide. public let instanceId: String? /// An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide. public let instanceMaintenancePolicy: InstanceMaintenancePolicy? /// The name of the launch configuration to use to launch instances. Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId). public let launchConfigurationName: String? - /// Information used to specify the launch template and version to use to launch instances. Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId). The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Creating a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. + /// Information used to specify the launch template and version to use to launch instances. Conditional: You must specify either a launch template (LaunchTemplate or MixedInstancesPolicy) or a launch configuration (LaunchConfigurationName or InstanceId). The launch template that is specified must be configured for use with an Auto Scaling group. For more information, see Create a launch template for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. public let launchTemplate: LaunchTemplateSpecification? /// One or more lifecycle hooks to add to the Auto Scaling group before instances are launched. @OptionalCustomCoding> @@ -1114,7 +1114,7 @@ extension AutoScaling { /// A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers, Network Load Balancers, and Gateway Load Balancers, specify the TargetGroupARNs property instead. @OptionalCustomCoding> public var loadBalancerNames: [String]? - /// The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see Replacing Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide. + /// The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see Replace Auto Scaling instances based on maximum instance lifetime in the Amazon EC2 Auto Scaling User Guide. public let maxInstanceLifetime: Int? /// The maximum size of the group. With a mixed instances policy that uses instance weighting, Amazon EC2 Auto Scaling may need to go above MaxSize to meet your capacity requirements. In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more than your largest instance weight (weights that define how many units each instance contributes to the desired capacity of the group). public let maxSize: Int? @@ -1122,7 +1122,7 @@ extension AutoScaling { public let minSize: Int? /// The mixed instances policy. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. public let mixedInstancesPolicy: MixedInstancesPolicy? 
- /// Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Using instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. + /// Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. public let newInstancesProtectedFromScaleIn: Bool? /// The name of the placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances. A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group. public let placementGroup: String? @@ -1134,7 +1134,7 @@ extension AutoScaling { /// The Amazon Resource Names (ARN) of the Elastic Load Balancing target groups to associate with the Auto Scaling group. Instances are registered as targets with the target groups. The target groups receive incoming traffic and route requests to one or more registered targets. For more information, see Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. @OptionalCustomCoding> public var targetGroupARNs: [String]? - /// A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto Scaling User Guide. Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias + /// A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias @OptionalCustomCoding> public var terminationPolicies: [String]? /// The list of traffic sources to attach to this Auto Scaling group. You can use any of the following as traffic sources for an Auto Scaling group: Classic Load Balancer, Application Load Balancer, Gateway Load Balancer, Network Load Balancer, and VPC Lattice. @@ -1266,7 +1266,7 @@ extension AutoScaling { } public struct CreateLaunchConfigurationType: AWSEncodableShape { - /// Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. If you specify true, each instance in the Auto Scaling group receives a unique public IPv4 address. 
For more information, see Launching Auto Scaling instances in a VPC in the Amazon EC2 Auto Scaling User Guide. If you specify this property, you must specify at least one subnet for VPCZoneIdentifier when you create your group. + /// Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. If you specify true, each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see Provide network connectivity for your Auto Scaling instances using Amazon VPC in the Amazon EC2 Auto Scaling User Guide. If you specify this property, you must specify at least one subnet for VPCZoneIdentifier when you create your group. public let associatePublicIpAddress: Bool? /// The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances. @OptionalCustomCoding> @@ -1280,27 +1280,27 @@ extension AutoScaling { public let ebsOptimized: Bool? /// The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role. For more information, see IAM role for applications that run on Amazon EC2 instances in the Amazon EC2 Auto Scaling User Guide. public let iamInstanceProfile: String? - /// The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Finding a Linux AMI in the Amazon EC2 User Guide for Linux Instances. If you specify InstanceId, an ImageId is not required. + /// The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances. If you specify InstanceId, an ImageId is not required. public let imageId: String? - /// The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping. To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request. For more information, see Creating a launch configuration using an EC2 instance in the Amazon EC2 Auto Scaling User Guide. + /// The ID of the instance to use to create the launch configuration. The new launch configuration derives attributes from the instance, except for the block device mapping. To create a launch configuration with a block device mapping or override any other instance attributes, specify them as part of the same request. For more information, see Create a launch configuration in the Amazon EC2 Auto Scaling User Guide. public let instanceId: String? - /// Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring. The default value is true (enabled). When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. 
For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide. + /// Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring. The default value is true (enabled). When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure monitoring for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide. public let instanceMonitoring: InstanceMonitoring? /// Specifies the instance type of the EC2 instance. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances. If you specify InstanceId, an InstanceType is not required. public let instanceType: String? /// The ID of the kernel associated with the AMI. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances. public let kernelId: String? - /// The name of the key pair. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon EC2 User Guide for Linux Instances. + /// The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances. public let keyName: String? /// The name of the launch configuration. This name must be unique per Region per account. public let launchConfigurationName: String? - /// The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide. + /// The metadata options for the instances. For more information, see Configure the instance metadata options in the Amazon EC2 Auto Scaling User Guide. public let metadataOptions: InstanceMetadataOptions? - /// The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this property to dedicated. For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group. Valid values: default | dedicated + /// The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this property to dedicated. If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group. Valid values: default | dedicated public let placementTenancy: String? /// The ID of the RAM disk to select. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide for Linux Instances. public let ramdiskId: String? - /// A list that contains the security group IDs to assign to the instances in the Auto Scaling group. 
For more information, see Control traffic to resources using security groups in the Amazon Virtual Private Cloud User Guide. + /// A list that contains the security group IDs to assign to the instances in the Auto Scaling group. For more information, see Control traffic to your Amazon Web Services resources using security groups in the Amazon Virtual Private Cloud User Guide. @OptionalCustomCoding> public var securityGroups: [String]? /// The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Request Spot Instances for fault-tolerant and flexible applications in the Amazon EC2 Auto Scaling User Guide. Valid Range: Minimum value of 0.001 When you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price. @@ -2448,7 +2448,7 @@ extension AutoScaling { public struct DisableMetricsCollectionQuery: AWSEncodableShape { /// The name of the Auto Scaling group. public let autoScalingGroupName: String? - /// Identifies the metrics to disable. You can specify one or more of the following metrics: GroupMinSize GroupMaxSize GroupDesiredCapacity GroupInServiceInstances GroupPendingInstances GroupStandbyInstances GroupTerminatingInstances GroupTotalInstances GroupInServiceCapacity GroupPendingCapacity GroupStandbyCapacity GroupTerminatingCapacity GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity If you omit this property, all metrics are disabled. For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide. + /// Identifies the metrics to disable. You can specify one or more of the following metrics: GroupMinSize GroupMaxSize GroupDesiredCapacity GroupInServiceInstances GroupPendingInstances GroupStandbyInstances GroupTerminatingInstances GroupTotalInstances GroupInServiceCapacity GroupPendingCapacity GroupStandbyCapacity GroupTerminatingCapacity GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity If you omit this property, all metrics are disabled. For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. @OptionalCustomCoding> public var metrics: [String]? @@ -2477,9 +2477,9 @@ extension AutoScaling { public struct Ebs: AWSEncodableShape & AWSDecodableShape { /// Indicates whether the volume is deleted on instance termination. For Amazon EC2 Auto Scaling, the default value is true. public let deleteOnTermination: Bool? - /// Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types. If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration. 
If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted. For more information, see Use Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the Amazon EC2 Auto Scaling User Guide. + /// Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Requirements for Amazon EBS encryption in the Amazon EBS User Guide. If your AMI uses encrypted volumes, you can also only launch it on supported instance types. If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration. If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted. For more information, see Use Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the Amazon EC2 Auto Scaling User Guide. public let encrypted: Bool? - /// The number of input/output (I/O) operations per second (IOPS) to provision for the volume. For gp3 and io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. The following are the supported values for each volume type: gp3: 3,000-16,000 IOPS io1: 100-64,000 IOPS For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the Nitro System. Other instance families guarantee performance up to 32,000 IOPS. Iops is supported when the volume type is gp3 or io1 and required only when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.) + /// The number of input/output (I/O) operations per second (IOPS) to provision for the volume. For gp3 and io1 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. The following are the supported values for each volume type: gp3: 3,000-16,000 IOPS io1: 100-64,000 IOPS For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the Amazon Web Services Nitro System. Other instance families guarantee performance up to 32,000 IOPS. Iops is supported when the volume type is gp3 or io1 and required only when the volume type is io1. (Not used with standard, gp2, st1, or sc1 volumes.) public let iops: Int? /// The snapshot ID of the volume to use. You must specify either a VolumeSize or a SnapshotId. public let snapshotId: String? @@ -2487,7 +2487,7 @@ extension AutoScaling { public let throughput: Int? /// The volume size, in GiBs. The following are the supported volumes sizes for each volume type: gp2 and gp3: 1-16,384 io1: 4-16,384 st1 and sc1: 125-16,384 standard: 1-1,024 You must specify either a SnapshotId or a VolumeSize. If you specify both SnapshotId and VolumeSize, the volume size must be equal or greater than the size of the snapshot. public let volumeSize: Int? - /// The volume type. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide for Linux Instances. 
Valid values: standard | io1 | gp2 | st1 | sc1 | gp3 + /// The volume type. For more information, see Amazon EBS volume types in the Amazon EBS User Guide. Valid values: standard | io1 | gp2 | st1 | sc1 | gp3 public let volumeType: String? public init(deleteOnTermination: Bool? = nil, encrypted: Bool? = nil, iops: Int? = nil, snapshotId: String? = nil, throughput: Int? = nil, volumeSize: Int? = nil, volumeType: String? = nil) { @@ -2530,7 +2530,7 @@ extension AutoScaling { public let autoScalingGroupName: String? /// The frequency at which Amazon EC2 Auto Scaling sends aggregated data to CloudWatch. The only valid value is 1Minute. public let granularity: String? - /// Identifies the metrics to enable. You can specify one or more of the following metrics: GroupMinSize GroupMaxSize GroupDesiredCapacity GroupInServiceInstances GroupPendingInstances GroupStandbyInstances GroupTerminatingInstances GroupTotalInstances GroupInServiceCapacity GroupPendingCapacity GroupStandbyCapacity GroupTerminatingCapacity GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity If you specify Granularity and don't specify any metrics, all metrics are enabled. For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide. + /// Identifies the metrics to enable. You can specify one or more of the following metrics: GroupMinSize GroupMaxSize GroupDesiredCapacity GroupInServiceInstances GroupPendingInstances GroupStandbyInstances GroupTerminatingInstances GroupTotalInstances GroupInServiceCapacity GroupPendingCapacity GroupStandbyCapacity GroupTerminatingCapacity GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity If you specify Granularity and don't specify any metrics, all metrics are enabled. For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. @OptionalCustomCoding> public var metrics: [String]? @@ -2564,7 +2564,7 @@ extension AutoScaling { public struct EnabledMetric: AWSDecodableShape { /// The granularity of the metric. The only valid value is 1Minute. public let granularity: String? - /// One of the following metrics: GroupMinSize GroupMaxSize GroupDesiredCapacity GroupInServiceInstances GroupPendingInstances GroupStandbyInstances GroupTerminatingInstances GroupTotalInstances GroupInServiceCapacity GroupPendingCapacity GroupStandbyCapacity GroupTerminatingCapacity GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide. 
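Editor's note: the Ebs shape above documents the encryption, IOPS and volume-type constraints, and its memberwise initializer appears in this hunk. A minimal sketch of constructing it with the documented gp3 ranges (the variable name and values are illustrative only):

// Assumes `import SotoAutoScaling` (or the monolithic Soto module) is available.
// Illustration only: an encrypted gp3 root volume built with the Ebs initializer shown above.
let rootVolume = AutoScaling.Ebs(
    deleteOnTermination: true,   // remove the volume when the instance terminates (the Auto Scaling default)
    encrypted: true,             // requires an instance type that supports Amazon EBS encryption
    iops: 3000,                  // within the documented gp3 range of 3,000-16,000 IOPS
    volumeSize: 100,             // GiB; must be >= the snapshot size if SnapshotId is also given
    volumeType: "gp3"
)

Such a value would normally be wrapped in a BlockDeviceMapping entry for the launch configuration's blockDeviceMappings list; that wrapping is not shown here.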
+ /// One of the following metrics: GroupMinSize GroupMaxSize GroupDesiredCapacity GroupInServiceInstances GroupPendingInstances GroupStandbyInstances GroupTerminatingInstances GroupTotalInstances GroupInServiceCapacity GroupPendingCapacity GroupStandbyCapacity GroupTerminatingCapacity GroupTotalCapacity WarmPoolDesiredCapacity WarmPoolWarmedCapacity WarmPoolPendingCapacity WarmPoolTerminatingCapacity WarmPoolTotalCapacity GroupAndWarmPoolDesiredCapacity GroupAndWarmPoolTotalCapacity For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. public let metric: String? public init(granularity: String? = nil, metric: String? = nil) { @@ -2821,7 +2821,7 @@ extension AutoScaling { public let launchConfigurationName: String? /// The launch template for the instance. public let launchTemplate: LaunchTemplateSpecification? - /// A description of the current lifecycle state. The Quarantined state is not used. For information about lifecycle states, see Instance lifecycle in the Amazon EC2 Auto Scaling User Guide. + /// A description of the current lifecycle state. The Quarantined state is not used. For more information, see Amazon EC2 Auto Scaling instance lifecycle in the Amazon EC2 Auto Scaling User Guide. public let lifecycleState: LifecycleState? /// Indicates whether the instance is protected from termination by Amazon EC2 Auto Scaling when scaling in. public let protectedFromScaleIn: Bool? @@ -3217,9 +3217,9 @@ extension AutoScaling { } public struct LaunchConfiguration: AWSDecodableShape { - /// Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. For more information, see Launching Auto Scaling instances in a VPC in the Amazon EC2 Auto Scaling User Guide. + /// Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet. For more information, see Provide network connectivity for your Auto Scaling instances using Amazon VPC in the Amazon EC2 Auto Scaling User Guide. public let associatePublicIpAddress: Bool? - /// The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block Device Mapping in the Amazon EC2 User Guide for Linux Instances. + /// The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see Block device mappings in the Amazon EC2 User Guide for Linux Instances. @OptionalCustomCoding> public var blockDeviceMappings: [BlockDeviceMapping]? /// Available for backward compatibility. 
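Editor's note: the metrics-collection shapes above list the group metrics and the single valid granularity of 1Minute. A sketch of enabling a couple of them; the request type name EnableMetricsCollectionQuery and the enableMetricsCollection client method are assumed from the surrounding shapes, not quoted verbatim from this hunk:

// Assumes an `autoScaling: AutoScaling` service client created elsewhere.
let enableRequest = AutoScaling.EnableMetricsCollectionQuery(
    autoScalingGroupName: "my-asg",            // hypothetical group name
    granularity: "1Minute",                    // the only valid value, per the doc comment above
    metrics: ["GroupMinSize", "GroupMaxSize"]  // omit `metrics` to enable all group metrics
)
try await autoScaling.enableMetricsCollection(enableRequest)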
@@ -3229,34 +3229,34 @@ extension AutoScaling { public var classicLinkVPCSecurityGroups: [String]? /// The creation date and time for the launch configuration. public let createdTime: Date? - /// Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances. + /// Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances. public let ebsOptimized: Bool? /// The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role. For more information, see IAM role for applications that run on Amazon EC2 instances in the Amazon EC2 Auto Scaling User Guide. public let iamInstanceProfile: String? /// The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more information, see Find a Linux AMI in the Amazon EC2 User Guide for Linux Instances. public let imageId: String? - /// Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide. + /// Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring. For more information, see Configure monitoring for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide. public let instanceMonitoring: InstanceMonitoring? /// The instance type for the instances. For information about available instance types, see Available instance types in the Amazon EC2 User Guide for Linux Instances. public let instanceType: String? /// The ID of the kernel associated with the AMI. public let kernelId: String? - /// The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances. + /// The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2 instances in the Amazon EC2 User Guide for Linux Instances. public let keyName: String? /// The Amazon Resource Name (ARN) of the launch configuration. public let launchConfigurationARN: String? /// The name of the launch configuration. public let launchConfigurationName: String? - /// The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the Amazon EC2 Auto Scaling User Guide. + /// The metadata options for the instances. For more information, see Configure the instance metadata options in the Amazon EC2 Auto Scaling User Guide. public let metadataOptions: InstanceMetadataOptions? - /// The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. For more information, see Configuring instance tenancy with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. + /// The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. public let placementTenancy: String? /// The ID of the RAM disk associated with the AMI. public let ramdiskId: String? - /// A list that contains the security groups to assign to the instances in the Auto Scaling group. 
For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide. + /// A list that contains the security groups to assign to the instances in the Auto Scaling group. For more information, see Control traffic to your Amazon Web Services resources using security groups in the Amazon Virtual Private Cloud User Guide. @OptionalCustomCoding> public var securityGroups: [String]? - /// The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Requesting Spot Instances in the Amazon EC2 Auto Scaling User Guide. + /// The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Requesting Spot Instances for fault-tolerant and flexible applications in the Amazon EC2 Auto Scaling User Guide. public let spotPrice: String? /// The user data to make available to the launched EC2 instances. For more information, see Instance metadata and user data (Linux) and Instance metadata and user data (Windows). If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB. public let userData: String? @@ -3404,11 +3404,11 @@ extension AutoScaling { public struct LaunchTemplateOverrides: AWSEncodableShape & AWSDecodableShape { /// The instance requirements. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types. You can specify up to four separate sets of instance requirements per Auto Scaling group. This is useful for provisioning instances from different Amazon Machine Images (AMIs) in the same Auto Scaling group. To do this, create the AMIs and create a new launch template for each AMI. Then, create a compatible set of instance requirements for each launch template. If you specify InstanceRequirements, you can't specify InstanceType. public let instanceRequirements: InstanceRequirements? - /// The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon Elastic Compute Cloud User Guide. You can specify up to 40 instance types per Auto Scaling group. + /// The instance type, such as m3.xlarge. You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see Instance types in the Amazon EC2 User Guide for Linux Instances. You can specify up to 40 instance types per Auto Scaling group. public let instanceType: String? /// Provides a launch template for the specified instance type or set of instance requirements. For example, some instance types might require a launch template with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's specified in the LaunchTemplate definition. For more information, see Specifying a different launch template for an instance type in the Amazon EC2 Auto Scaling User Guide. You can specify up to 20 launch templates per Auto Scaling group. The launch templates specified in the overrides and in the LaunchTemplate definition count towards this limit. 
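Editor's note: the LaunchTemplateOverrides shape documented here pairs an instance type with an optional capacity weight (its memberwise initializer appears a few lines below). A sketch, assuming illustrative instance types and weights:

// Two overrides for a mixed instances policy: the larger type counts as 4 capacity units,
// the smaller as 2, so DesiredCapacity/MaxSize/MinSize are expressed in those same units.
let overrides = [
    AutoScaling.LaunchTemplateOverrides(instanceType: "m5.xlarge", weightedCapacity: "4"),
    AutoScaling.LaunchTemplateOverrides(instanceType: "m5.large", weightedCapacity: "2"),
]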
public let launchTemplateSpecification: LaunchTemplateSpecification? - /// If you provide a list of instance types to use, you can specify the number of capacity units provided by each instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see Configuring instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999. If you specify a value for WeightedCapacity for one instance type, you must specify a value for WeightedCapacity for all of them. Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances. + /// If you provide a list of instance types to use, you can specify the number of capacity units provided by each instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see Configure an Auto Scaling group to use instance weights in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999. If you specify a value for WeightedCapacity for one instance type, you must specify a value for WeightedCapacity for all of them. Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, and MinSize). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances. public let weightedCapacity: String? public init(instanceRequirements: InstanceRequirements? = nil, instanceType: String? = nil, launchTemplateSpecification: LaunchTemplateSpecification? = nil, weightedCapacity: String? = nil) { @@ -3528,7 +3528,7 @@ extension AutoScaling { public let notificationMetadata: String? /// The Amazon Resource Name (ARN) of the notification target that Amazon EC2 Auto Scaling sends notifications to when an instance is in a wait state for the lifecycle hook. You can specify an Amazon SNS topic or an Amazon SQS queue. public let notificationTargetARN: String? - /// The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. 
For information about creating this role, see Configure a notification target for a lifecycle hook in the Amazon EC2 Auto Scaling User Guide. Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue. + /// The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see Prepare to add a lifecycle hook to your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue. public let roleARN: String? public init(defaultResult: String? = nil, heartbeatTimeout: Int? = nil, lifecycleHookName: String? = nil, lifecycleTransition: String? = nil, notificationMetadata: String? = nil, notificationTargetARN: String? = nil, roleARN: String? = nil) { @@ -3947,7 +3947,7 @@ extension AutoScaling { } public struct PredictiveScalingConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to HonorMaxCapacity if not specified. The following are possible values: HonorMaxCapacity - Amazon EC2 Auto Scaling cannot scale out capacity higher than the maximum capacity. The maximum capacity is enforced as a hard limit. IncreaseMaxCapacity - Amazon EC2 Auto Scaling can scale out capacity higher than the maximum capacity when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for MaxCapacityBuffer. + /// Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to HonorMaxCapacity if not specified. The following are possible values: HonorMaxCapacity - Amazon EC2 Auto Scaling can't increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity. IncreaseMaxCapacity - Amazon EC2 Auto Scaling can increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for MaxCapacityBuffer. Use caution when allowing the maximum capacity to be automatically increased. This can lead to more instances being launched than intended if the increased maximum capacity is not monitored and managed. The increased maximum capacity then becomes the new normal maximum capacity for the Auto Scaling group until you manually update it. The maximum capacity does not automatically decrease back to the original maximum. public let maxCapacityBreachBehavior: PredictiveScalingMaxCapacityBreachBehavior? /// The size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. For example, if the buffer is 10, this means a 10 percent buffer, such that if the forecast capacity is 50, and the maximum capacity is 40, then the effective maximum capacity is 55. If set to 0, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. Required if the MaxCapacityBreachBehavior property is set to IncreaseMaxCapacity, and cannot be used otherwise. public let maxCapacityBuffer: Int? @@ -4292,7 +4292,7 @@ extension AutoScaling { public let autoScalingGroupName: String? 
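Editor's note: a worked example of the MaxCapacityBuffer rule described in the PredictiveScalingConfiguration doc comment above. This is only an illustration of the documented arithmetic; the service performs this calculation itself, and the function below is hypothetical:

// With IncreaseMaxCapacity, the effective maximum is the forecast plus the buffer percentage,
// never lower than the configured maximum capacity.
func effectiveMaxCapacity(forecast: Int, maxCapacity: Int, bufferPercent: Int) -> Int {
    let buffered = Int((Double(forecast) * (1.0 + Double(bufferPercent) / 100.0)).rounded())
    return max(maxCapacity, buffered)
}
// Matches the documented example: forecast 50, maximum 40, buffer 10 -> effective maximum 55.
let effective = effectiveMaxCapacity(forecast: 50, maxCapacity: 40, bufferPercent: 10)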
/// A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown. Valid only if the policy type is SimpleScaling. For more information, see Scaling cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Default: None public let cooldown: Int? - /// Indicates whether the scaling policy is enabled or disabled. The default is enabled. For more information, see Disabling a scaling policy for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. + /// Indicates whether the scaling policy is enabled or disabled. The default is enabled. For more information, see Disable a scaling policy for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. public let enabled: Bool? /// Not needed if the default instance warmup is defined for the group. The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. This warm-up period applies to instances launched due to a specific target tracking or step scaling policy. When a warm-up period is specified here, it overrides the default instance warmup. Valid only if the policy type is TargetTrackingScaling or StepScaling. The default is to use the value for the default instance warmup defined for the group. If default instance warmup is null, then EstimatedInstanceWarmup falls back to the value of default cooldown. public let estimatedInstanceWarmup: Int? @@ -4526,7 +4526,7 @@ extension AutoScaling { public let autoRollback: Bool? /// (Optional) The amount of time, in seconds, to wait after a checkpoint before continuing. This property is optional, but if you specify a value for it, you must also specify a value for CheckpointPercentages. If you specify a value for CheckpointPercentages and not for CheckpointDelay, the CheckpointDelay defaults to 3600 (1 hour). public let checkpointDelay: Int? - /// (Optional) Threshold values for each checkpoint in ascending order. Each number must be unique. To replace all instances in the Auto Scaling group, the last number in the array must be 100. For usage examples, see Adding checkpoints to an instance refresh in the Amazon EC2 Auto Scaling User Guide. + /// (Optional) Threshold values for each checkpoint in ascending order. Each number must be unique. To replace all instances in the Auto Scaling group, the last number in the array must be 100. For usage examples, see Add checkpoints to an instance refresh in the Amazon EC2 Auto Scaling User Guide. @OptionalCustomCoding> public var checkpointPercentages: [Int]? /// A time period, in seconds, during which an instance refresh waits before moving on to replacing the next instance after a new instance enters the InService state. This property is not required for normal usage. Instead, use the DefaultInstanceWarmup property of the Auto Scaling group. The InstanceWarmup and DefaultInstanceWarmup properties work the same way. Only specify this property if you must override the DefaultInstanceWarmup property. If you do not specify this property, the instance warmup by default is the value of the DefaultInstanceWarmup property, if defined (which is recommended in all cases), or the HealthCheckGracePeriod property otherwise. @@ -4905,7 +4905,7 @@ extension AutoScaling { public let healthStatus: String? /// The ID of the instance. public let instanceId: String? 
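Editor's note: the checkpoint properties above (ascending, unique values ending in 100, with CheckpointDelay defaulting to 3600 seconds) belong to the instance refresh preferences shape. A sketch, assuming the RefreshPreferences type name and its memberwise initializer, neither of which is quoted in this hunk:

// Replace instances in three waves, pausing one hour (the documented default) after each checkpoint.
let preferences = AutoScaling.RefreshPreferences(
    checkpointDelay: 3600,                 // seconds to wait after reaching each checkpoint
    checkpointPercentages: [25, 50, 100]   // ascending and unique; the final 100 replaces all instances
)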
- /// If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with the group. For more information about the health check grace period, see CreateAutoScalingGroup in the Amazon EC2 Auto Scaling API Reference. + /// If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod specified for the group, by default, this call respects the grace period. Set this to False, to have the call not respect the grace period associated with the group. For more information about the health check grace period, see Set the health check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. public let shouldRespectGracePeriod: Bool? public init(healthStatus: String? = nil, instanceId: String? = nil, shouldRespectGracePeriod: Bool? = nil) { @@ -5191,7 +5191,7 @@ extension AutoScaling { try self.validate(self.expression, name: "expression", parent: name, max: 2047) try self.validate(self.expression, name: "expression", parent: name, min: 1) try self.validate(self.expression, name: "expression", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") - try self.validate(self.id, name: "id", parent: name, max: 255) + try self.validate(self.id, name: "id", parent: name, max: 64) try self.validate(self.id, name: "id", parent: name, min: 1) try self.validate(self.id, name: "id", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") try self.validate(self.label, name: "label", parent: name, max: 2047) @@ -5355,11 +5355,11 @@ extension AutoScaling { public let defaultInstanceWarmup: Int? /// The desired capacity is the initial capacity of the Auto Scaling group after this operation completes and the capacity it attempts to maintain. This number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. public let desiredCapacity: Int? - /// The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Creating an Auto Scaling group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances. Valid values: units | vcpu | memory-mib + /// The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance type selection only. For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide. By default, Amazon EC2 Auto Scaling specifies units, which translates into number of instances. Valid values: units | vcpu | memory-mib public let desiredCapacityType: String? /// The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. This is useful if your instances do not immediately pass their health checks after they enter the InService state. For more information, see Set the health check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. 
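Editor's note: the initializer shown above (healthStatus, instanceId, shouldRespectGracePeriod) lets a caller override the group's health check grace period. A sketch; the SetInstanceHealthQuery type name and the setInstanceHealth client method are assumed, and the instance ID is hypothetical:

// Mark an instance Unhealthy immediately instead of waiting out HealthCheckGracePeriod.
let healthRequest = AutoScaling.SetInstanceHealthQuery(
    healthStatus: "Unhealthy",
    instanceId: "i-0123456789abcdef0",
    shouldRespectGracePeriod: false
)
try await autoScaling.setInstanceHealth(healthRequest)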
public let healthCheckGracePeriod: Int? - /// A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set. + /// A comma-separated value string of one or more health check types. The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot be disabled. For more information, see Health checks for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that was previously set. public let healthCheckType: String? /// An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide. public let instanceMaintenancePolicy: InstanceMaintenancePolicy? @@ -5375,13 +5375,13 @@ extension AutoScaling { public let minSize: Int? /// The mixed instances policy. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. public let mixedInstancesPolicy: MixedInstancesPolicy? - /// Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Using instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. + /// Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see Use instance scale-in protection in the Amazon EC2 Auto Scaling User Guide. public let newInstancesProtectedFromScaleIn: Bool? /// The name of an existing placement group into which to launch your instances. For more information, see Placement groups in the Amazon EC2 User Guide for Linux Instances. A cluster placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group. public let placementGroup: String? /// The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other Amazon Web Services on your behalf. For more information, see Service-linked roles in the Amazon EC2 Auto Scaling User Guide. public let serviceLinkedRoleARN: String? - /// A policy or a list of policies that are used to select the instances to terminate. The policies are executed in the order that you list them. For more information, see Work with Amazon EC2 Auto Scaling termination policies in the Amazon EC2 Auto Scaling User Guide. Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias + /// A policy or a list of policies that are used to select the instances to terminate. The policies are executed in the order that you list them. For more information, see Configure termination policies for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. 
Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | NewestInstance | OldestInstance | OldestLaunchConfiguration | OldestLaunchTemplate | arn:aws:lambda:region:account-id:function:my-function:my-alias @OptionalCustomCoding> public var terminationPolicies: [String]? /// A comma-separated list of subnet IDs for a virtual private cloud (VPC). If you specify VPCZoneIdentifier with AvailabilityZones, the subnets that you specify must reside in those Availability Zones. diff --git a/Sources/Soto/Services/B2bi/B2bi_shapes.swift b/Sources/Soto/Services/B2bi/B2bi_shapes.swift index 1de087a1fa..861377db03 100644 --- a/Sources/Soto/Services/B2bi/B2bi_shapes.swift +++ b/Sources/Soto/Services/B2bi/B2bi_shapes.swift @@ -263,7 +263,7 @@ extension B2bi { public struct CreatePartnershipRequest: AWSEncodableShape { /// Specifies a list of the capabilities associated with this partnership. - public let capabilities: [String]? + public let capabilities: [String] /// Reserved for future use. public let clientToken: String? /// Specifies the email address associated with this trading partner. @@ -277,7 +277,7 @@ extension B2bi { /// Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose. public let tags: [Tag]? - public init(capabilities: [String]? = nil, clientToken: String? = CreatePartnershipRequest.idempotencyToken(), email: String, name: String, phone: String? = nil, profileId: String, tags: [Tag]? = nil) { + public init(capabilities: [String], clientToken: String? = CreatePartnershipRequest.idempotencyToken(), email: String, name: String, phone: String? = nil, profileId: String, tags: [Tag]? = nil) { self.capabilities = capabilities self.clientToken = clientToken self.email = email @@ -288,7 +288,7 @@ extension B2bi { } public func validate(name: String) throws { - try self.capabilities?.forEach { + try self.capabilities.forEach { try validate($0, name: "capabilities[]", parent: name, max: 64) try validate($0, name: "capabilities[]", parent: name, min: 1) try validate($0, name: "capabilities[]", parent: name, pattern: "^[a-zA-Z0-9_-]+$") @@ -474,7 +474,7 @@ extension B2bi { public let ediType: EdiType /// Specifies that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat - /// Specifies the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String /// Specifies the name of the transformer, used to identify it. public let name: String @@ -523,7 +523,7 @@ extension B2bi { public let ediType: EdiType /// Returns that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat - /// Returns the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String /// Returns the name of the transformer, used to identify it. 
public let name: String @@ -982,7 +982,7 @@ extension B2bi { public let ediType: EdiType /// Returns that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat - /// Returns the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String /// Returns a timestamp for last time the transformer was modified. @OptionalCustomCoding @@ -1454,7 +1454,7 @@ extension B2bi { public let fileFormat: FileFormat /// Specify the contents of the EDI (electronic data interchange) XML or JSON file that is used as input for the transform. public let inputFileContent: String - /// Specifies the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String public init(fileFormat: FileFormat, inputFileContent: String, mappingTemplate: String) { @@ -1534,7 +1534,7 @@ extension B2bi { public let ediType: EdiType /// Returns that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat - /// Returns the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String /// Returns a timestamp representing the date and time for the most recent change for the transformer object. @OptionalCustomCoding @@ -1895,7 +1895,7 @@ extension B2bi { public let ediType: EdiType? /// Specifies that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat? - /// Specifies the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String? /// Specify a new name for the transformer, if you want to update it. public let name: String? @@ -1956,7 +1956,7 @@ extension B2bi { public let ediType: EdiType /// Returns that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat - /// Returns the name of the mapping template for the transformer. This template is used to convert the input document into the correct set of objects. + /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. public let mappingTemplate: String /// Returns a timestamp for last time the transformer was modified. 
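Editor's note: the B2bi change above makes CreatePartnershipRequest.capabilities a required array rather than an optional. A sketch of constructing the request under the updated initializer shown in this diff; the capability ID, profile ID and contact details are illustrative only:

// `capabilities` must now always be supplied; clientToken, phone and tags keep their defaults.
let partnershipRequest = B2bi.CreatePartnershipRequest(
    capabilities: ["ca-example1234567890"],
    email: "trading-partner@example.com",
    name: "Example partner",
    profileId: "p-example1234567890"
)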
@CustomCoding diff --git a/Sources/Soto/Services/BackupStorage/BackupStorage_api.swift b/Sources/Soto/Services/BackupStorage/BackupStorage_api.swift deleted file mode 100644 index e77f8287e7..0000000000 --- a/Sources/Soto/Services/BackupStorage/BackupStorage_api.swift +++ /dev/null @@ -1,270 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2023 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -@_exported import SotoCore - -/// Service object for interacting with AWS BackupStorage service. -/// -/// The frontend service for Cryo Storage. -public struct BackupStorage: AWSService { - // MARK: Member variables - - /// Client used for communication with AWS - public let client: AWSClient - /// Service configuration - public let config: AWSServiceConfig - - // MARK: Initialization - - /// Initialize the BackupStorage client - /// - parameters: - /// - client: AWSClient used to process requests - /// - region: Region of server you want to communicate with. This will override the partition parameter. - /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). - /// - endpoint: Custom endpoint URL to use instead of standard AWS servers - /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded - /// - timeout: Timeout value for HTTP requests - /// - byteBufferAllocator: Allocator for ByteBuffers - /// - options: Service options - public init( - client: AWSClient, - region: SotoCore.Region? = nil, - partition: AWSPartition = .aws, - endpoint: String? = nil, - middleware: AWSMiddlewareProtocol? = nil, - timeout: TimeAmount? = nil, - byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), - options: AWSServiceConfig.Options = [] - ) { - self.client = client - self.config = AWSServiceConfig( - region: region, - partition: region?.partition ?? partition, - serviceName: "BackupStorage", - serviceIdentifier: "backupstorage", - signingName: "backup-storage", - serviceProtocol: .restjson, - apiVersion: "2018-04-10", - endpoint: endpoint, - errorType: BackupStorageErrorType.self, - middleware: middleware, - timeout: timeout, - byteBufferAllocator: byteBufferAllocator, - options: options - ) - } - - - - - - // MARK: API Calls - - /// Delete Object from the incremental base Backup. - @Sendable - public func deleteObject(_ input: DeleteObjectInput, logger: Logger = AWSClient.loggingDisabled) async throws { - return try await self.client.execute( - operation: "DeleteObject", - path: "/backup-jobs/{BackupJobId}/object/{ObjectName}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Gets the specified object's chunk. 
- @Sendable - public func getChunk(_ input: GetChunkInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetChunkOutput { - return try await self.client.execute( - operation: "GetChunk", - path: "/restore-jobs/{StorageJobId}/chunk/{ChunkToken}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Get metadata associated with an Object. - @Sendable - public func getObjectMetadata(_ input: GetObjectMetadataInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetObjectMetadataOutput { - return try await self.client.execute( - operation: "GetObjectMetadata", - path: "/restore-jobs/{StorageJobId}/object/{ObjectToken}/metadata", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// List chunks in a given Object - @Sendable - public func listChunks(_ input: ListChunksInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListChunksOutput { - return try await self.client.execute( - operation: "ListChunks", - path: "/restore-jobs/{StorageJobId}/chunks/{ObjectToken}/list", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// List all Objects in a given Backup. - @Sendable - public func listObjects(_ input: ListObjectsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListObjectsOutput { - return try await self.client.execute( - operation: "ListObjects", - path: "/restore-jobs/{StorageJobId}/objects/list", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Complete upload - @Sendable - public func notifyObjectComplete(_ input: NotifyObjectCompleteInput, logger: Logger = AWSClient.loggingDisabled) async throws -> NotifyObjectCompleteOutput { - return try await self.client.execute( - operation: "NotifyObjectComplete", - path: "/backup-jobs/{BackupJobId}/object/{UploadId}/complete", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Upload chunk. - @Sendable - public func putChunk(_ input: PutChunkInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PutChunkOutput { - return try await self.client.execute( - operation: "PutChunk", - path: "/backup-jobs/{BackupJobId}/chunk/{UploadId}/{ChunkIndex}", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Upload object that can store object metadata String and data blob in single API call using inline chunk field. - @Sendable - public func putObject(_ input: PutObjectInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PutObjectOutput { - return try await self.client.execute( - operation: "PutObject", - path: "/backup-jobs/{BackupJobId}/object/{ObjectName}/put-object", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// Start upload containing one or many chunks. - @Sendable - public func startObject(_ input: StartObjectInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartObjectOutput { - return try await self.client.execute( - operation: "StartObject", - path: "/backup-jobs/{BackupJobId}/object/{ObjectName}", - httpMethod: .PUT, - serviceConfig: self.config, - input: input, - logger: logger - ) - } -} - -extension BackupStorage { - /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. 
You are not able to use this initializer directly as there are not public - /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. - public init(from: BackupStorage, patch: AWSServiceConfig.Patch) { - self.client = from.client - self.config = from.config.with(patch: patch) - } -} - -// MARK: Paginators - -@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) -extension BackupStorage { - /// List chunks in a given Object - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listChunksPaginator( - _ input: ListChunksInput, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listChunks, - inputKey: \ListChunksInput.nextToken, - outputKey: \ListChunksOutput.nextToken, - logger: logger - ) - } - - /// List all Objects in a given Backup. - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listObjectsPaginator( - _ input: ListObjectsInput, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listObjects, - inputKey: \ListObjectsInput.nextToken, - outputKey: \ListObjectsOutput.nextToken, - logger: logger - ) - } -} - -extension BackupStorage.ListChunksInput: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> BackupStorage.ListChunksInput { - return .init( - maxResults: self.maxResults, - nextToken: token, - objectToken: self.objectToken, - storageJobId: self.storageJobId - ) - } -} - -extension BackupStorage.ListObjectsInput: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> BackupStorage.ListObjectsInput { - return .init( - createdAfter: self.createdAfter, - createdBefore: self.createdBefore, - maxResults: self.maxResults, - nextToken: token, - startingObjectName: self.startingObjectName, - startingObjectPrefix: self.startingObjectPrefix, - storageJobId: self.storageJobId - ) - } -} diff --git a/Sources/Soto/Services/BackupStorage/BackupStorage_shapes.swift b/Sources/Soto/Services/BackupStorage/BackupStorage_shapes.swift deleted file mode 100644 index 7b5e754709..0000000000 --- a/Sources/Soto/Services/BackupStorage/BackupStorage_shapes.swift +++ /dev/null @@ -1,660 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2023 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. 
- -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_spi(SotoInternal) import SotoCore - -extension BackupStorage { - // MARK: Enums - - public enum DataChecksumAlgorithm: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case sha256 = "SHA256" - public var description: String { return self.rawValue } - } - - public enum SummaryChecksumAlgorithm: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case summary = "SUMMARY" - public var description: String { return self.rawValue } - } - - // MARK: Shapes - - public struct BackupObject: AWSDecodableShape { - /// Number of chunks in object - public let chunksCount: Int64? - /// Metadata string associated with the Object - public let metadataString: String? - /// Object name - public let name: String - /// Object checksum - public let objectChecksum: String - /// Checksum algorithm - public let objectChecksumAlgorithm: SummaryChecksumAlgorithm - /// Object token - public let objectToken: String - - public init(chunksCount: Int64? = nil, metadataString: String? = nil, name: String, objectChecksum: String, objectChecksumAlgorithm: SummaryChecksumAlgorithm, objectToken: String) { - self.chunksCount = chunksCount - self.metadataString = metadataString - self.name = name - self.objectChecksum = objectChecksum - self.objectChecksumAlgorithm = objectChecksumAlgorithm - self.objectToken = objectToken - } - - private enum CodingKeys: String, CodingKey { - case chunksCount = "ChunksCount" - case metadataString = "MetadataString" - case name = "Name" - case objectChecksum = "ObjectChecksum" - case objectChecksumAlgorithm = "ObjectChecksumAlgorithm" - case objectToken = "ObjectToken" - } - } - - public struct Chunk: AWSDecodableShape { - /// Chunk checksum - public let checksum: String - /// Checksum algorithm - public let checksumAlgorithm: DataChecksumAlgorithm - /// Chunk token - public let chunkToken: String - /// Chunk index - public let index: Int64 - /// Chunk length - public let length: Int64 - - public init(checksum: String, checksumAlgorithm: DataChecksumAlgorithm, chunkToken: String, index: Int64, length: Int64) { - self.checksum = checksum - self.checksumAlgorithm = checksumAlgorithm - self.chunkToken = chunkToken - self.index = index - self.length = length - } - - private enum CodingKeys: String, CodingKey { - case checksum = "Checksum" - case checksumAlgorithm = "ChecksumAlgorithm" - case chunkToken = "ChunkToken" - case index = "Index" - case length = "Length" - } - } - - public struct DeleteObjectInput: AWSEncodableShape { - /// Backup job Id for the in-progress backup. - public let backupJobId: String - /// The name of the Object. - public let objectName: String - - public init(backupJobId: String, objectName: String) { - self.backupJobId = backupJobId - self.objectName = objectName - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.backupJobId, key: "BackupJobId") - request.encodePath(self.objectName, key: "ObjectName") - } - - private enum CodingKeys: CodingKey {} - } - - public struct GetChunkInput: AWSEncodableShape { - /// Chunk token - public let chunkToken: String - /// Storage job id - public let storageJobId: String - - public init(chunkToken: String, storageJobId: String) { - self.chunkToken = chunkToken - self.storageJobId = storageJobId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.chunkToken, key: "ChunkToken") - request.encodePath(self.storageJobId, key: "StorageJobId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct GetChunkOutput: AWSDecodableShape { - public static let _options: AWSShapeOptions = [.rawPayload] - /// Data checksum - public let checksum: String - /// Checksum algorithm - public let checksumAlgorithm: DataChecksumAlgorithm - /// Chunk data - public let data: AWSHTTPBody - /// Data length - public let length: Int64 - - public init(checksum: String, checksumAlgorithm: DataChecksumAlgorithm, data: AWSHTTPBody, length: Int64) { - self.checksum = checksum - self.checksumAlgorithm = checksumAlgorithm - self.data = data - self.length = length - } - - public init(from decoder: Decoder) throws { - let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer - let container = try decoder.singleValueContainer() - self.checksum = try response.decodeHeader(String.self, key: "x-amz-checksum") - self.checksumAlgorithm = try response.decodeHeader(DataChecksumAlgorithm.self, key: "x-amz-checksum-algorithm") - self.data = try container.decode(AWSHTTPBody.self) - self.length = try response.decodeHeader(Int64.self, key: "x-amz-data-length") - } - - private enum CodingKeys: CodingKey {} - } - - public struct GetObjectMetadataInput: AWSEncodableShape { - /// Object token. - public let objectToken: String - /// Backup job id for the in-progress backup. - public let storageJobId: String - - public init(objectToken: String, storageJobId: String) { - self.objectToken = objectToken - self.storageJobId = storageJobId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.objectToken, key: "ObjectToken") - request.encodePath(self.storageJobId, key: "StorageJobId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct GetObjectMetadataOutput: AWSDecodableShape { - public static let _options: AWSShapeOptions = [.rawPayload] - /// Metadata blob. - public let metadataBlob: AWSHTTPBody - /// MetadataBlob checksum. - public let metadataBlobChecksum: String? - /// Checksum algorithm. - public let metadataBlobChecksumAlgorithm: DataChecksumAlgorithm? - /// The size of MetadataBlob. - public let metadataBlobLength: Int64? - /// Metadata string. - public let metadataString: String? - - public init(metadataBlob: AWSHTTPBody, metadataBlobChecksum: String? = nil, metadataBlobChecksumAlgorithm: DataChecksumAlgorithm? = nil, metadataBlobLength: Int64? = nil, metadataString: String? 
= nil) { - self.metadataBlob = metadataBlob - self.metadataBlobChecksum = metadataBlobChecksum - self.metadataBlobChecksumAlgorithm = metadataBlobChecksumAlgorithm - self.metadataBlobLength = metadataBlobLength - self.metadataString = metadataString - } - - public init(from decoder: Decoder) throws { - let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer - let container = try decoder.singleValueContainer() - self.metadataBlob = try container.decode(AWSHTTPBody.self) - self.metadataBlobChecksum = try response.decodeHeaderIfPresent(String.self, key: "x-amz-checksum") - self.metadataBlobChecksumAlgorithm = try response.decodeHeaderIfPresent(DataChecksumAlgorithm.self, key: "x-amz-checksum-algorithm") - self.metadataBlobLength = try response.decodeHeaderIfPresent(Int64.self, key: "x-amz-data-length") - self.metadataString = try response.decodeHeaderIfPresent(String.self, key: "x-amz-metadata-string") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListChunksInput: AWSEncodableShape { - /// Maximum number of chunks - public let maxResults: Int? - /// Pagination token - public let nextToken: String? - /// Object token - public let objectToken: String - /// Storage job id - public let storageJobId: String - - public init(maxResults: Int? = nil, nextToken: String? = nil, objectToken: String, storageJobId: String) { - self.maxResults = maxResults - self.nextToken = nextToken - self.objectToken = objectToken - self.storageJobId = storageJobId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.maxResults, key: "max-results") - request.encodeQuery(self.nextToken, key: "next-token") - request.encodePath(self.objectToken, key: "ObjectToken") - request.encodePath(self.storageJobId, key: "StorageJobId") - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListChunksOutput: AWSDecodableShape { - /// List of chunks - public let chunkList: [Chunk] - /// Pagination token - public let nextToken: String? - - public init(chunkList: [Chunk], nextToken: String? = nil) { - self.chunkList = chunkList - self.nextToken = nextToken - } - - private enum CodingKeys: String, CodingKey { - case chunkList = "ChunkList" - case nextToken = "NextToken" - } - } - - public struct ListObjectsInput: AWSEncodableShape { - /// (Optional) Created after filter - public let createdAfter: Date? - /// (Optional) Created before filter - public let createdBefore: Date? - /// Maximum objects count - public let maxResults: Int? - /// Pagination token - public let nextToken: String? - /// Optional, specifies the starting Object name to list from. Ignored if NextToken is not NULL - public let startingObjectName: String? - /// Optional, specifies the starting Object prefix to list from. Ignored if NextToken is not NULL - public let startingObjectPrefix: String? - /// Storage job id - public let storageJobId: String - - public init(createdAfter: Date? = nil, createdBefore: Date? = nil, maxResults: Int? = nil, nextToken: String? = nil, startingObjectName: String? = nil, startingObjectPrefix: String? 
= nil, storageJobId: String) { - self.createdAfter = createdAfter - self.createdBefore = createdBefore - self.maxResults = maxResults - self.nextToken = nextToken - self.startingObjectName = startingObjectName - self.startingObjectPrefix = startingObjectPrefix - self.storageJobId = storageJobId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.createdAfter, key: "created-after") - request.encodeQuery(self.createdBefore, key: "created-before") - request.encodeQuery(self.maxResults, key: "max-results") - request.encodeQuery(self.nextToken, key: "next-token") - request.encodeQuery(self.startingObjectName, key: "starting-object-name") - request.encodeQuery(self.startingObjectPrefix, key: "starting-object-prefix") - request.encodePath(self.storageJobId, key: "StorageJobId") - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListObjectsOutput: AWSDecodableShape { - /// Pagination token - public let nextToken: String? - /// Object list - public let objectList: [BackupObject] - - public init(nextToken: String? = nil, objectList: [BackupObject]) { - self.nextToken = nextToken - self.objectList = objectList - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "NextToken" - case objectList = "ObjectList" - } - } - - public struct NotifyObjectCompleteInput: AWSEncodableShape { - public static let _options: AWSShapeOptions = [.allowStreaming, .allowChunkedStreaming] - /// Backup job Id for the in-progress backup - public let backupJobId: String - /// Optional metadata associated with an Object. Maximum length is 4MB. - public let metadataBlob: AWSHTTPBody? - /// Checksum of MetadataBlob. - public let metadataBlobChecksum: String? - /// Checksum algorithm. - public let metadataBlobChecksumAlgorithm: DataChecksumAlgorithm? - /// The size of MetadataBlob. - public let metadataBlobLength: Int64? - /// Optional metadata associated with an Object. Maximum string length is 256 bytes. - public let metadataString: String? - /// Object checksum - public let objectChecksum: String - /// Checksum algorithm - public let objectChecksumAlgorithm: SummaryChecksumAlgorithm - /// Upload Id for the in-progress upload - public let uploadId: String - - public init(backupJobId: String, metadataBlob: AWSHTTPBody? = nil, metadataBlobChecksum: String? = nil, metadataBlobChecksumAlgorithm: DataChecksumAlgorithm? = nil, metadataBlobLength: Int64? = nil, metadataString: String? = nil, objectChecksum: String, objectChecksumAlgorithm: SummaryChecksumAlgorithm, uploadId: String) { - self.backupJobId = backupJobId - self.metadataBlob = metadataBlob - self.metadataBlobChecksum = metadataBlobChecksum - self.metadataBlobChecksumAlgorithm = metadataBlobChecksumAlgorithm - self.metadataBlobLength = metadataBlobLength - self.metadataString = metadataString - self.objectChecksum = objectChecksum - self.objectChecksumAlgorithm = objectChecksumAlgorithm - self.uploadId = uploadId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.singleValueContainer() - request.encodePath(self.backupJobId, key: "BackupJobId") - try container.encode(self.metadataBlob) - request.encodeQuery(self.metadataBlobChecksum, key: "metadata-checksum") - request.encodeQuery(self.metadataBlobChecksumAlgorithm, key: "metadata-checksum-algorithm") - request.encodeQuery(self.metadataBlobLength, key: "metadata-blob-length") - request.encodeQuery(self.metadataString, key: "metadata-string") - request.encodeQuery(self.objectChecksum, key: "checksum") - request.encodeQuery(self.objectChecksumAlgorithm, key: "checksum-algorithm") - request.encodePath(self.uploadId, key: "UploadId") - } - - public func validate(name: String) throws { - try self.validate(self.metadataString, name: "metadataString", parent: name, pattern: "^.{1,256}$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct NotifyObjectCompleteOutput: AWSDecodableShape { - /// Object checksum - public let objectChecksum: String - /// Checksum algorithm - public let objectChecksumAlgorithm: SummaryChecksumAlgorithm - - public init(objectChecksum: String, objectChecksumAlgorithm: SummaryChecksumAlgorithm) { - self.objectChecksum = objectChecksum - self.objectChecksumAlgorithm = objectChecksumAlgorithm - } - - private enum CodingKeys: String, CodingKey { - case objectChecksum = "ObjectChecksum" - case objectChecksumAlgorithm = "ObjectChecksumAlgorithm" - } - } - - public struct PutChunkInput: AWSEncodableShape { - public static let _options: AWSShapeOptions = [.allowStreaming, .allowChunkedStreaming] - /// Backup job Id for the in-progress backup. - public let backupJobId: String - /// Data checksum - public let checksum: String - /// Checksum algorithm - public let checksumAlgorithm: DataChecksumAlgorithm - /// Describes this chunk's position relative to the other chunks - public let chunkIndex: Int64 - /// Data to be uploaded - public let data: AWSHTTPBody - /// Data length - public let length: Int64 - /// Upload Id for the in-progress upload. - public let uploadId: String - - public init(backupJobId: String, checksum: String, checksumAlgorithm: DataChecksumAlgorithm, chunkIndex: Int64, data: AWSHTTPBody, length: Int64 = 0, uploadId: String) { - self.backupJobId = backupJobId - self.checksum = checksum - self.checksumAlgorithm = checksumAlgorithm - self.chunkIndex = chunkIndex - self.data = data - self.length = length - self.uploadId = uploadId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.singleValueContainer() - request.encodePath(self.backupJobId, key: "BackupJobId") - request.encodeQuery(self.checksum, key: "checksum") - request.encodeQuery(self.checksumAlgorithm, key: "checksum-algorithm") - request.encodePath(self.chunkIndex, key: "ChunkIndex") - try container.encode(self.data) - request.encodeQuery(self.length, key: "length") - request.encodePath(self.uploadId, key: "UploadId") - } - - private enum CodingKeys: CodingKey {} - } - - public struct PutChunkOutput: AWSDecodableShape { - /// Chunk checksum - public let chunkChecksum: String - /// Checksum algorithm - public let chunkChecksumAlgorithm: DataChecksumAlgorithm - - public init(chunkChecksum: String, chunkChecksumAlgorithm: DataChecksumAlgorithm) { - self.chunkChecksum = chunkChecksum - self.chunkChecksumAlgorithm = chunkChecksumAlgorithm - } - - private enum CodingKeys: String, CodingKey { - case chunkChecksum = "ChunkChecksum" - case chunkChecksumAlgorithm = "ChunkChecksumAlgorithm" - } - } - - public struct PutObjectInput: AWSEncodableShape { - public static let _options: AWSShapeOptions = [.allowStreaming, .allowChunkedStreaming] - /// Backup job Id for the in-progress backup. - public let backupJobId: String - /// Inline chunk data to be uploaded. - public let inlineChunk: AWSHTTPBody? - /// Inline chunk checksum - public let inlineChunkChecksum: String? - /// Inline chunk checksum algorithm - public let inlineChunkChecksumAlgorithm: String? - /// Length of the inline chunk data. - public let inlineChunkLength: Int64? - /// Store user defined metadata like backup checksum, disk ids, restore metadata etc. - public let metadataString: String? - /// object checksum - public let objectChecksum: String? - /// object checksum algorithm - public let objectChecksumAlgorithm: SummaryChecksumAlgorithm? - /// The name of the Object to be uploaded. - public let objectName: String - /// Throw an exception if Object name is already exist. - public let throwOnDuplicate: Bool? - - public init(backupJobId: String, inlineChunk: AWSHTTPBody? = nil, inlineChunkChecksum: String? = nil, inlineChunkChecksumAlgorithm: String? = nil, inlineChunkLength: Int64? = nil, metadataString: String? = nil, objectChecksum: String? = nil, objectChecksumAlgorithm: SummaryChecksumAlgorithm? = nil, objectName: String, throwOnDuplicate: Bool? = nil) { - self.backupJobId = backupJobId - self.inlineChunk = inlineChunk - self.inlineChunkChecksum = inlineChunkChecksum - self.inlineChunkChecksumAlgorithm = inlineChunkChecksumAlgorithm - self.inlineChunkLength = inlineChunkLength - self.metadataString = metadataString - self.objectChecksum = objectChecksum - self.objectChecksumAlgorithm = objectChecksumAlgorithm - self.objectName = objectName - self.throwOnDuplicate = throwOnDuplicate - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.singleValueContainer() - request.encodePath(self.backupJobId, key: "BackupJobId") - try container.encode(self.inlineChunk) - request.encodeQuery(self.inlineChunkChecksum, key: "checksum") - request.encodeQuery(self.inlineChunkChecksumAlgorithm, key: "checksum-algorithm") - request.encodeQuery(self.inlineChunkLength, key: "length") - request.encodeQuery(self.metadataString, key: "metadata-string") - request.encodeQuery(self.objectChecksum, key: "object-checksum") - request.encodeQuery(self.objectChecksumAlgorithm, key: "object-checksum-algorithm") - request.encodePath(self.objectName, key: "ObjectName") - request.encodeQuery(self.throwOnDuplicate, key: "throwOnDuplicate") - } - - private enum CodingKeys: CodingKey {} - } - - public struct PutObjectOutput: AWSDecodableShape { - /// Inline chunk checksum - public let inlineChunkChecksum: String - /// Inline chunk checksum algorithm - public let inlineChunkChecksumAlgorithm: DataChecksumAlgorithm - /// object checksum - public let objectChecksum: String - /// object checksum algorithm - public let objectChecksumAlgorithm: SummaryChecksumAlgorithm - - public init(inlineChunkChecksum: String, inlineChunkChecksumAlgorithm: DataChecksumAlgorithm, objectChecksum: String, objectChecksumAlgorithm: SummaryChecksumAlgorithm) { - self.inlineChunkChecksum = inlineChunkChecksum - self.inlineChunkChecksumAlgorithm = inlineChunkChecksumAlgorithm - self.objectChecksum = objectChecksum - self.objectChecksumAlgorithm = objectChecksumAlgorithm - } - - private enum CodingKeys: String, CodingKey { - case inlineChunkChecksum = "InlineChunkChecksum" - case inlineChunkChecksumAlgorithm = "InlineChunkChecksumAlgorithm" - case objectChecksum = "ObjectChecksum" - case objectChecksumAlgorithm = "ObjectChecksumAlgorithm" - } - } - - public struct StartObjectInput: AWSEncodableShape { - /// Backup job Id for the in-progress backup - public let backupJobId: String - /// Name for the object. - public let objectName: String - /// Throw an exception if Object name is already exist. - public let throwOnDuplicate: Bool? - - public init(backupJobId: String, objectName: String, throwOnDuplicate: Bool? = nil) { - self.backupJobId = backupJobId - self.objectName = objectName - self.throwOnDuplicate = throwOnDuplicate - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.backupJobId, key: "BackupJobId") - request.encodePath(self.objectName, key: "ObjectName") - try container.encodeIfPresent(self.throwOnDuplicate, forKey: .throwOnDuplicate) - } - - private enum CodingKeys: String, CodingKey { - case throwOnDuplicate = "ThrowOnDuplicate" - } - } - - public struct StartObjectOutput: AWSDecodableShape { - /// Upload Id for a given upload. 
- public let uploadId: String - - public init(uploadId: String) { - self.uploadId = uploadId - } - - private enum CodingKeys: String, CodingKey { - case uploadId = "UploadId" - } - } -} - -// MARK: - Errors - -/// Error enum for BackupStorage -public struct BackupStorageErrorType: AWSErrorType { - enum Code: String { - case accessDeniedException = "AccessDeniedException" - case dataAlreadyExistsException = "DataAlreadyExistsException" - case illegalArgumentException = "IllegalArgumentException" - case kmsInvalidKeyUsageException = "KMSInvalidKeyUsageException" - case notReadableInputStreamException = "NotReadableInputStreamException" - case resourceNotFoundException = "ResourceNotFoundException" - case retryableException = "RetryableException" - case serviceInternalException = "ServiceInternalException" - case serviceUnavailableException = "ServiceUnavailableException" - case throttlingException = "ThrottlingException" - } - - private let error: Code - public let context: AWSErrorContext? - - /// initialize BackupStorage - public init?(errorCode: String, context: AWSErrorContext) { - guard let error = Code(rawValue: errorCode) else { return nil } - self.error = error - self.context = context - } - - internal init(_ error: Code) { - self.error = error - self.context = nil - } - - /// return error code string - public var errorCode: String { self.error.rawValue } - - public static var accessDeniedException: Self { .init(.accessDeniedException) } - /// Non-retryable exception. Attempted to create already existing object or chunk. This message contains a checksum of already presented data. - public static var dataAlreadyExistsException: Self { .init(.dataAlreadyExistsException) } - /// Non-retryable exception, indicates client error (wrong argument passed to API). See exception message for details. - public static var illegalArgumentException: Self { .init(.illegalArgumentException) } - /// Non-retryable exception. Indicates the KMS key usage is incorrect. See exception message for details. - public static var kmsInvalidKeyUsageException: Self { .init(.kmsInvalidKeyUsageException) } - /// Retryalble exception. Indicated issues while reading an input stream due to the networking issues or connection drop on the client side. - public static var notReadableInputStreamException: Self { .init(.notReadableInputStreamException) } - /// Non-retryable exception. Attempted to make an operation on non-existing or expired resource. - public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - /// Retryable exception. In general indicates internal failure that can be fixed by retry. - public static var retryableException: Self { .init(.retryableException) } - /// Deprecated. To be removed from the model. - public static var serviceInternalException: Self { .init(.serviceInternalException) } - /// Retryable exception, indicates internal server error. - public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) } - /// Increased rate over throttling limits. Can be retried with exponential backoff. - public static var throttlingException: Self { .init(.throttlingException) } -} - -extension BackupStorageErrorType: Equatable { - public static func == (lhs: BackupStorageErrorType, rhs: BackupStorageErrorType) -> Bool { - lhs.error == rhs.error - } -} - -extension BackupStorageErrorType: CustomStringConvertible { - public var description: String { - return "\(self.error.rawValue): \(self.message ?? 
"")" - } -} diff --git a/Sources/Soto/Services/Batch/Batch_api.swift b/Sources/Soto/Services/Batch/Batch_api.swift index 85330e7ee4..e89fbd9b8d 100644 --- a/Sources/Soto/Services/Batch/Batch_api.swift +++ b/Sources/Soto/Services/Batch/Batch_api.swift @@ -280,6 +280,19 @@ public struct Batch: AWSService { ) } + /// Provides a list of the first 100 RUNNABLE jobs associated to a single job queue. + @Sendable + public func getJobQueueSnapshot(_ input: GetJobQueueSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetJobQueueSnapshotResponse { + return try await self.client.execute( + operation: "GetJobQueueSnapshot", + path: "/v1/getjobqueuesnapshot", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of Batch jobs. You must specify only one of the following items: A job queue ID to return a list of jobs in that job queue A multi-node parallel job ID to return a list of nodes for that job An array job ID to return a list of the children for that job You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned. @Sendable public func listJobs(_ input: ListJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListJobsResponse { diff --git a/Sources/Soto/Services/Batch/Batch_shapes.swift b/Sources/Soto/Services/Batch/Batch_shapes.swift index 90624cc77b..30b6de81b2 100644 --- a/Sources/Soto/Services/Batch/Batch_shapes.swift +++ b/Sources/Soto/Services/Batch/Batch_shapes.swift @@ -2284,6 +2284,66 @@ extension Batch { } } + public struct FrontOfQueueDetail: AWSDecodableShape { + /// The Amazon Resource Names (ARNs) of the first 100 RUNNABLE jobs in a named job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage. + public let jobs: [FrontOfQueueJobSummary]? + /// The Unix timestamp (in milliseconds) for when each of the first 100 RUNNABLE jobs were last updated. + public let lastUpdatedAt: Int64? + + public init(jobs: [FrontOfQueueJobSummary]? = nil, lastUpdatedAt: Int64? = nil) { + self.jobs = jobs + self.lastUpdatedAt = lastUpdatedAt + } + + private enum CodingKeys: String, CodingKey { + case jobs = "jobs" + case lastUpdatedAt = "lastUpdatedAt" + } + } + + public struct FrontOfQueueJobSummary: AWSDecodableShape { + /// The Unix timestamp (in milliseconds) for when the job transitioned to its current position in the job queue. + public let earliestTimeAtPosition: Int64? + /// The ARN for a job in a named job queue. + public let jobArn: String? + + public init(earliestTimeAtPosition: Int64? = nil, jobArn: String? = nil) { + self.earliestTimeAtPosition = earliestTimeAtPosition + self.jobArn = jobArn + } + + private enum CodingKeys: String, CodingKey { + case earliestTimeAtPosition = "earliestTimeAtPosition" + case jobArn = "jobArn" + } + } + + public struct GetJobQueueSnapshotRequest: AWSEncodableShape { + /// The job queue’s name or full queue Amazon Resource Name (ARN). + public let jobQueue: String? + + public init(jobQueue: String? = nil) { + self.jobQueue = jobQueue + } + + private enum CodingKeys: String, CodingKey { + case jobQueue = "jobQueue" + } + } + + public struct GetJobQueueSnapshotResponse: AWSDecodableShape { + /// The list of the first 100 RUNNABLE jobs in each job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. 
For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage. + public let frontOfQueue: FrontOfQueueDetail? + + public init(frontOfQueue: FrontOfQueueDetail? = nil) { + self.frontOfQueue = frontOfQueue + } + + private enum CodingKeys: String, CodingKey { + case frontOfQueue = "frontOfQueue" + } + } + public struct Host: AWSEncodableShape & AWSDecodableShape { /// The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported. This parameter isn't applicable to jobs that run on Fargate resources. Don't provide this for these jobs. public let sourcePath: String? @@ -2770,7 +2830,7 @@ extension Batch { public let jobQueue: String? /// The job status used to filter jobs in the specified queue. If the filters parameter is specified, the jobStatus parameter is ignored and jobs with any status are returned. If you don't specify a status, only RUNNING jobs are returned. public let jobStatus: JobStatus? - /// The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable. + /// The maximum number of results returned by ListJobs in a paginated output. When this parameter is used, ListJobs returns up to maxResults results in a single page and a nextToken response element, if applicable. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. The following outlines key parameters and limitations: The minimum value is 1. When --job-status is used, Batch returns up to 1000 values. When --filters is used, Batch returns up to 100 values. If neither parameter is used, then ListJobs returns up to 1000 results (jobs that are in the RUNNING status) and a nextToken value, if applicable. public let maxResults: Int? /// The job ID for a multi-node parallel job. Specifying a multi-node parallel job ID with this parameter lists all nodes that are associated with the specified job. public let multiNodeJobId: String? 
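A usage sketch for the new GetJobQueueSnapshot operation added above; the queue name, region, and client setup are illustrative assumptions rather than part of the generated model:

import SotoCore
import SotoBatch

// Hedged sketch: fetch the first 100 RUNNABLE jobs at the front of a queue via
// the new GetJobQueueSnapshot operation. "my-job-queue" and the region are
// placeholder values; client construction follows the usual Soto pattern.
func printFrontOfQueue() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let batch = Batch(client: client, region: .useast1)

    let snapshot = try await batch.getJobQueueSnapshot(.init(jobQueue: "my-job-queue"))
    for job in snapshot.frontOfQueue?.jobs ?? [] {
        // Each summary carries the job ARN and when it reached its current queue position.
        print(job.jobArn ?? "<no ARN>", "at front since", job.earliestTimeAtPosition ?? 0)
    }
}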
diff --git a/Sources/Soto/Services/Bedrock/Bedrock_api.swift b/Sources/Soto/Services/Bedrock/Bedrock_api.swift index 7d75ff0ab1..70ede198d7 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_api.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_api.swift @@ -75,23 +75,33 @@ public struct Bedrock: AWSService { "bedrock-ap-south-1": "bedrock.ap-south-1.amazonaws.com", "bedrock-ap-southeast-1": "bedrock.ap-southeast-1.amazonaws.com", "bedrock-ap-southeast-2": "bedrock.ap-southeast-2.amazonaws.com", + "bedrock-ca-central-1": "bedrock.ca-central-1.amazonaws.com", "bedrock-eu-central-1": "bedrock.eu-central-1.amazonaws.com", "bedrock-eu-west-1": "bedrock.eu-west-1.amazonaws.com", + "bedrock-eu-west-2": "bedrock.eu-west-2.amazonaws.com", "bedrock-eu-west-3": "bedrock.eu-west-3.amazonaws.com", + "bedrock-fips-ca-central-1": "bedrock-fips.ca-central-1.amazonaws.com", "bedrock-fips-us-east-1": "bedrock-fips.us-east-1.amazonaws.com", + "bedrock-fips-us-gov-west-1": "bedrock-fips.us-gov-west-1.amazonaws.com", "bedrock-fips-us-west-2": "bedrock-fips.us-west-2.amazonaws.com", "bedrock-runtime-ap-northeast-1": "bedrock-runtime.ap-northeast-1.amazonaws.com", "bedrock-runtime-ap-south-1": "bedrock-runtime.ap-south-1.amazonaws.com", "bedrock-runtime-ap-southeast-1": "bedrock-runtime.ap-southeast-1.amazonaws.com", "bedrock-runtime-ap-southeast-2": "bedrock-runtime.ap-southeast-2.amazonaws.com", + "bedrock-runtime-ca-central-1": "bedrock-runtime.ca-central-1.amazonaws.com", "bedrock-runtime-eu-central-1": "bedrock-runtime.eu-central-1.amazonaws.com", "bedrock-runtime-eu-west-1": "bedrock-runtime.eu-west-1.amazonaws.com", + "bedrock-runtime-eu-west-2": "bedrock-runtime.eu-west-2.amazonaws.com", "bedrock-runtime-eu-west-3": "bedrock-runtime.eu-west-3.amazonaws.com", + "bedrock-runtime-fips-ca-central-1": "bedrock-runtime-fips.ca-central-1.amazonaws.com", "bedrock-runtime-fips-us-east-1": "bedrock-runtime-fips.us-east-1.amazonaws.com", + "bedrock-runtime-fips-us-gov-west-1": "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", "bedrock-runtime-fips-us-west-2": "bedrock-runtime-fips.us-west-2.amazonaws.com", + "bedrock-runtime-sa-east-1": "bedrock-runtime.sa-east-1.amazonaws.com", "bedrock-runtime-us-east-1": "bedrock-runtime.us-east-1.amazonaws.com", "bedrock-runtime-us-gov-west-1": "bedrock-runtime.us-gov-west-1.amazonaws.com", "bedrock-runtime-us-west-2": "bedrock-runtime.us-west-2.amazonaws.com", + "bedrock-sa-east-1": "bedrock.sa-east-1.amazonaws.com", "bedrock-us-east-1": "bedrock.us-east-1.amazonaws.com", "bedrock-us-gov-west-1": "bedrock.us-gov-west-1.amazonaws.com", "bedrock-us-west-2": "bedrock.us-west-2.amazonaws.com" diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift index c794df4cd8..df83b87600 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift @@ -335,6 +335,8 @@ extension BedrockAgent { public let failureReasons: [String]? /// The foundation model used for orchestration by the agent. public let foundationModel: String? + /// The guardrails configuration assigned to the agent. + public let guardrailConfiguration: GuardrailConfiguration? /// The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. 
If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. public let idleSessionTTLInSeconds: Int /// Instructions that tell the agent what it should do and how it should interact with users. @@ -350,7 +352,7 @@ extension BedrockAgent { @CustomCoding public var updatedAt: Date - public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, agentVersion: String, clientToken: String? = nil, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, preparedAt: Date? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date) { + public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, agentVersion: String, clientToken: String? = nil, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, preparedAt: Date? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date) { self.agentArn = agentArn self.agentId = agentId self.agentName = agentName @@ -363,6 +365,7 @@ extension BedrockAgent { self.description = description self.failureReasons = failureReasons self.foundationModel = foundationModel + self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction self.preparedAt = preparedAt @@ -384,6 +387,7 @@ extension BedrockAgent { case description = "description" case failureReasons = "failureReasons" case foundationModel = "foundationModel" + case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case instruction = "instruction" case preparedAt = "preparedAt" @@ -677,17 +681,20 @@ extension BedrockAgent { public let agentStatus: AgentStatus /// The description of the agent. public let description: String? + /// The details of the guardrails configuration in the agent summary. + public let guardrailConfiguration: GuardrailConfiguration? /// The latest version of the agent. public let latestAgentVersion: String? /// The time at which the agent was last updated. @CustomCoding public var updatedAt: Date - public init(agentId: String, agentName: String, agentStatus: AgentStatus, description: String? = nil, latestAgentVersion: String? = nil, updatedAt: Date) { + public init(agentId: String, agentName: String, agentStatus: AgentStatus, description: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, latestAgentVersion: String? 
= nil, updatedAt: Date) { self.agentId = agentId self.agentName = agentName self.agentStatus = agentStatus self.description = description + self.guardrailConfiguration = guardrailConfiguration self.latestAgentVersion = latestAgentVersion self.updatedAt = updatedAt } @@ -697,6 +704,7 @@ extension BedrockAgent { case agentName = "agentName" case agentStatus = "agentStatus" case description = "description" + case guardrailConfiguration = "guardrailConfiguration" case latestAgentVersion = "latestAgentVersion" case updatedAt = "updatedAt" } @@ -724,6 +732,8 @@ extension BedrockAgent { public let failureReasons: [String]? /// The foundation model that the version invokes. public let foundationModel: String? + /// The guardrails configuration assigned to the agent version. + public let guardrailConfiguration: GuardrailConfiguration? /// The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. public let idleSessionTTLInSeconds: Int /// The instructions provided to the agent. @@ -738,7 +748,7 @@ extension BedrockAgent { /// The version number. public let version: String - public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date, version: String) { + public init(agentArn: String, agentId: String, agentName: String, agentResourceRoleArn: String, agentStatus: AgentStatus, createdAt: Date, customerEncryptionKeyArn: String? = nil, description: String? = nil, failureReasons: [String]? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, recommendedActions: [String]? = nil, updatedAt: Date, version: String) { self.agentArn = agentArn self.agentId = agentId self.agentName = agentName @@ -749,6 +759,7 @@ extension BedrockAgent { self.description = description self.failureReasons = failureReasons self.foundationModel = foundationModel + self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction self.promptOverrideConfiguration = promptOverrideConfiguration @@ -768,6 +779,7 @@ extension BedrockAgent { case description = "description" case failureReasons = "failureReasons" case foundationModel = "foundationModel" + case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case instruction = "instruction" case promptOverrideConfiguration = "promptOverrideConfiguration" @@ -789,16 +801,19 @@ extension BedrockAgent { public var createdAt: Date /// The description of the version of the agent. public let description: String? + /// The details of the guardrails configuration in the agent version summary. + public let guardrailConfiguration: GuardrailConfiguration? /// The time at which the version was last updated. 
@CustomCoding public var updatedAt: Date - public init(agentName: String, agentStatus: AgentStatus, agentVersion: String, createdAt: Date, description: String? = nil, updatedAt: Date) { + public init(agentName: String, agentStatus: AgentStatus, agentVersion: String, createdAt: Date, description: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, updatedAt: Date) { self.agentName = agentName self.agentStatus = agentStatus self.agentVersion = agentVersion self.createdAt = createdAt self.description = description + self.guardrailConfiguration = guardrailConfiguration self.updatedAt = updatedAt } @@ -808,6 +823,7 @@ extension BedrockAgent { case agentVersion = "agentVersion" case createdAt = "createdAt" case description = "description" + case guardrailConfiguration = "guardrailConfiguration" case updatedAt = "updatedAt" } } @@ -872,6 +888,24 @@ extension BedrockAgent { } } + public struct BedrockEmbeddingModelConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The dimensions details for the vector configuration used on the Bedrock embeddings model. + public let dimensions: Int? + + public init(dimensions: Int? = nil) { + self.dimensions = dimensions + } + + public func validate(name: String) throws { + try self.validate(self.dimensions, name: "dimensions", parent: name, max: 4096) + try self.validate(self.dimensions, name: "dimensions", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case dimensions = "dimensions" + } + } + public struct ChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { /// Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk. FIXED_SIZE – Amazon Bedrock splits your source data into chunks of the approximate size that you set in the fixedSizeChunkingConfiguration. NONE – Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files. public let chunkingStrategy: ChunkingStrategy @@ -1070,6 +1104,8 @@ extension BedrockAgent { public let description: String? /// The foundation model to be used for orchestration by the agent you create. public let foundationModel: String? + /// The unique Guardrail configuration assigned to the agent when it is created. + public let guardrailConfiguration: GuardrailConfiguration? /// The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. public let idleSessionTTLInSeconds: Int? /// Instructions that tell the agent what it should do and how it should interact with users. @@ -1079,13 +1115,14 @@ extension BedrockAgent { /// Any tags that you want to attach to the agent. public let tags: [String: String]? - public init(agentName: String, agentResourceRoleArn: String? = nil, clientToken: String? = CreateAgentRequest.idempotencyToken(), customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? 
= nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, tags: [String: String]? = nil) { + public init(agentName: String, agentResourceRoleArn: String? = nil, clientToken: String? = CreateAgentRequest.idempotencyToken(), customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil, tags: [String: String]? = nil) { self.agentName = agentName self.agentResourceRoleArn = agentResourceRoleArn self.clientToken = clientToken self.customerEncryptionKeyArn = customerEncryptionKeyArn self.description = description self.foundationModel = foundationModel + self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction self.promptOverrideConfiguration = promptOverrideConfiguration @@ -1095,7 +1132,7 @@ extension BedrockAgent { public func validate(name: String) throws { try self.validate(self.agentName, name: "agentName", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, max: 2048) - try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?AmazonBedrockExecutionRoleForAgents_.+$") + try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$") try self.validate(self.clientToken, name: "clientToken", parent: name, max: 256) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") @@ -1107,9 +1144,10 @@ extension BedrockAgent { try self.validate(self.foundationModel, name: "foundationModel", parent: name, max: 2048) try self.validate(self.foundationModel, name: "foundationModel", parent: name, min: 1) try self.validate(self.foundationModel, name: "foundationModel", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$") + try self.guardrailConfiguration?.validate(name: "\(name).guardrailConfiguration") try self.validate(self.idleSessionTTLInSeconds, name: "idleSessionTTLInSeconds", parent: name, max: 3600) try self.validate(self.idleSessionTTLInSeconds, name: "idleSessionTTLInSeconds", parent: name, min: 60) - try self.validate(self.instruction, name: "instruction", parent: name, max: 1200) + try self.validate(self.instruction, name: "instruction", parent: name, max: 4000) try self.validate(self.instruction, name: "instruction", parent: name, min: 40) try self.promptOverrideConfiguration?.validate(name: "\(name).promptOverrideConfiguration") try self.tags?.forEach { @@ -1128,6 +1166,7 @@ extension BedrockAgent { case customerEncryptionKeyArn = "customerEncryptionKeyArn" case description = "description" case foundationModel = "foundationModel" + case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case 
instruction = "instruction" case promptOverrideConfiguration = "promptOverrideConfiguration" @@ -1715,6 +1754,23 @@ extension BedrockAgent { public init() {} } + public struct EmbeddingModelConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The vector configuration details on the Bedrock embeddings model. + public let bedrockEmbeddingModelConfiguration: BedrockEmbeddingModelConfiguration? + + public init(bedrockEmbeddingModelConfiguration: BedrockEmbeddingModelConfiguration? = nil) { + self.bedrockEmbeddingModelConfiguration = bedrockEmbeddingModelConfiguration + } + + public func validate(name: String) throws { + try self.bedrockEmbeddingModelConfiguration?.validate(name: "\(name).bedrockEmbeddingModelConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case bedrockEmbeddingModelConfiguration = "bedrockEmbeddingModelConfiguration" + } + } + public struct FixedSizeChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { /// The maximum number of tokens to include in a chunk. public let maxTokens: Int @@ -2086,6 +2142,29 @@ extension BedrockAgent { } } + public struct GuardrailConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The guardrails identifier assigned to the guardrails configuration. + public let guardrailIdentifier: String? + /// The guardrails version assigned to the guardrails configuration. + public let guardrailVersion: String? + + public init(guardrailIdentifier: String? = nil, guardrailVersion: String? = nil) { + self.guardrailIdentifier = guardrailIdentifier + self.guardrailVersion = guardrailVersion + } + + public func validate(name: String) throws { + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, max: 2048) + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, pattern: "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$") + try self.validate(self.guardrailVersion, name: "guardrailVersion", parent: name, pattern: "^(([0-9]{1,8})|(DRAFT))$") + } + + private enum CodingKeys: String, CodingKey { + case guardrailIdentifier = "guardrailIdentifier" + case guardrailVersion = "guardrailVersion" + } + } + public struct InferenceConfiguration: AWSEncodableShape & AWSDecodableShape { /// The maximum number of tokens to allow in the generated response. public let maximumLength: Int? @@ -3158,7 +3237,7 @@ extension BedrockAgent { } public struct PromptConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables. + /// Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables. For more information, see Configure the prompt templates. public let basePromptTemplate: String? /// Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the promptType. For more information, see Inference parameters for foundation models. public let inferenceConfiguration: InferenceConfiguration? 
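A minimal sketch of passing the new GuardrailConfiguration shape to CreateAgentRequest; the guardrail identifier, role ARN, model identifier, and instruction text are placeholder assumptions, not values from this patch:

import SotoBedrockAgent

// Hedged sketch: create an agent with the new guardrailConfiguration field.
// All identifiers below are placeholders chosen to satisfy the validation
// patterns shown in the shapes above.
func createGuardedAgent(using bedrockAgent: BedrockAgent) async throws {
    let guardrail = BedrockAgent.GuardrailConfiguration(
        guardrailIdentifier: "abc123defg",   // assumed guardrail id
        guardrailVersion: "DRAFT"
    )
    let request = BedrockAgent.CreateAgentRequest(
        agentName: "support-agent",
        agentResourceRoleArn: "arn:aws:iam::123456789012:role/MyAgentRole",
        foundationModel: "anthropic.claude-3-sonnet-20240229-v1:0",
        guardrailConfiguration: guardrail,
        instruction: "Answer customer questions about order status using the knowledge base only."
    )
    _ = try await bedrockAgent.createAgent(request)
}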
@@ -3197,7 +3276,7 @@ extension BedrockAgent { } public struct PromptOverrideConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. + /// The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock. public let overrideLambda: String? /// Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts. public let promptConfigurations: [PromptConfiguration] @@ -3839,6 +3918,8 @@ extension BedrockAgent { public let description: String? /// Specifies a new foundation model to be used for orchestration by the agent. public let foundationModel: String? + /// The unique Guardrail configuration assigned to the agent when it is updated. + public let guardrailConfiguration: GuardrailConfiguration? /// The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. public let idleSessionTTLInSeconds: Int? /// Specifies new instructions that tell the agent what it should do and how it should interact with users. @@ -3846,13 +3927,14 @@ extension BedrockAgent { /// Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. public let promptOverrideConfiguration: PromptOverrideConfiguration? - public init(agentId: String, agentName: String, agentResourceRoleArn: String, customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? = nil) { + public init(agentId: String, agentName: String, agentResourceRoleArn: String, customerEncryptionKeyArn: String? = nil, description: String? = nil, foundationModel: String? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, idleSessionTTLInSeconds: Int? = nil, instruction: String? = nil, promptOverrideConfiguration: PromptOverrideConfiguration? 
= nil) { self.agentId = agentId self.agentName = agentName self.agentResourceRoleArn = agentResourceRoleArn self.customerEncryptionKeyArn = customerEncryptionKeyArn self.description = description self.foundationModel = foundationModel + self.guardrailConfiguration = guardrailConfiguration self.idleSessionTTLInSeconds = idleSessionTTLInSeconds self.instruction = instruction self.promptOverrideConfiguration = promptOverrideConfiguration @@ -3867,6 +3949,7 @@ extension BedrockAgent { try container.encodeIfPresent(self.customerEncryptionKeyArn, forKey: .customerEncryptionKeyArn) try container.encodeIfPresent(self.description, forKey: .description) try container.encodeIfPresent(self.foundationModel, forKey: .foundationModel) + try container.encodeIfPresent(self.guardrailConfiguration, forKey: .guardrailConfiguration) try container.encodeIfPresent(self.idleSessionTTLInSeconds, forKey: .idleSessionTTLInSeconds) try container.encodeIfPresent(self.instruction, forKey: .instruction) try container.encodeIfPresent(self.promptOverrideConfiguration, forKey: .promptOverrideConfiguration) @@ -3876,7 +3959,7 @@ extension BedrockAgent { try self.validate(self.agentId, name: "agentId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") try self.validate(self.agentName, name: "agentName", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, max: 2048) - try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?AmazonBedrockExecutionRoleForAgents_.+$") + try self.validate(self.agentResourceRoleArn, name: "agentResourceRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$") try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, max: 2048) try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, min: 1) try self.validate(self.customerEncryptionKeyArn, name: "customerEncryptionKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") @@ -3885,9 +3968,10 @@ extension BedrockAgent { try self.validate(self.foundationModel, name: "foundationModel", parent: name, max: 2048) try self.validate(self.foundationModel, name: "foundationModel", parent: name, min: 1) try self.validate(self.foundationModel, name: "foundationModel", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$") + try self.guardrailConfiguration?.validate(name: "\(name).guardrailConfiguration") try self.validate(self.idleSessionTTLInSeconds, name: "idleSessionTTLInSeconds", parent: name, max: 3600) try self.validate(self.idleSessionTTLInSeconds, name: "idleSessionTTLInSeconds", parent: name, min: 60) - try self.validate(self.instruction, name: "instruction", parent: name, max: 1200) + try self.validate(self.instruction, name: "instruction", parent: name, max: 4000) try self.validate(self.instruction, name: "instruction", parent: name, min: 40) try self.promptOverrideConfiguration?.validate(name: "\(name).promptOverrideConfiguration") } @@ -3898,6 +3982,7 @@ extension BedrockAgent { case 
customerEncryptionKeyArn = "customerEncryptionKeyArn" case description = "description" case foundationModel = "foundationModel" + case guardrailConfiguration = "guardrailConfiguration" case idleSessionTTLInSeconds = "idleSessionTTLInSeconds" case instruction = "instruction" case promptOverrideConfiguration = "promptOverrideConfiguration" @@ -4075,19 +4160,24 @@ extension BedrockAgent { public struct VectorKnowledgeBaseConfiguration: AWSEncodableShape & AWSDecodableShape { /// The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base. public let embeddingModelArn: String + /// The embeddings model configuration details for the vector model used in Knowledge Base. + public let embeddingModelConfiguration: EmbeddingModelConfiguration? - public init(embeddingModelArn: String) { + public init(embeddingModelArn: String, embeddingModelConfiguration: EmbeddingModelConfiguration? = nil) { self.embeddingModelArn = embeddingModelArn + self.embeddingModelConfiguration = embeddingModelConfiguration } public func validate(name: String) throws { - try self.validate(self.embeddingModelArn, name: "embeddingModelArn", parent: name, max: 1011) + try self.validate(self.embeddingModelArn, name: "embeddingModelArn", parent: name, max: 2048) try self.validate(self.embeddingModelArn, name: "embeddingModelArn", parent: name, min: 20) - try self.validate(self.embeddingModelArn, name: "embeddingModelArn", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))$") + try self.validate(self.embeddingModelArn, name: "embeddingModelArn", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.embeddingModelConfiguration?.validate(name: "\(name).embeddingModelConfiguration") } private enum CodingKeys: String, CodingKey { case embeddingModelArn = "embeddingModelArn" + case embeddingModelConfiguration = "embeddingModelConfiguration" } } diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift index eddaddad07..8e17f31de6 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_api.swift @@ -74,7 +74,7 @@ public struct BedrockAgentRuntime: AWSService { // MARK: API Calls - /// Sends a prompt for the agent to process and respond to. Use return control event type for function calling. The CLI doesn't support InvokeAgent. To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or parameters returned from the action group. Use return control event type for function calling. 
The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. Errors are also surfaced in the response. + /// The CLI doesn't support InvokeAgent. Sends a prompt for the agent to process and respond to. Note the following fields for the request: To continue the same conversation with an agent, use the same sessionId value in the request. To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement. End a conversation by setting endSession to true. In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group. The response is returned in the bytes field of the chunk object. The attribution object contains citations for parts of the response. If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response. If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field. Errors are also surfaced in the response. @Sendable public func invokeAgent(_ input: InvokeAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeAgentResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index 4aa63f99a0..83d337b5ff 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -38,6 +38,102 @@ extension BedrockAgentRuntime { public var description: String { return self.rawValue } } + public enum GuadrailAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case intervened = "INTERVENED" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum GuardrailAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case intervened = "INTERVENED" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentFilterConfidence: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case high = "HIGH" + case low = "LOW" + case medium = "MEDIUM" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentFilterType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hate = "HATE" + case insults = "INSULTS" + case misconduct = "MISCONDUCT" + case promptAttack = "PROMPT_ATTACK" + case sexual = "SEXUAL" + case violence = "VIOLENCE" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum GuardrailManagedWordType: String, CustomStringConvertible, Codable, Sendable, 
CodingKeyRepresentable { + case profanity = "PROFANITY" + public var description: String { return self.rawValue } + } + + public enum GuardrailPiiEntityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case address = "ADDRESS" + case age = "AGE" + case awsAccessKey = "AWS_ACCESS_KEY" + case awsSecretKey = "AWS_SECRET_KEY" + case caHealthNumber = "CA_HEALTH_NUMBER" + case caSocialInsuranceNumber = "CA_SOCIAL_INSURANCE_NUMBER" + case creditDebitCardCvv = "CREDIT_DEBIT_CARD_CVV" + case creditDebitCardExpiry = "CREDIT_DEBIT_CARD_EXPIRY" + case creditDebitCardNumber = "CREDIT_DEBIT_CARD_NUMBER" + case driverId = "DRIVER_ID" + case email = "EMAIL" + case internationalBankAccountNumber = "INTERNATIONAL_BANK_ACCOUNT_NUMBER" + case ipAddress = "IP_ADDRESS" + case licensePlate = "LICENSE_PLATE" + case macAddress = "MAC_ADDRESS" + case name = "NAME" + case password = "PASSWORD" + case phone = "PHONE" + case pin = "PIN" + case swiftCode = "SWIFT_CODE" + case ukNationalHealthServiceNumber = "UK_NATIONAL_HEALTH_SERVICE_NUMBER" + case ukNationalInsuranceNumber = "UK_NATIONAL_INSURANCE_NUMBER" + case ukUniqueTaxpayerReferenceNumber = "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER" + case url = "URL" + case usBankAccountNumber = "US_BANK_ACCOUNT_NUMBER" + case usBankRoutingNumber = "US_BANK_ROUTING_NUMBER" + case usIndividualTaxIdentificationNumber = "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER" + case usPassportNumber = "US_PASSPORT_NUMBER" + case usSocialSecurityNumber = "US_SOCIAL_SECURITY_NUMBER" + case username = "USERNAME" + case vehicleIdentificationNumber = "VEHICLE_IDENTIFICATION_NUMBER" + public var description: String { return self.rawValue } + } + + public enum GuardrailSensitiveInformationPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case anonymized = "ANONYMIZED" + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum GuardrailTopicPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum GuardrailTopicType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deny = "DENY" + public var description: String { return self.rawValue } + } + + public enum GuardrailWordPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + public enum InvocationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case actionGroup = "ACTION_GROUP" case finish = "FINISH" @@ -342,28 +438,32 @@ extension BedrockAgentRuntime { } public enum RetrievalFilter: AWSEncodableShape, Sendable { - /// Knowledge base data sources whose metadata attributes fulfill all the filter conditions inside this list are returned. + /// Knowledge base data sources are returned if their metadata attributes fulfill all the filter conditions inside this list. case andAll([RetrievalFilter]) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value matches the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value matches the value in this object. 
The following example would return data sources with an animal attribute whose value is cat: "equals": { "key": "animal", "value": "cat" } case equals(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than the value in this object. The following example would return data sources with a year attribute whose value is greater than 1989: "greaterThan": { "key": "year", "value": 1989 } case greaterThan(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object. The following example would return data sources with a year attribute whose value is greater than or equal to 1989: "greaterThanOrEquals": { "key": "year", "value": 1989 } case greaterThanOrEquals(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object. The following example would return data sources with an animal attribute that is either cat or dog: "in": { "key": "animal", "value": ["cat", "dog"] } case `in`(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than the value in this object. The following example would return data sources with a year attribute whose value is less than 1989. "lessThan": { "key": "year", "value": 1989 } case lessThan(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object. The following example would return data sources with a year attribute whose value is less than or equal to 1989. "lessThanOrEquals": { "key": "year", "value": 1989 } case lessThanOrEquals(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is a list that contains the value as one of its members. The following example would return data sources with an animals attribute that is a list containing a cat member (for example ["dog", "cat"]).
"listContains": { "key": "animals", "value": "cat" } + case listContains(FilterAttribute) + /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned. The following example would return data sources that don't contain an animal attribute whose value is cat. "notEquals": { "key": "animal", "value": "cat" } case notEquals(FilterAttribute) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object are returned. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object. The following example would return data sources whose animal attribute is neither cat nor dog. "notIn": { "key": "animal", "value": ["cat", "dog"] } case notIn(FilterAttribute) - /// Knowledge base data sources whose metadata attributes fulfill at least one of the filter conditions inside this list are returned. + /// Knowledge base data sources are returned if their metadata attributes fulfill at least one of the filter conditions inside this list. case orAll([RetrievalFilter]) - /// Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value starts with the value in this object are returned. This filter is currently only supported for Amazon OpenSearch Serverless vector stores. + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value starts with the value in this object. This filter is currently only supported for Amazon OpenSearch Serverless vector stores. The following example would return data sources with an animal attribute starts with ca (for example, cat or camel). "startsWith": { "key": "animal", "value": "ca" } case startsWith(FilterAttribute) + /// Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is one of the following: A string that contains the value as a substring. The following example would return data sources with an animal attribute that contains the substring at (for example cat). "stringContains": { "key": "animal", "value": "at" } A list with a member that contains the value as a substring. The following example would return data sources with an animals attribute that is a list containing a member that contains the substring at (for example ["dog", "cat"]). 
"stringContains": { "key": "animals", "value": "at" } + case stringContains(FilterAttribute) public func encode(to encoder: Encoder) throws { var container = encoder.container(keyedBy: CodingKeys.self) @@ -382,6 +482,8 @@ extension BedrockAgentRuntime { try container.encode(value, forKey: .lessThan) case .lessThanOrEquals(let value): try container.encode(value, forKey: .lessThanOrEquals) + case .listContains(let value): + try container.encode(value, forKey: .listContains) case .notEquals(let value): try container.encode(value, forKey: .notEquals) case .notIn(let value): @@ -390,6 +492,8 @@ extension BedrockAgentRuntime { try container.encode(value, forKey: .orAll) case .startsWith(let value): try container.encode(value, forKey: .startsWith) + case .stringContains(let value): + try container.encode(value, forKey: .stringContains) } } @@ -413,6 +517,8 @@ extension BedrockAgentRuntime { try value.validate(name: "\(name).lessThan") case .lessThanOrEquals(let value): try value.validate(name: "\(name).lessThanOrEquals") + case .listContains(let value): + try value.validate(name: "\(name).listContains") case .notEquals(let value): try value.validate(name: "\(name).notEquals") case .notIn(let value): @@ -425,6 +531,8 @@ extension BedrockAgentRuntime { try self.validate(value, name: "orAll", parent: name, min: 2) case .startsWith(let value): try value.validate(name: "\(name).startsWith") + case .stringContains(let value): + try value.validate(name: "\(name).stringContains") } } @@ -436,16 +544,20 @@ extension BedrockAgentRuntime { case `in` = "in" case lessThan = "lessThan" case lessThanOrEquals = "lessThanOrEquals" + case listContains = "listContains" case notEquals = "notEquals" case notIn = "notIn" case orAll = "orAll" case startsWith = "startsWith" + case stringContains = "stringContains" } } public enum Trace: AWSDecodableShape, Sendable { /// Contains information about the failure of the interaction. case failureTrace(FailureTrace) + /// The trace details for a trace defined in the Guardrail filter. + case guardrailTrace(GuardrailTrace) /// Details about the orchestration step, in which the agent determines the order in which actions are executed and which knowledge bases are retrieved. case orchestrationTrace(OrchestrationTrace) /// Details about the post-processing step, in which the agent shapes the response.. @@ -466,6 +578,9 @@ extension BedrockAgentRuntime { case .failureTrace: let value = try container.decode(FailureTrace.self, forKey: .failureTrace) self = .failureTrace(value) + case .guardrailTrace: + let value = try container.decode(GuardrailTrace.self, forKey: .guardrailTrace) + self = .guardrailTrace(value) case .orchestrationTrace: let value = try container.decode(OrchestrationTrace.self, forKey: .orchestrationTrace) self = .orchestrationTrace(value) @@ -480,6 +595,7 @@ extension BedrockAgentRuntime { private enum CodingKeys: String, CodingKey { case failureTrace = "failureTrace" + case guardrailTrace = "guardrailTrace" case orchestrationTrace = "orchestrationTrace" case postProcessingTrace = "postProcessingTrace" case preProcessingTrace = "preProcessingTrace" @@ -618,7 +734,7 @@ extension BedrockAgentRuntime { public let httpMethod: String? /// http status code from API execution response (for example: 200, 400, 500). public let httpStatusCode: Int? - /// The response body from the API operation. The key of the object is the content type. The response may be returned directly or from the Lambda function. + /// The response body from the API operation. 
The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function. public let responseBody: [String: ContentBody]? /// Controls the final response state returned to end user when API/Function execution failed. When this state is FAILURE, the request would fail with dependency failure exception. When this state is REPROMPT, the API/function response will be sent to model for re-prompt public let responseState: ResponseState? @@ -785,18 +901,35 @@ extension BedrockAgentRuntime { } public struct ExternalSourcesGenerationConfiguration: AWSEncodableShape { + /// Additional model parameters and their corresponding values not included in the textInferenceConfig structure for an external source. Takes in custom model parameters specific to the language model being used. + public let additionalModelRequestFields: [String: String]? + /// The configuration details for the guardrail. + public let guardrailConfiguration: GuardrailConfiguration? + /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using an external source. + public let inferenceConfig: InferenceConfig? /// Contain the textPromptTemplate string for the external source wrapper object. public let promptTemplate: PromptTemplate? - public init(promptTemplate: PromptTemplate? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil) { + self.additionalModelRequestFields = additionalModelRequestFields + self.guardrailConfiguration = guardrailConfiguration + self.inferenceConfig = inferenceConfig self.promptTemplate = promptTemplate } public func validate(name: String) throws { + try self.additionalModelRequestFields?.forEach { + try validate($0.key, name: "additionalModelRequestFields.key", parent: name, max: 100) + try validate($0.key, name: "additionalModelRequestFields.key", parent: name, min: 1) + } + try self.inferenceConfig?.validate(name: "\(name).inferenceConfig") try self.promptTemplate?.validate(name: "\(name).promptTemplate") } private enum CodingKeys: String, CodingKey { + case additionalModelRequestFields = "additionalModelRequestFields" + case guardrailConfiguration = "guardrailConfiguration" + case inferenceConfig = "inferenceConfig" case promptTemplate = "promptTemplate" } } @@ -933,7 +1066,7 @@ extension BedrockAgentRuntime { public let actionGroup: String /// The name of the function that was called. public let function: String? - /// The response from the function call using the parameters. The response may be returned directly or from the Lambda function. + /// The response from the function call using the parameters. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function. public let responseBody: [String: ContentBody]? /// Controls the final response state returned to end user when API/Function execution failed. When this state is FAILURE, the request would fail with dependency failure exception. When this state is REPROMPT, the API/function response will be sent to model for re-prompt public let responseState: ResponseState? 
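// A hedged usage sketch for the RetrievalFilter cases documented above: nested
// filters combine with andAll/orAll, and each leaf case wraps a FilterAttribute.
// `catFilter` and `yearFilter` stand in for FilterAttribute values built elsewhere;
// their construction is not shown in this hunk, so they are assumptions here.
let retrievalFilter: BedrockAgentRuntime.RetrievalFilter = .andAll([
    .equals(catFilter),               // e.g. the "animal" == "cat" example above
    .greaterThanOrEquals(yearFilter)  // e.g. the "year" >= 1989 example above
])

// A second hedged sketch showing how the new additionalModelRequestFields,
// guardrailConfiguration and inferenceConfig members of
// ExternalSourcesGenerationConfiguration might be wired to the GuardrailConfiguration,
// InferenceConfig and TextInferenceConfig shapes added later in this file. The
// guardrail id/version, the model parameter and the inference values are
// illustrative placeholders only.
let externalSourcesGenerationConfig = BedrockAgentRuntime.ExternalSourcesGenerationConfiguration(
    additionalModelRequestFields: ["top_k": "250"],   // hypothetical model-specific parameter
    guardrailConfiguration: .init(guardrailId: "gr-EXAMPLEID", guardrailVersion: "1"),
    inferenceConfig: .init(textInferenceConfig: .init(maxTokens: 512, temperature: 0.2, topP: 0.9))
)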
@@ -967,22 +1100,309 @@ extension BedrockAgentRuntime { } public struct GenerationConfiguration: AWSEncodableShape { + /// Additional model parameters and corresponding values not included in the textInferenceConfig structure for a knowledge base. This allows users to provide custom model parameters specific to the language model being used. + public let additionalModelRequestFields: [String: String]? + /// The configuration details for the guardrail. + public let guardrailConfiguration: GuardrailConfiguration? + /// Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source. + public let inferenceConfig: InferenceConfig? /// Contains the template for the prompt that's sent to the model for response generation. public let promptTemplate: PromptTemplate? - public init(promptTemplate: PromptTemplate? = nil) { + public init(additionalModelRequestFields: [String: String]? = nil, guardrailConfiguration: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfig? = nil, promptTemplate: PromptTemplate? = nil) { + self.additionalModelRequestFields = additionalModelRequestFields + self.guardrailConfiguration = guardrailConfiguration + self.inferenceConfig = inferenceConfig self.promptTemplate = promptTemplate } public func validate(name: String) throws { + try self.additionalModelRequestFields?.forEach { + try validate($0.key, name: "additionalModelRequestFields.key", parent: name, max: 100) + try validate($0.key, name: "additionalModelRequestFields.key", parent: name, min: 1) + } + try self.inferenceConfig?.validate(name: "\(name).inferenceConfig") try self.promptTemplate?.validate(name: "\(name).promptTemplate") } private enum CodingKeys: String, CodingKey { + case additionalModelRequestFields = "additionalModelRequestFields" + case guardrailConfiguration = "guardrailConfiguration" + case inferenceConfig = "inferenceConfig" case promptTemplate = "promptTemplate" } } + public struct GuardrailAssessment: AWSDecodableShape { + /// Content policy details of the Guardrail. + public let contentPolicy: GuardrailContentPolicyAssessment? + /// Sensitive Information policy details of Guardrail. + public let sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? + /// Topic policy details of the Guardrail. + public let topicPolicy: GuardrailTopicPolicyAssessment? + /// Word policy details of the Guardrail. + public let wordPolicy: GuardrailWordPolicyAssessment? + + public init(contentPolicy: GuardrailContentPolicyAssessment? = nil, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? = nil, topicPolicy: GuardrailTopicPolicyAssessment? = nil, wordPolicy: GuardrailWordPolicyAssessment? = nil) { + self.contentPolicy = contentPolicy + self.sensitiveInformationPolicy = sensitiveInformationPolicy + self.topicPolicy = topicPolicy + self.wordPolicy = wordPolicy + } + + private enum CodingKeys: String, CodingKey { + case contentPolicy = "contentPolicy" + case sensitiveInformationPolicy = "sensitiveInformationPolicy" + case topicPolicy = "topicPolicy" + case wordPolicy = "wordPolicy" + } + } + + public struct GuardrailConfiguration: AWSEncodableShape { + /// The unique identifier for the guardrail. + public let guardrailId: String + /// The version of the guardrail. 
+ public let guardrailVersion: String + + public init(guardrailId: String, guardrailVersion: String) { + self.guardrailId = guardrailId + self.guardrailVersion = guardrailVersion + } + + private enum CodingKeys: String, CodingKey { + case guardrailId = "guardrailId" + case guardrailVersion = "guardrailVersion" + } + } + + public struct GuardrailContentFilter: AWSDecodableShape { + /// The action placed on the content by the Guardrail filter. + public let action: GuardrailContentPolicyAction? + /// The confidence level regarding the content detected in the filter by the Guardrail. + public let confidence: GuardrailContentFilterConfidence? + /// The type of content detected in the filter by the Guardrail. + public let type: GuardrailContentFilterType? + + public init(action: GuardrailContentPolicyAction? = nil, confidence: GuardrailContentFilterConfidence? = nil, type: GuardrailContentFilterType? = nil) { + self.action = action + self.confidence = confidence + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case confidence = "confidence" + case type = "type" + } + } + + public struct GuardrailContentPolicyAssessment: AWSDecodableShape { + /// The filter details of the policy assessment used in the Guardrails filter. + public let filters: [GuardrailContentFilter]? + + public init(filters: [GuardrailContentFilter]? = nil) { + self.filters = filters + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + } + } + + public struct GuardrailCustomWord: AWSDecodableShape { + /// The action details for the custom word filter in the Guardrail. + public let action: GuardrailWordPolicyAction? + /// The match details for the custom word filter in the Guardrail. + public let match: String? + + public init(action: GuardrailWordPolicyAction? = nil, match: String? = nil) { + self.action = action + self.match = match + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + } + } + + public struct GuardrailManagedWord: AWSDecodableShape { + /// The action details for the managed word filter in the Guardrail. + public let action: GuardrailWordPolicyAction? + /// The match details for the managed word filter in the Guardrail. + public let match: String? + /// The type details for the managed word filter in the Guardrail. + public let type: GuardrailManagedWordType? + + public init(action: GuardrailWordPolicyAction? = nil, match: String? = nil, type: GuardrailManagedWordType? = nil) { + self.action = action + self.match = match + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + case type = "type" + } + } + + public struct GuardrailPiiEntityFilter: AWSDecodableShape { + /// The action of the Guardrail filter to identify and remove PII. + public let action: GuardrailSensitiveInformationPolicyAction? + /// The match to settings in the Guardrail filter to identify and remove PII. + public let match: String? + /// The type of PII the Guardrail filter has identified and removed. + public let type: GuardrailPiiEntityType? + + public init(action: GuardrailSensitiveInformationPolicyAction? = nil, match: String? = nil, type: GuardrailPiiEntityType? 
= nil) { + self.action = action + self.match = match + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + case type = "type" + } + } + + public struct GuardrailRegexFilter: AWSDecodableShape { + /// The action details for the regex filter used in the Guardrail. + public let action: GuardrailSensitiveInformationPolicyAction? + /// The match details for the regex filter used in the Guardrail. + public let match: String? + /// The name details for the regex filter used in the Guardrail. + public let name: String? + /// The regex details for the regex filter used in the Guardrail. + public let regex: String? + + public init(action: GuardrailSensitiveInformationPolicyAction? = nil, match: String? = nil, name: String? = nil, regex: String? = nil) { + self.action = action + self.match = match + self.name = name + self.regex = regex + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + case name = "name" + case regex = "regex" + } + } + + public struct GuardrailSensitiveInformationPolicyAssessment: AWSDecodableShape { + /// The details of the PII entities used in the sensitive policy assessment for the Guardrail. + public let piiEntities: [GuardrailPiiEntityFilter]? + /// The details of the regexes used in the sensitive policy assessment for the Guardrail. + public let regexes: [GuardrailRegexFilter]? + + public init(piiEntities: [GuardrailPiiEntityFilter]? = nil, regexes: [GuardrailRegexFilter]? = nil) { + self.piiEntities = piiEntities + self.regexes = regexes + } + + private enum CodingKeys: String, CodingKey { + case piiEntities = "piiEntities" + case regexes = "regexes" + } + } + + public struct GuardrailTopic: AWSDecodableShape { + /// The action details on a specific topic in the Guardrail. + public let action: GuardrailTopicPolicyAction? + /// The name details on a specific topic in the Guardrail. + public let name: String? + /// The type details on a specific topic in the Guardrail. + public let type: GuardrailTopicType? + + public init(action: GuardrailTopicPolicyAction? = nil, name: String? = nil, type: GuardrailTopicType? = nil) { + self.action = action + self.name = name + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case name = "name" + case type = "type" + } + } + + public struct GuardrailTopicPolicyAssessment: AWSDecodableShape { + /// The topic details of the policy assessment used in the Guardrail. + public let topics: [GuardrailTopic]? + + public init(topics: [GuardrailTopic]? = nil) { + self.topics = topics + } + + private enum CodingKeys: String, CodingKey { + case topics = "topics" + } + } + + public struct GuardrailTrace: AWSDecodableShape { + /// The trace action details used with the Guardrail. + public let action: GuardrailAction? + /// The details of the input assessments used in the Guardrail Trace. + public let inputAssessments: [GuardrailAssessment]? + /// The details of the output assessments used in the Guardrail Trace. + public let outputAssessments: [GuardrailAssessment]? + /// The details of the trace Id used in the Guardrail Trace. + public let traceId: String? + + public init(action: GuardrailAction? = nil, inputAssessments: [GuardrailAssessment]? = nil, outputAssessments: [GuardrailAssessment]? = nil, traceId: String? 
= nil) { + self.action = action + self.inputAssessments = inputAssessments + self.outputAssessments = outputAssessments + self.traceId = traceId + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case inputAssessments = "inputAssessments" + case outputAssessments = "outputAssessments" + case traceId = "traceId" + } + } + + public struct GuardrailWordPolicyAssessment: AWSDecodableShape { + /// The custom word details for words defined in the Guardrail filter. + public let customWords: [GuardrailCustomWord]? + /// The managed word lists for words defined in the Guardrail filter. + public let managedWordLists: [GuardrailManagedWord]? + + public init(customWords: [GuardrailCustomWord]? = nil, managedWordLists: [GuardrailManagedWord]? = nil) { + self.customWords = customWords + self.managedWordLists = managedWordLists + } + + private enum CodingKeys: String, CodingKey { + case customWords = "customWords" + case managedWordLists = "managedWordLists" + } + } + + public struct InferenceConfig: AWSEncodableShape { + /// Configuration settings specific to text generation while generating responses using RetrieveAndGenerate. + public let textInferenceConfig: TextInferenceConfig? + + public init(textInferenceConfig: TextInferenceConfig? = nil) { + self.textInferenceConfig = textInferenceConfig + } + + public func validate(name: String) throws { + try self.textInferenceConfig?.validate(name: "\(name).textInferenceConfig") + } + + private enum CodingKeys: String, CodingKey { + case textInferenceConfig = "textInferenceConfig" + } + } + public struct InferenceConfiguration: AWSDecodableShape { /// The maximum number of tokens allowed in the generated response. public let maximumLength: Int? @@ -1058,11 +1478,11 @@ extension BedrockAgentRuntime { public let enableTrace: Bool? /// Specifies whether to end the session with the agent or not. public let endSession: Bool? - /// The prompt text to send the agent. + /// The prompt text to send the agent. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. public let inputText: String? /// The unique identifier of the session. Use the same value across requests to continue the same conversation. public let sessionId: String - /// Contains parameters that specify various attributes of the session. For more information, see Control session context. + /// Contains parameters that specify various attributes of the session. For more information, see Control session context. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. public let sessionState: SessionState? public init(agentAliasId: String, agentId: String, enableTrace: Bool? = nil, endSession: Bool? = nil, inputText: String? = nil, sessionId: String, sessionState: SessionState? = nil) { @@ -1671,19 +2091,23 @@ extension BedrockAgentRuntime { public struct RetrieveAndGenerateResponse: AWSDecodableShape { /// A list of segments of the generated response that are based on sources in the knowledge base, alongside information about the sources. public let citations: [Citation]? + /// Specifies if there is a guardrail intervention in the response. + public let guardrailAction: GuadrailAction? /// Contains the response generated from querying the knowledge base. public let output: RetrieveAndGenerateOutput /// The unique identifier of the session. Reuse the same value to continue the same session with the knowledge base. 
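// A hedged sketch of the InvokeAgent request fields documented above, with trace
// enablement turned on so the new guardrailTrace events can be observed. The agent,
// alias and session identifiers are placeholders, and `bedrockAgentRuntime` is assumed
// to be an existing BedrockAgentRuntime service object created from an AWSClient elsewhere.
let invokeAgentResponse = try await bedrockAgentRuntime.invokeAgent(
    .init(
        agentAliasId: "TSTALIASID",              // placeholder alias id
        agentId: "AGENT1234",                    // placeholder agent id
        enableTrace: true,                       // surfaces Trace events, including .guardrailTrace
        inputText: "What is the refund policy?",
        sessionId: "session-0001"                // reuse this value to continue the conversation
    )
)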
public let sessionId: String - public init(citations: [Citation]? = nil, output: RetrieveAndGenerateOutput, sessionId: String) { + public init(citations: [Citation]? = nil, guardrailAction: GuadrailAction? = nil, output: RetrieveAndGenerateOutput, sessionId: String) { self.citations = citations + self.guardrailAction = guardrailAction self.output = output self.sessionId = sessionId } private enum CodingKeys: String, CodingKey { case citations = "citations" + case guardrailAction = "guardrailAction" case output = "output" case sessionId = "sessionId" } } @@ -1837,11 +2261,11 @@ extension BedrockAgentRuntime { } public struct SessionState: AWSEncodableShape { - /// The identifier of the invocation. + /// The identifier of the invocation of an action. This value must match the invocationId returned in the InvokeAgent response for the action whose results are provided in the returnControlInvocationResults field. For more information, see Return control to the agent developer and Control session context. public let invocationId: String? /// Contains attributes that persist across a prompt and the values of those attributes. These attributes replace the $prompt_session_attributes$ placeholder variable in the orchestration prompt template. For more information, see Prompt template placeholder variables. public let promptSessionAttributes: [String: String]? - /// Contains information about the results from the action group invocation. + /// Contains information about the results from the action group invocation. For more information, see Return control to the agent developer and Control session context. If you include this field, the inputText field will be ignored. public let returnControlInvocationResults: [InvocationResultMember]? /// Contains attributes that persist across a session and the values of those attributes. public let sessionAttributes: [String: String]? @@ -1883,6 +2307,41 @@ extension BedrockAgentRuntime { } } + public struct TextInferenceConfig: AWSEncodableShape { + /// The maximum number of tokens to generate in the output text. Do not use the minimum of 0 or the maximum of 65536. The limit values described here are arbitrary values; for actual values, consult the limits defined by your specific model. + public let maxTokens: Int? + /// A list of sequences of characters that, if generated, will cause the model to stop generating further tokens. Do not use a minimum length of 1 or a maximum length of 1000. The limit values described here are arbitrary values; for actual values, consult the limits defined by your specific model. + public let stopSequences: [String]? + /// Controls the randomness of text generated by the language model, influencing how much the model sticks to the most predictable next words versus exploring more surprising options. A lower temperature value (e.g. 0.2 or 0.3) makes model outputs more deterministic or predictable, while a higher temperature (e.g. 0.8 or 0.9) makes the outputs more creative or unpredictable. + public let temperature: Float? + /// A probability distribution threshold which controls what the model considers for the set of possible next tokens. The model will only consider the top p% of the probability distribution when generating the next token. + public let topP: Float? + + public init(maxTokens: Int? = nil, stopSequences: [String]? = nil, temperature: Float? = nil, topP: Float?
= nil) { + self.maxTokens = maxTokens + self.stopSequences = stopSequences + self.temperature = temperature + self.topP = topP + } + + public func validate(name: String) throws { + try self.validate(self.maxTokens, name: "maxTokens", parent: name, max: 65536) + try self.validate(self.maxTokens, name: "maxTokens", parent: name, min: 0) + try self.validate(self.stopSequences, name: "stopSequences", parent: name, max: 4) + try self.validate(self.temperature, name: "temperature", parent: name, max: 1.0) + try self.validate(self.temperature, name: "temperature", parent: name, min: 0.0) + try self.validate(self.topP, name: "topP", parent: name, max: 1.0) + try self.validate(self.topP, name: "topP", parent: name, min: 0.0) + } + + private enum CodingKeys: String, CodingKey { + case maxTokens = "maxTokens" + case stopSequences = "stopSequences" + case temperature = "temperature" + case topP = "topP" + } + } + public struct TextResponsePart: AWSDecodableShape { /// Contains information about where the text with a citation begins and ends in the generated output. public let span: Span? diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift index 761aab1eca..314557549f 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_api.swift @@ -74,6 +74,32 @@ public struct BedrockRuntime: AWSService { // MARK: API Calls + /// Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide. For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. + @Sendable + public func converse(_ input: ConverseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConverseResponse { + return try await self.client.execute( + operation: "Converse", + path: "/model/{modelId}/converse", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide. For example code, see Conversation streaming example in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModelWithResponseStream action.
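// A hedged sketch of the new Converse call above: a single user turn plus a system prompt.
// The Message(content:role:) initializer is assumed from the generated Message shape, which
// is not shown in this hunk; the model id is a placeholder, and `bedrockRuntime` is assumed
// to be an existing BedrockRuntime service object created from an AWSClient elsewhere.
let converseResponse = try await bedrockRuntime.converse(
    .init(
        messages: [
            .init(content: [.text("Summarise the release notes in two sentences.")], role: .user)
        ],
        modelId: "anthropic.claude-3-sonnet-20240229-v1:0",   // placeholder model id
        system: [.text("You are a concise assistant.")]
    )
)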
+ @Sendable + public func converseStream(_ input: ConverseStreamRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ConverseStreamResponse { + return try await self.client.execute( + operation: "ConverseStream", + path: "/model/{modelId}/converse-stream", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. You use model inference to generate text, images, and embeddings. For example code, see Invoke model code examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. @Sendable public func invokeModel(_ input: InvokeModelRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeModelResponse { diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift index 0623aa4ba3..23bc0630b1 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift @@ -26,68 +26,1214 @@ import Foundation extension BedrockRuntime { // MARK: Enums + public enum ConversationRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case assistant = "assistant" + case user = "user" + public var description: String { return self.rawValue } + } + + public enum DocumentFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case csv = "csv" + case doc = "doc" + case docx = "docx" + case html = "html" + case md = "md" + case pdf = "pdf" + case txt = "txt" + case xls = "xls" + case xlsx = "xlsx" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentFilterConfidence: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case high = "HIGH" + case low = "LOW" + case medium = "MEDIUM" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentFilterType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hate = "HATE" + case insults = "INSULTS" + case misconduct = "MISCONDUCT" + case promptAttack = "PROMPT_ATTACK" + case sexual = "SEXUAL" + case violence = "VIOLENCE" + public var description: String { return self.rawValue } + } + + public enum GuardrailContentPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum GuardrailManagedWordType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case profanity = "PROFANITY" + public var description: String { return self.rawValue } + } + + public enum GuardrailPiiEntityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case address = "ADDRESS" + case age = "AGE" + case awsAccessKey = "AWS_ACCESS_KEY" + case awsSecretKey = "AWS_SECRET_KEY" + case caHealthNumber = "CA_HEALTH_NUMBER" + case caSocialInsuranceNumber = "CA_SOCIAL_INSURANCE_NUMBER" + case creditDebitCardCvv = "CREDIT_DEBIT_CARD_CVV" + case creditDebitCardExpiry = "CREDIT_DEBIT_CARD_EXPIRY" + case creditDebitCardNumber = "CREDIT_DEBIT_CARD_NUMBER" + case driverId = "DRIVER_ID" + case email = "EMAIL" + case internationalBankAccountNumber = "INTERNATIONAL_BANK_ACCOUNT_NUMBER" + case 
ipAddress = "IP_ADDRESS" + case licensePlate = "LICENSE_PLATE" + case macAddress = "MAC_ADDRESS" + case name = "NAME" + case password = "PASSWORD" + case phone = "PHONE" + case pin = "PIN" + case swiftCode = "SWIFT_CODE" + case ukNationalHealthServiceNumber = "UK_NATIONAL_HEALTH_SERVICE_NUMBER" + case ukNationalInsuranceNumber = "UK_NATIONAL_INSURANCE_NUMBER" + case ukUniqueTaxpayerReferenceNumber = "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER" + case url = "URL" + case usBankAccountNumber = "US_BANK_ACCOUNT_NUMBER" + case usBankRoutingNumber = "US_BANK_ROUTING_NUMBER" + case usIndividualTaxIdentificationNumber = "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER" + case usPassportNumber = "US_PASSPORT_NUMBER" + case usSocialSecurityNumber = "US_SOCIAL_SECURITY_NUMBER" + case username = "USERNAME" + case vehicleIdentificationNumber = "VEHICLE_IDENTIFICATION_NUMBER" + public var description: String { return self.rawValue } + } + + public enum GuardrailSensitiveInformationPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case anonymized = "ANONYMIZED" + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum GuardrailStreamProcessingMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `async` = "async" + case sync = "sync" + public var description: String { return self.rawValue } + } + + public enum GuardrailTopicPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum GuardrailTopicType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deny = "DENY" + public var description: String { return self.rawValue } + } + + public enum GuardrailTrace: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "disabled" + case enabled = "enabled" + public var description: String { return self.rawValue } + } + + public enum GuardrailWordPolicyAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case blocked = "BLOCKED" + public var description: String { return self.rawValue } + } + + public enum ImageFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case gif = "gif" + case jpeg = "jpeg" + case png = "png" + case webp = "webp" + public var description: String { return self.rawValue } + } + + public enum StopReason: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case contentFiltered = "content_filtered" + case endTurn = "end_turn" + case guardrailIntervened = "guardrail_intervened" + case maxTokens = "max_tokens" + case stopSequence = "stop_sequence" + case toolUse = "tool_use" + public var description: String { return self.rawValue } + } + + public enum ToolResultStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case error = "error" + case success = "success" + public var description: String { return self.rawValue } + } + public enum Trace: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" public var description: String { return self.rawValue } } - public enum ResponseStream: AWSDecodableShape, Sendable { - /// Content included in the response. - case chunk(PayloadPart) - /// An internal server error occurred. Retry your request. 
- case internalServerException(InternalServerException) - /// An error occurred while streaming the response. Retry your request. - case modelStreamErrorException(ModelStreamErrorException) - /// The request took too long to process. Processing time exceeded the model timeout length. - case modelTimeoutException(ModelTimeoutException) - /// The number or frequency of requests exceeds the limit. Resubmit your request later. - case throttlingException(ThrottlingException) - /// Input validation failed. Check your request parameters and retry the request. - case validationException(ValidationException) + public enum ContentBlock: AWSEncodableShape & AWSDecodableShape, Sendable { + /// A document to include in the message. + case document(DocumentBlock) + /// Contains the content to assess with the guardrail. If you don't specify guardContent in a call to the Converse API, the guardrail (if passed in the Converse API) assesses the entire message. For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. + /// + case guardContent(GuardrailConverseContentBlock) + /// Image to include in the message. This field is only supported by Anthropic Claude 3 models. + case image(ImageBlock) + /// Text to include in the message. + case text(String) + /// The result for a tool request that a model makes. + case toolResult(ToolResultBlock) + /// Information about a tool use request from a model. + case toolUse(ToolUseBlock) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .document: + let value = try container.decode(DocumentBlock.self, forKey: .document) + self = .document(value) + case .guardContent: + let value = try container.decode(GuardrailConverseContentBlock.self, forKey: .guardContent) + self = .guardContent(value) + case .image: + let value = try container.decode(ImageBlock.self, forKey: .image) + self = .image(value) + case .text: + let value = try container.decode(String.self, forKey: .text) + self = .text(value) + case .toolResult: + let value = try container.decode(ToolResultBlock.self, forKey: .toolResult) + self = .toolResult(value) + case .toolUse: + let value = try container.decode(ToolUseBlock.self, forKey: .toolUse) + self = .toolUse(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .document(let value): + try container.encode(value, forKey: .document) + case .guardContent(let value): + try container.encode(value, forKey: .guardContent) + case .image(let value): + try container.encode(value, forKey: .image) + case .text(let value): + try container.encode(value, forKey: .text) + case .toolResult(let value): + try container.encode(value, forKey: .toolResult) + case .toolUse(let value): + try container.encode(value, forKey: .toolUse) + } + } + + public func validate(name: String) throws { + switch self { + case .toolResult(let value): + try value.validate(name: "\(name).toolResult") + case .toolUse(let value): + try value.validate(name: "\(name).toolUse") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case document = "document" + case guardContent = 
"guardContent" + case image = "image" + case text = "text" + case toolResult = "toolResult" + case toolUse = "toolUse" + } + } + + public enum ContentBlockDelta: AWSDecodableShape, Sendable { + /// The content text. + case text(String) + /// Information about a tool that the model is requesting to use. + case toolUse(ToolUseBlockDelta) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .text: + let value = try container.decode(String.self, forKey: .text) + self = .text(value) + case .toolUse: + let value = try container.decode(ToolUseBlockDelta.self, forKey: .toolUse) + self = .toolUse(value) + } + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + case toolUse = "toolUse" + } + } + + public enum ConverseStreamOutput: AWSDecodableShape, Sendable { + /// The messages output content block delta. + case contentBlockDelta(ContentBlockDeltaEvent) + /// Start information for a content block. + case contentBlockStart(ContentBlockStartEvent) + /// Stop information for a content block. + case contentBlockStop(ContentBlockStopEvent) + /// An internal server error occurred. Retry your request. + case internalServerException(InternalServerException) + /// Message start information. + case messageStart(MessageStartEvent) + /// Message stop information. + case messageStop(MessageStopEvent) + /// Metadata for the converse output stream. + case metadata(ConverseStreamMetadataEvent) + /// A streaming error occurred. Retry your request. + case modelStreamErrorException(ModelStreamErrorException) + /// The number of requests exceeds the limit. Resubmit your request later. + case throttlingException(ThrottlingException) + /// Input validation failed. Check your request parameters and retry the request. 
+ case validationException(ValidationException) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .contentBlockDelta: + let value = try container.decode(ContentBlockDeltaEvent.self, forKey: .contentBlockDelta) + self = .contentBlockDelta(value) + case .contentBlockStart: + let value = try container.decode(ContentBlockStartEvent.self, forKey: .contentBlockStart) + self = .contentBlockStart(value) + case .contentBlockStop: + let value = try container.decode(ContentBlockStopEvent.self, forKey: .contentBlockStop) + self = .contentBlockStop(value) + case .internalServerException: + let value = try container.decode(InternalServerException.self, forKey: .internalServerException) + self = .internalServerException(value) + case .messageStart: + let value = try container.decode(MessageStartEvent.self, forKey: .messageStart) + self = .messageStart(value) + case .messageStop: + let value = try container.decode(MessageStopEvent.self, forKey: .messageStop) + self = .messageStop(value) + case .metadata: + let value = try container.decode(ConverseStreamMetadataEvent.self, forKey: .metadata) + self = .metadata(value) + case .modelStreamErrorException: + let value = try container.decode(ModelStreamErrorException.self, forKey: .modelStreamErrorException) + self = .modelStreamErrorException(value) + case .throttlingException: + let value = try container.decode(ThrottlingException.self, forKey: .throttlingException) + self = .throttlingException(value) + case .validationException: + let value = try container.decode(ValidationException.self, forKey: .validationException) + self = .validationException(value) + } + } + + private enum CodingKeys: String, CodingKey { + case contentBlockDelta = "contentBlockDelta" + case contentBlockStart = "contentBlockStart" + case contentBlockStop = "contentBlockStop" + case internalServerException = "internalServerException" + case messageStart = "messageStart" + case messageStop = "messageStop" + case metadata = "metadata" + case modelStreamErrorException = "modelStreamErrorException" + case throttlingException = "throttlingException" + case validationException = "validationException" + } + } + + public enum ResponseStream: AWSDecodableShape, Sendable { + /// Content included in the response. + case chunk(PayloadPart) + /// An internal server error occurred. Retry your request. + case internalServerException(InternalServerException) + /// An error occurred while streaming the response. Retry your request. + case modelStreamErrorException(ModelStreamErrorException) + /// The request took too long to process. Processing time exceeded the model timeout length. + case modelTimeoutException(ModelTimeoutException) + /// The number or frequency of requests exceeds the limit. Resubmit your request later. + case throttlingException(ThrottlingException) + /// Input validation failed. Check your request parameters and retry the request. 
+ case validationException(ValidationException) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .chunk: + let value = try container.decode(PayloadPart.self, forKey: .chunk) + self = .chunk(value) + case .internalServerException: + let value = try container.decode(InternalServerException.self, forKey: .internalServerException) + self = .internalServerException(value) + case .modelStreamErrorException: + let value = try container.decode(ModelStreamErrorException.self, forKey: .modelStreamErrorException) + self = .modelStreamErrorException(value) + case .modelTimeoutException: + let value = try container.decode(ModelTimeoutException.self, forKey: .modelTimeoutException) + self = .modelTimeoutException(value) + case .throttlingException: + let value = try container.decode(ThrottlingException.self, forKey: .throttlingException) + self = .throttlingException(value) + case .validationException: + let value = try container.decode(ValidationException.self, forKey: .validationException) + self = .validationException(value) + } + } + + private enum CodingKeys: String, CodingKey { + case chunk = "chunk" + case internalServerException = "internalServerException" + case modelStreamErrorException = "modelStreamErrorException" + case modelTimeoutException = "modelTimeoutException" + case throttlingException = "throttlingException" + case validationException = "validationException" + } + } + + public enum SystemContentBlock: AWSEncodableShape, Sendable { + /// A content block to assess with the guardrail. Use with the Converse API (Converse and ConverseStream). For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. + case guardContent(GuardrailConverseContentBlock) + /// A system prompt for the model. + case text(String) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .guardContent(let value): + try container.encode(value, forKey: .guardContent) + case .text(let value): + try container.encode(value, forKey: .text) + } + } + + public func validate(name: String) throws { + switch self { + case .text(let value): + try self.validate(value, name: "text", parent: name, min: 1) + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case guardContent = "guardContent" + case text = "text" + } + } + + public enum ToolChoice: AWSEncodableShape, Sendable { + /// The model must request at least one tool (no text is generated). + case any(AnyToolChoice) + /// (Default). The Model automatically decides if a tool should be called or whether to generate text instead. + case auto(AutoToolChoice) + /// The Model must request the specified tool. Only supported by Anthropic Claude 3 models. 
+ case tool(SpecificToolChoice) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .any(let value): + try container.encode(value, forKey: .any) + case .auto(let value): + try container.encode(value, forKey: .auto) + case .tool(let value): + try container.encode(value, forKey: .tool) + } + } + + public func validate(name: String) throws { + switch self { + case .tool(let value): + try value.validate(name: "\(name).tool") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case any = "any" + case auto = "auto" + case tool = "tool" + } + } + + public enum ToolResultContentBlock: AWSEncodableShape & AWSDecodableShape, Sendable { + /// A tool result that is a document. + case document(DocumentBlock) + /// A tool result that is an image. This field is only supported by Anthropic Claude 3 models. + case image(ImageBlock) + /// A tool result that is JSON format data. + case json(String) + /// A tool result that is text. + case text(String) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .document: + let value = try container.decode(DocumentBlock.self, forKey: .document) + self = .document(value) + case .image: + let value = try container.decode(ImageBlock.self, forKey: .image) + self = .image(value) + case .json: + let value = try container.decode(String.self, forKey: .json) + self = .json(value) + case .text: + let value = try container.decode(String.self, forKey: .text) + self = .text(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .document(let value): + try container.encode(value, forKey: .document) + case .image(let value): + try container.encode(value, forKey: .image) + case .json(let value): + try container.encode(value, forKey: .json) + case .text(let value): + try container.encode(value, forKey: .text) + } + } + + private enum CodingKeys: String, CodingKey { + case document = "document" + case image = "image" + case json = "json" + case text = "text" + } + } + + // MARK: Shapes + + public struct AnyToolChoice: AWSEncodableShape { + public init() {} + } + + public struct AutoToolChoice: AWSEncodableShape { + public init() {} + } + + public struct ContentBlockDeltaEvent: AWSDecodableShape { + /// The block index for a content block delta event. + public let contentBlockIndex: Int + /// The delta for a content block delta event. + public let delta: ContentBlockDelta + + public init(contentBlockIndex: Int, delta: ContentBlockDelta) { + self.contentBlockIndex = contentBlockIndex + self.delta = delta + } + + private enum CodingKeys: String, CodingKey { + case contentBlockIndex = "contentBlockIndex" + case delta = "delta" + } + } + + public struct ContentBlockStartEvent: AWSDecodableShape { + /// The index for a content block start event. + public let contentBlockIndex: Int + /// Start information about a content block start event. 
+ public let start: ContentBlockStart + + public init(contentBlockIndex: Int, start: ContentBlockStart) { + self.contentBlockIndex = contentBlockIndex + self.start = start + } + + private enum CodingKeys: String, CodingKey { + case contentBlockIndex = "contentBlockIndex" + case start = "start" + } + } + + public struct ContentBlockStopEvent: AWSDecodableShape { + /// The index for a content block. + public let contentBlockIndex: Int + + public init(contentBlockIndex: Int) { + self.contentBlockIndex = contentBlockIndex + } + + private enum CodingKeys: String, CodingKey { + case contentBlockIndex = "contentBlockIndex" + } + } + + public struct ConverseMetrics: AWSDecodableShape { + /// The latency of the call to Converse, in milliseconds. + public let latencyMs: Int64 + + public init(latencyMs: Int64) { + self.latencyMs = latencyMs + } + + private enum CodingKeys: String, CodingKey { + case latencyMs = "latencyMs" + } + } + + public struct ConverseRequest: AWSEncodableShape { + /// Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse supports in the inferenceConfig field. For more information, see Model parameters. + public let additionalModelRequestFields: String? + /// Additional model parameters field paths to return in the response. Converse returns the requested fields as a JSON Pointer object in the additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths. [ "/stop_sequence" ] For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. Converse rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse. + public let additionalModelResponseFieldPaths: [String]? + /// Configuration information for a guardrail that you want to use in the request. + public let guardrailConfig: GuardrailConfiguration? + /// Inference parameters to pass to the model. Converse supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field. + public let inferenceConfig: InferenceConfiguration? + /// The messages that you want to send to the model. + public let messages: [Message] + /// The identifier for the model that you want to call. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. + public let modelId: String + /// A system prompt to pass to the model. + public let system: [SystemContentBlock]? + /// Configuration information for the tools that the model can use when generating a response. This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models. + public let toolConfig: ToolConfiguration? + + public init(additionalModelRequestFields: String? 
= nil, additionalModelResponseFieldPaths: [String]? = nil, guardrailConfig: GuardrailConfiguration? = nil, inferenceConfig: InferenceConfiguration? = nil, messages: [Message], modelId: String, system: [SystemContentBlock]? = nil, toolConfig: ToolConfiguration? = nil) { + self.additionalModelRequestFields = additionalModelRequestFields + self.additionalModelResponseFieldPaths = additionalModelResponseFieldPaths + self.guardrailConfig = guardrailConfig + self.inferenceConfig = inferenceConfig + self.messages = messages + self.modelId = modelId + self.system = system + self.toolConfig = toolConfig + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.additionalModelRequestFields, forKey: .additionalModelRequestFields) + try container.encodeIfPresent(self.additionalModelResponseFieldPaths, forKey: .additionalModelResponseFieldPaths) + try container.encodeIfPresent(self.guardrailConfig, forKey: .guardrailConfig) + try container.encodeIfPresent(self.inferenceConfig, forKey: .inferenceConfig) + try container.encode(self.messages, forKey: .messages) + request.encodePath(self.modelId, key: "modelId") + try container.encodeIfPresent(self.system, forKey: .system) + try container.encodeIfPresent(self.toolConfig, forKey: .toolConfig) + } + + public func validate(name: String) throws { + try self.validate(self.additionalModelResponseFieldPaths, name: "additionalModelResponseFieldPaths", parent: name, max: 10) + try self.guardrailConfig?.validate(name: "\(name).guardrailConfig") + try self.inferenceConfig?.validate(name: "\(name).inferenceConfig") + try self.messages.forEach { + try $0.validate(name: "\(name).messages[]") + } + try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) + try self.validate(self.modelId, name: "modelId", parent: name, min: 1) + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.system?.forEach { + try $0.validate(name: "\(name).system[]") + } + try self.toolConfig?.validate(name: "\(name).toolConfig") + } + + private enum CodingKeys: String, CodingKey { + case additionalModelRequestFields = "additionalModelRequestFields" + case additionalModelResponseFieldPaths = "additionalModelResponseFieldPaths" + case guardrailConfig = "guardrailConfig" + case inferenceConfig = "inferenceConfig" + case messages = "messages" + case system = "system" + case toolConfig = "toolConfig" + } + } + + public struct ConverseResponse: AWSDecodableShape { + /// Additional fields in the response that are unique to the model. + public let additionalModelResponseFields: String? + /// Metrics for the call to Converse. + public let metrics: ConverseMetrics + /// The result from the call to Converse. + public let output: ConverseOutput + /// The reason why the model stopped generating output. + public let stopReason: StopReason + /// A trace object that contains information about the Guardrail behavior. + public let trace: ConverseTrace? + /// The total number of tokens used in the call to Converse. 
The total includes the tokens input to the model and the tokens generated by the model. + public let usage: TokenUsage + + public init(additionalModelResponseFields: String? = nil, metrics: ConverseMetrics, output: ConverseOutput, stopReason: StopReason, trace: ConverseTrace? = nil, usage: TokenUsage) { + self.additionalModelResponseFields = additionalModelResponseFields + self.metrics = metrics + self.output = output + self.stopReason = stopReason + self.trace = trace + self.usage = usage + } + + private enum CodingKeys: String, CodingKey { + case additionalModelResponseFields = "additionalModelResponseFields" + case metrics = "metrics" + case output = "output" + case stopReason = "stopReason" + case trace = "trace" + case usage = "usage" + } + } + + public struct ConverseStreamMetadataEvent: AWSDecodableShape { + /// The metrics for the conversation stream metadata event. + public let metrics: ConverseStreamMetrics + /// The trace object in the response from ConverseStream that contains information about the guardrail behavior. + public let trace: ConverseStreamTrace? + /// Usage information for the conversation stream event. + public let usage: TokenUsage + + public init(metrics: ConverseStreamMetrics, trace: ConverseStreamTrace? = nil, usage: TokenUsage) { + self.metrics = metrics + self.trace = trace + self.usage = usage + } + + private enum CodingKeys: String, CodingKey { + case metrics = "metrics" + case trace = "trace" + case usage = "usage" + } + } + + public struct ConverseStreamMetrics: AWSDecodableShape { + /// The latency for the streaming request, in milliseconds. + public let latencyMs: Int64 + + public init(latencyMs: Int64) { + self.latencyMs = latencyMs + } + + private enum CodingKeys: String, CodingKey { + case latencyMs = "latencyMs" + } + } + + public struct ConverseStreamRequest: AWSEncodableShape { + /// Additional inference parameters that the model supports, beyond the base set of inference parameters that ConverseStream supports in the inferenceConfig field. + public let additionalModelRequestFields: String? + /// Additional model parameters field paths to return in the response. ConverseStream returns the requested fields as a JSON Pointer object in the additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths. [ "/stop_sequence" ] For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. ConverseStream rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by ConverseStream. + public let additionalModelResponseFieldPaths: [String]? + /// Configuration information for a guardrail that you want to use in the request. + public let guardrailConfig: GuardrailStreamConfiguration? + /// Inference parameters to pass to the model. ConverseStream supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field. + public let inferenceConfig: InferenceConfiguration? + /// The messages that you want to send to the model. + public let messages: [Message] + /// The ID for the model. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. 
If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. + public let modelId: String + /// A system prompt to send to the model. + public let system: [SystemContentBlock]? + /// Configuration information for the tools that the model can use when generating a response. This field is only supported by Anthropic Claude 3 models. + public let toolConfig: ToolConfiguration? + + public init(additionalModelRequestFields: String? = nil, additionalModelResponseFieldPaths: [String]? = nil, guardrailConfig: GuardrailStreamConfiguration? = nil, inferenceConfig: InferenceConfiguration? = nil, messages: [Message], modelId: String, system: [SystemContentBlock]? = nil, toolConfig: ToolConfiguration? = nil) { + self.additionalModelRequestFields = additionalModelRequestFields + self.additionalModelResponseFieldPaths = additionalModelResponseFieldPaths + self.guardrailConfig = guardrailConfig + self.inferenceConfig = inferenceConfig + self.messages = messages + self.modelId = modelId + self.system = system + self.toolConfig = toolConfig + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.additionalModelRequestFields, forKey: .additionalModelRequestFields) + try container.encodeIfPresent(self.additionalModelResponseFieldPaths, forKey: .additionalModelResponseFieldPaths) + try container.encodeIfPresent(self.guardrailConfig, forKey: .guardrailConfig) + try container.encodeIfPresent(self.inferenceConfig, forKey: .inferenceConfig) + try container.encode(self.messages, forKey: .messages) + request.encodePath(self.modelId, key: "modelId") + try container.encodeIfPresent(self.system, forKey: .system) + try container.encodeIfPresent(self.toolConfig, forKey: .toolConfig) + } + + public func validate(name: String) throws { + try self.validate(self.additionalModelResponseFieldPaths, name: "additionalModelResponseFieldPaths", parent: name, max: 10) + try self.guardrailConfig?.validate(name: "\(name).guardrailConfig") + try self.inferenceConfig?.validate(name: "\(name).inferenceConfig") + try self.messages.forEach { + try $0.validate(name: "\(name).messages[]") + } + try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) + try self.validate(self.modelId, name: "modelId", parent: name, min: 1) + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.system?.forEach { + try $0.validate(name: "\(name).system[]") + } + try self.toolConfig?.validate(name: "\(name).toolConfig") + } + + private enum CodingKeys: String, CodingKey { + case additionalModelRequestFields = "additionalModelRequestFields" + case additionalModelResponseFieldPaths = "additionalModelResponseFieldPaths" + case guardrailConfig = "guardrailConfig" + case 
inferenceConfig = "inferenceConfig" + case messages = "messages" + case system = "system" + case toolConfig = "toolConfig" + } + } + + public struct ConverseStreamResponse: AWSDecodableShape { + public static let _options: AWSShapeOptions = [.rawPayload] + /// The output stream that the model generated. + public let stream: AWSEventStream + + public init(stream: AWSEventStream) { + self.stream = stream + } + + public init(from decoder: Decoder) throws { + let container = try decoder.singleValueContainer() + self.stream = try container.decode(AWSEventStream.self) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ConverseStreamTrace: AWSDecodableShape { + /// The guardrail trace object. + public let guardrail: GuardrailTraceAssessment? + + public init(guardrail: GuardrailTraceAssessment? = nil) { + self.guardrail = guardrail + } + + private enum CodingKeys: String, CodingKey { + case guardrail = "guardrail" + } + } + + public struct ConverseTrace: AWSDecodableShape { + /// The guardrail trace object. + public let guardrail: GuardrailTraceAssessment? + + public init(guardrail: GuardrailTraceAssessment? = nil) { + self.guardrail = guardrail + } + + private enum CodingKeys: String, CodingKey { + case guardrail = "guardrail" + } + } + + public struct DocumentBlock: AWSEncodableShape & AWSDecodableShape { + /// The format of a document, or its extension. + public let format: DocumentFormat + /// A name for the document. + public let name: String + /// Contains the content of the document. + public let source: DocumentSource + + public init(format: DocumentFormat, name: String, source: DocumentSource) { + self.format = format + self.name = name + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case format = "format" + case name = "name" + case source = "source" + } + } + + public struct GuardrailAssessment: AWSDecodableShape { + /// The content policy. + public let contentPolicy: GuardrailContentPolicyAssessment? + /// The sensitive information policy. + public let sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? + /// The topic policy. + public let topicPolicy: GuardrailTopicPolicyAssessment? + /// The word policy. + public let wordPolicy: GuardrailWordPolicyAssessment? + + public init(contentPolicy: GuardrailContentPolicyAssessment? = nil, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? = nil, topicPolicy: GuardrailTopicPolicyAssessment? = nil, wordPolicy: GuardrailWordPolicyAssessment? = nil) { + self.contentPolicy = contentPolicy + self.sensitiveInformationPolicy = sensitiveInformationPolicy + self.topicPolicy = topicPolicy + self.wordPolicy = wordPolicy + } + + private enum CodingKeys: String, CodingKey { + case contentPolicy = "contentPolicy" + case sensitiveInformationPolicy = "sensitiveInformationPolicy" + case topicPolicy = "topicPolicy" + case wordPolicy = "wordPolicy" + } + } + + public struct GuardrailConfiguration: AWSEncodableShape { + /// The identifier for the guardrail. + public let guardrailIdentifier: String + /// The version of the guardrail. + public let guardrailVersion: String + /// The trace behavior for the guardrail. + public let trace: GuardrailTrace? + + public init(guardrailIdentifier: String, guardrailVersion: String, trace: GuardrailTrace? 
= nil) { + self.guardrailIdentifier = guardrailIdentifier + self.guardrailVersion = guardrailVersion + self.trace = trace + } + + public func validate(name: String) throws { + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, max: 2048) + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, pattern: "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$") + try self.validate(self.guardrailVersion, name: "guardrailVersion", parent: name, pattern: "^(([1-9][0-9]{0,7})|(DRAFT))$") + } + + private enum CodingKeys: String, CodingKey { + case guardrailIdentifier = "guardrailIdentifier" + case guardrailVersion = "guardrailVersion" + case trace = "trace" + } + } + + public struct GuardrailContentFilter: AWSDecodableShape { + /// The guardrail action. + public let action: GuardrailContentPolicyAction + /// The guardrail confidence. + public let confidence: GuardrailContentFilterConfidence + /// The guardrail type. + public let type: GuardrailContentFilterType + + public init(action: GuardrailContentPolicyAction, confidence: GuardrailContentFilterConfidence, type: GuardrailContentFilterType) { + self.action = action + self.confidence = confidence + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case confidence = "confidence" + case type = "type" + } + } + + public struct GuardrailContentPolicyAssessment: AWSDecodableShape { + /// The content policy filters. + public let filters: [GuardrailContentFilter] + + public init(filters: [GuardrailContentFilter]) { + self.filters = filters + } + + private enum CodingKeys: String, CodingKey { + case filters = "filters" + } + } + + public struct GuardrailConverseTextBlock: AWSEncodableShape & AWSDecodableShape { + /// The text that you want to guard. + public let text: String + + public init(text: String) { + self.text = text + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + + public struct GuardrailCustomWord: AWSDecodableShape { + /// The action for the custom word. + public let action: GuardrailWordPolicyAction + /// The match for the custom word. + public let match: String + + public init(action: GuardrailWordPolicyAction, match: String) { + self.action = action + self.match = match + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + } + } + + public struct GuardrailManagedWord: AWSDecodableShape { + /// The action for the managed word. + public let action: GuardrailWordPolicyAction + /// The match for the managed word. + public let match: String + /// The type for the managed word. + public let type: GuardrailManagedWordType + + public init(action: GuardrailWordPolicyAction, match: String, type: GuardrailManagedWordType) { + self.action = action + self.match = match + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + case type = "type" + } + } + + public struct GuardrailPiiEntityFilter: AWSDecodableShape { + /// The PII entity filter action. + public let action: GuardrailSensitiveInformationPolicyAction + /// The PII entity filter match. + public let match: String + /// The PII entity filter type. 
+ public let type: GuardrailPiiEntityType + + public init(action: GuardrailSensitiveInformationPolicyAction, match: String, type: GuardrailPiiEntityType) { + self.action = action + self.match = match + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + case type = "type" + } + } + + public struct GuardrailRegexFilter: AWSDecodableShape { + /// The regex filter action. + public let action: GuardrailSensitiveInformationPolicyAction + /// The regex filter match. + public let match: String? + /// The regex filter name. + public let name: String? + /// The regex query. + public let regex: String? + + public init(action: GuardrailSensitiveInformationPolicyAction, match: String? = nil, name: String? = nil, regex: String? = nil) { + self.action = action + self.match = match + self.name = name + self.regex = regex + } + + private enum CodingKeys: String, CodingKey { + case action = "action" + case match = "match" + case name = "name" + case regex = "regex" + } + } + + public struct GuardrailSensitiveInformationPolicyAssessment: AWSDecodableShape { + /// The PII entities in the assessment. + public let piiEntities: [GuardrailPiiEntityFilter] + /// The regex queries in the assessment. + public let regexes: [GuardrailRegexFilter] + + public init(piiEntities: [GuardrailPiiEntityFilter], regexes: [GuardrailRegexFilter]) { + self.piiEntities = piiEntities + self.regexes = regexes + } + + private enum CodingKeys: String, CodingKey { + case piiEntities = "piiEntities" + case regexes = "regexes" + } + } + + public struct GuardrailStreamConfiguration: AWSEncodableShape { + /// The identifier for the guardrail. + public let guardrailIdentifier: String + /// The version of the guardrail. + public let guardrailVersion: String + /// The processing mode. For more information, see Configure streaming response behavior in the Amazon Bedrock User Guide. + public let streamProcessingMode: GuardrailStreamProcessingMode? + /// The trace behavior for the guardrail. + public let trace: GuardrailTrace? + + public init(guardrailIdentifier: String, guardrailVersion: String, streamProcessingMode: GuardrailStreamProcessingMode? = nil, trace: GuardrailTrace? = nil) { + self.guardrailIdentifier = guardrailIdentifier + self.guardrailVersion = guardrailVersion + self.streamProcessingMode = streamProcessingMode + self.trace = trace + } + + public func validate(name: String) throws { + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, max: 2048) + try self.validate(self.guardrailIdentifier, name: "guardrailIdentifier", parent: name, pattern: "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$") + try self.validate(self.guardrailVersion, name: "guardrailVersion", parent: name, pattern: "^(([1-9][0-9]{0,7})|(DRAFT))$") + } + + private enum CodingKeys: String, CodingKey { + case guardrailIdentifier = "guardrailIdentifier" + case guardrailVersion = "guardrailVersion" + case streamProcessingMode = "streamProcessingMode" + case trace = "trace" + } + } + + public struct GuardrailTopic: AWSDecodableShape { + /// The action the guardrail should take when it intervenes on a topic. + public let action: GuardrailTopicPolicyAction + /// The name for the guardrail. + public let name: String + /// The type behavior that the guardrail should perform when the model detects the topic.
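// A minimal usage sketch for the guardrail configuration shapes introduced above. The
// guardrail identifier and version are hypothetical example values; the optional trace and
// streamProcessingMode parameters (whose enum cases are defined elsewhere in this file)
// are left at their defaults.
let guardrailConfig = GuardrailConfiguration(
    guardrailIdentifier: "abc123defg45",   // example ID matching the [a-z0-9]+ branch of the validation pattern
    guardrailVersion: "DRAFT"              // "DRAFT" or a numeric version, per the guardrailVersion pattern
)
let streamingGuardrailConfig = GuardrailStreamConfiguration(
    guardrailIdentifier: "abc123defg45",
    guardrailVersion: "1"
)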
+ public let type: GuardrailTopicType - public init(from decoder: Decoder) throws { - let container = try decoder.container(keyedBy: CodingKeys.self) - guard container.allKeys.count == 1, let key = container.allKeys.first else { - let context = DecodingError.Context( - codingPath: container.codingPath, - debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" - ) - throw DecodingError.dataCorrupted(context) - } - switch key { - case .chunk: - let value = try container.decode(PayloadPart.self, forKey: .chunk) - self = .chunk(value) - case .internalServerException: - let value = try container.decode(InternalServerException.self, forKey: .internalServerException) - self = .internalServerException(value) - case .modelStreamErrorException: - let value = try container.decode(ModelStreamErrorException.self, forKey: .modelStreamErrorException) - self = .modelStreamErrorException(value) - case .modelTimeoutException: - let value = try container.decode(ModelTimeoutException.self, forKey: .modelTimeoutException) - self = .modelTimeoutException(value) - case .throttlingException: - let value = try container.decode(ThrottlingException.self, forKey: .throttlingException) - self = .throttlingException(value) - case .validationException: - let value = try container.decode(ValidationException.self, forKey: .validationException) - self = .validationException(value) - } + public init(action: GuardrailTopicPolicyAction, name: String, type: GuardrailTopicType) { + self.action = action + self.name = name + self.type = type } private enum CodingKeys: String, CodingKey { - case chunk = "chunk" - case internalServerException = "internalServerException" - case modelStreamErrorException = "modelStreamErrorException" - case modelTimeoutException = "modelTimeoutException" - case throttlingException = "throttlingException" - case validationException = "validationException" + case action = "action" + case name = "name" + case type = "type" } } - // MARK: Shapes + public struct GuardrailTopicPolicyAssessment: AWSDecodableShape { + /// The topics in the assessment. + public let topics: [GuardrailTopic] + + public init(topics: [GuardrailTopic]) { + self.topics = topics + } + + private enum CodingKeys: String, CodingKey { + case topics = "topics" + } + } + + public struct GuardrailTraceAssessment: AWSDecodableShape { + /// The input assessment. + public let inputAssessment: [String: GuardrailAssessment]? + /// The output from the model. + public let modelOutput: [String]? + /// the output assessments. + public let outputAssessments: [String: [GuardrailAssessment]]? + + public init(inputAssessment: [String: GuardrailAssessment]? = nil, modelOutput: [String]? = nil, outputAssessments: [String: [GuardrailAssessment]]? = nil) { + self.inputAssessment = inputAssessment + self.modelOutput = modelOutput + self.outputAssessments = outputAssessments + } + + private enum CodingKeys: String, CodingKey { + case inputAssessment = "inputAssessment" + case modelOutput = "modelOutput" + case outputAssessments = "outputAssessments" + } + } + + public struct GuardrailWordPolicyAssessment: AWSDecodableShape { + /// Custom words in the assessment. + public let customWords: [GuardrailCustomWord] + /// Managed word lists in the assessment. 
+ public let managedWordLists: [GuardrailManagedWord] + + public init(customWords: [GuardrailCustomWord], managedWordLists: [GuardrailManagedWord]) { + self.customWords = customWords + self.managedWordLists = managedWordLists + } + + private enum CodingKeys: String, CodingKey { + case customWords = "customWords" + case managedWordLists = "managedWordLists" + } + } + + public struct ImageBlock: AWSEncodableShape & AWSDecodableShape { + /// The format of the image. + public let format: ImageFormat + /// The source for the image. + public let source: ImageSource + + public init(format: ImageFormat, source: ImageSource) { + self.format = format + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case format = "format" + case source = "source" + } + } + + public struct InferenceConfiguration: AWSEncodableShape { + /// The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value for the model that you are using. For more information, see Inference parameters for foundation models. + public let maxTokens: Int? + /// A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response. + public let stopSequences: [String]? + /// The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options. The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models. + public let temperature: Float? + /// The percentage of most-likely candidates that the model considers for the next token. For example, if you choose a value of 0.8 for topP, the model selects from the top 80% of the probability distribution of tokens that could be next in the sequence. The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models. + public let topP: Float? + + public init(maxTokens: Int? = nil, stopSequences: [String]? = nil, temperature: Float? = nil, topP: Float? = nil) { + self.maxTokens = maxTokens + self.stopSequences = stopSequences + self.temperature = temperature + self.topP = topP + } + + public func validate(name: String) throws { + try self.stopSequences?.forEach { + try validate($0, name: "stopSequences[]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case maxTokens = "maxTokens" + case stopSequences = "stopSequences" + case temperature = "temperature" + case topP = "topP" + } + } public struct InternalServerException: AWSDecodableShape { public let message: String? @@ -104,9 +1250,9 @@ extension BedrockRuntime { public struct InvokeModelRequest: AWSEncodableShape { /// The desired MIME type of the inference body in the response. The default value is application/json. public let accept: String? - /// The prompt and inference parameters in the format specified in the contentType in the header. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide. + /// The prompt and inference parameters in the format specified in the contentType in the header. You must provide the body in JSON format. 
To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide. public let body: AWSHTTPBody - /// The MIME type of the input data in the request. The default value is application/json. + /// The MIME type of the input data in the request. You must specify application/json. public let contentType: String? /// The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation. An error will be thrown in the following situations. You don't provide a guardrail identifier but you specify the amazon-bedrock-guardrailConfig field in the request body. You enable the guardrail but the contentType isn't application/json. You provide a guardrail identifier, but guardrailVersion isn't specified. public let guardrailIdentifier: String? @@ -177,9 +1323,9 @@ extension BedrockRuntime { public struct InvokeModelWithResponseStreamRequest: AWSEncodableShape { /// The desired MIME type of the inference body in the response. The default value is application/json. public let accept: String? - /// The prompt and inference parameters in the format specified in the contentType in the header. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide. + /// The prompt and inference parameters in the format specified in the contentType in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide. public let body: AWSHTTPBody - /// The MIME type of the input data in the request. The default value is application/json. + /// The MIME type of the input data in the request. You must specify application/json. public let contentType: String? /// The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation. An error is thrown in the following situations. You don't provide a guardrail identifier but you specify the amazon-bedrock-guardrailConfig field in the request body. You enable the guardrail but the contentType isn't application/json. You provide a guardrail identifier, but guardrailVersion isn't specified. public let guardrailIdentifier: String? @@ -247,6 +1393,59 @@ extension BedrockRuntime { private enum CodingKeys: CodingKey {} } + public struct Message: AWSEncodableShape & AWSDecodableShape { + /// The message content. + public let content: [ContentBlock] + /// The role that the message plays in the message. + public let role: ConversationRole + + public init(content: [ContentBlock], role: ConversationRole) { + self.content = content + self.role = role + } + + public func validate(name: String) throws { + try self.content.forEach { + try $0.validate(name: "\(name).content[]") + } + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case role = "role" + } + } + + public struct MessageStartEvent: AWSDecodableShape { + /// The role for the message. + public let role: ConversationRole + + public init(role: ConversationRole) { + self.role = role + } + + private enum CodingKeys: String, CodingKey { + case role = "role" + } + } + + public struct MessageStopEvent: AWSDecodableShape { + /// The additional model response fields. 
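// A minimal usage sketch for the Converse request shapes above: an InferenceConfiguration
// plus a single user message. The model ID, prompt text, and the ContentBlock.text /
// ConversationRole.user cases are assumed example values defined elsewhere in this file;
// the converse(_:) client call itself lives in BedrockRuntime_api.swift and is not shown here.
let inferenceConfig = InferenceConfiguration(
    maxTokens: 512,                 // cap the length of the generated response
    stopSequences: ["</answer>"],   // hypothetical stop sequence
    temperature: 0.5,
    topP: 0.9
)
let request = ConverseRequest(
    inferenceConfig: inferenceConfig,
    messages: [Message(content: [.text("Summarize the release notes for this update.")], role: .user)],
    modelId: "anthropic.claude-3-sonnet-20240229-v1:0"   // example on-demand model ID
)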
+ public let additionalModelResponseFields: String? + /// The reason why the model stopped generating output. + public let stopReason: StopReason + + public init(additionalModelResponseFields: String? = nil, stopReason: StopReason) { + self.additionalModelResponseFields = additionalModelResponseFields + self.stopReason = stopReason + } + + private enum CodingKeys: String, CodingKey { + case additionalModelResponseFields = "additionalModelResponseFields" + case stopReason = "stopReason" + } + } + public struct ModelStreamErrorException: AWSDecodableShape { public let message: String? /// The original message. @@ -292,6 +1491,25 @@ extension BedrockRuntime { } } + public struct SpecificToolChoice: AWSEncodableShape { + /// The name of the tool that the model must request. + public let name: String + + public init(name: String) { + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9_]*$") + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + } + } + public struct ThrottlingException: AWSDecodableShape { public let message: String? @@ -304,6 +1522,166 @@ extension BedrockRuntime { } } + public struct TokenUsage: AWSDecodableShape { + /// The number of tokens sent in the request to the model. + public let inputTokens: Int + /// The number of tokens that the model generated for the request. + public let outputTokens: Int + /// The total of input tokens and tokens generated by the model. + public let totalTokens: Int + + public init(inputTokens: Int, outputTokens: Int, totalTokens: Int) { + self.inputTokens = inputTokens + self.outputTokens = outputTokens + self.totalTokens = totalTokens + } + + private enum CodingKeys: String, CodingKey { + case inputTokens = "inputTokens" + case outputTokens = "outputTokens" + case totalTokens = "totalTokens" + } + } + + public struct ToolConfiguration: AWSEncodableShape { + /// If supported by the model, forces the model to request a tool. + public let toolChoice: ToolChoice? + /// An array of tools that you want to pass to a model. + public let tools: [Tool] + + public init(toolChoice: ToolChoice? = nil, tools: [Tool]) { + self.toolChoice = toolChoice + self.tools = tools + } + + public func validate(name: String) throws { + try self.toolChoice?.validate(name: "\(name).toolChoice") + try self.tools.forEach { + try $0.validate(name: "\(name).tools[]") + } + } + + private enum CodingKeys: String, CodingKey { + case toolChoice = "toolChoice" + case tools = "tools" + } + } + + public struct ToolResultBlock: AWSEncodableShape & AWSDecodableShape { + /// The content for the tool result content block. + public let content: [ToolResultContentBlock] + /// The status for the tool result content block. This field is only supported by Anthropic Claude 3 models. + public let status: ToolResultStatus? + /// The ID of the tool request that this is the result for. + public let toolUseId: String + + public init(content: [ToolResultContentBlock], status: ToolResultStatus?
= nil, toolUseId: String) { + self.content = content + self.status = status + self.toolUseId = toolUseId + } + + public func validate(name: String) throws { + try self.validate(self.toolUseId, name: "toolUseId", parent: name, max: 64) + try self.validate(self.toolUseId, name: "toolUseId", parent: name, min: 1) + try self.validate(self.toolUseId, name: "toolUseId", parent: name, pattern: "^[a-zA-Z0-9_-]+$") + } + + private enum CodingKeys: String, CodingKey { + case content = "content" + case status = "status" + case toolUseId = "toolUseId" + } + } + + public struct ToolSpecification: AWSEncodableShape { + /// The description for the tool. + public let description: String? + /// The input schema for the tool in JSON format. + public let inputSchema: ToolInputSchema + /// The name for the tool. + public let name: String + + public init(description: String? = nil, inputSchema: ToolInputSchema, name: String) { + self.description = description + self.inputSchema = inputSchema + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9_]*$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case inputSchema = "inputSchema" + case name = "name" + } + } + + public struct ToolUseBlock: AWSEncodableShape & AWSDecodableShape { + /// The input to pass to the tool. + public let input: String + /// The name of the tool that the model wants to use. + public let name: String + /// The ID for the tool request. + public let toolUseId: String + + public init(input: String, name: String, toolUseId: String) { + self.input = input + self.name = name + self.toolUseId = toolUseId + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 64) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9_]*$") + try self.validate(self.toolUseId, name: "toolUseId", parent: name, max: 64) + try self.validate(self.toolUseId, name: "toolUseId", parent: name, min: 1) + try self.validate(self.toolUseId, name: "toolUseId", parent: name, pattern: "^[a-zA-Z0-9_-]+$") + } + + private enum CodingKeys: String, CodingKey { + case input = "input" + case name = "name" + case toolUseId = "toolUseId" + } + } + + public struct ToolUseBlockDelta: AWSDecodableShape { + /// The input for a requested tool. + public let input: String + + public init(input: String) { + self.input = input + } + + private enum CodingKeys: String, CodingKey { + case input = "input" + } + } + + public struct ToolUseBlockStart: AWSDecodableShape { + /// The name of the tool that the model is requesting to use. + public let name: String + /// The ID for the tool request. + public let toolUseId: String + + public init(name: String, toolUseId: String) { + self.name = name + self.toolUseId = toolUseId + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case toolUseId = "toolUseId" + } + } + public struct ValidationException: AWSDecodableShape { public let message: String? 
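// A minimal usage sketch for the tool-use shapes above: a single tool specification wrapped
// in a ToolConfiguration. The tool name, description, and JSON schema are hypothetical
// example values; Tool and ToolInputSchema are defined further down in this extension.
let weatherTool = ToolSpecification(
    description: "Returns the current temperature for a named city",
    inputSchema: ToolInputSchema(json: #"{"type":"object","properties":{"city":{"type":"string"}},"required":["city"]}"#),
    name: "get_weather"
)
let toolConfig = ToolConfiguration(
    toolChoice: .auto(AutoToolChoice()),   // let the model decide whether to call the tool
    tools: [Tool(toolSpec: weatherTool)]
)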
@@ -315,6 +1693,101 @@ extension BedrockRuntime { case message = "message" } } + + public struct ContentBlockStart: AWSDecodableShape { + /// Information about a tool that the model is requesting to use. + public let toolUse: ToolUseBlockStart? + + public init(toolUse: ToolUseBlockStart? = nil) { + self.toolUse = toolUse + } + + private enum CodingKeys: String, CodingKey { + case toolUse = "toolUse" + } + } + + public struct ConverseOutput: AWSDecodableShape { + /// The message that the model generates. + public let message: Message? + + public init(message: Message? = nil) { + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case message = "message" + } + } + + public struct DocumentSource: AWSEncodableShape & AWSDecodableShape { + /// A base64-encoded string of a UTF-8 encoded file that is the document to include in the message. + public let bytes: AWSBase64Data? + + public init(bytes: AWSBase64Data? = nil) { + self.bytes = bytes + } + + private enum CodingKeys: String, CodingKey { + case bytes = "bytes" + } + } + + public struct GuardrailConverseContentBlock: AWSEncodableShape & AWSDecodableShape { + /// The text to guard. + public let text: GuardrailConverseTextBlock? + + public init(text: GuardrailConverseTextBlock? = nil) { + self.text = text + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + + public struct ImageSource: AWSEncodableShape & AWSDecodableShape { + /// The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes. + public let bytes: AWSBase64Data? + + public init(bytes: AWSBase64Data? = nil) { + self.bytes = bytes + } + + private enum CodingKeys: String, CodingKey { + case bytes = "bytes" + } + } + + public struct Tool: AWSEncodableShape { + /// The specification for the tool. + public let toolSpec: ToolSpecification? + + public init(toolSpec: ToolSpecification? = nil) { + self.toolSpec = toolSpec + } + + public func validate(name: String) throws { + try self.toolSpec?.validate(name: "\(name).toolSpec") + } + + private enum CodingKeys: String, CodingKey { + case toolSpec = "toolSpec" + } + } + + public struct ToolInputSchema: AWSEncodableShape { + /// The JSON schema for the tool. For more information, see JSON Schema Reference. + public let json: String? + + public init(json: String? = nil) { + self.json = json + } + + private enum CodingKeys: String, CodingKey { + case json = "json" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/Budgets/Budgets_api.swift b/Sources/Soto/Services/Budgets/Budgets_api.swift index 125eae7665..033ee3186c 100644 --- a/Sources/Soto/Services/Budgets/Budgets_api.swift +++ b/Sources/Soto/Services/Budgets/Budgets_api.swift @@ -253,8 +253,7 @@ public struct Budgets: AWSService { ) } - /// Lists the budget names and notifications that are associated with an account. - /// + /// Lists the budget names and notifications that are associated with an account. @Sendable public func describeBudgetNotificationsForAccount(_ input: DescribeBudgetNotificationsForAccountRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeBudgetNotificationsForAccountResponse { return try await self.client.execute( @@ -332,6 +331,45 @@ public struct Budgets: AWSService { ) } + /// Lists tags associated with a budget or budget action resource.
+ @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates tags for a budget or budget action resource. + @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + return try await self.client.execute( + operation: "TagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes tags associated with a budget or budget action resource. + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + return try await self.client.execute( + operation: "UntagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates a budget. You can change every part of a budget except for the budgetName and the calculatedSpend. When you modify a budget, the calculatedSpend drops to zero until Amazon Web Services has new usage data to use for forecasting. Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax at one time. Use the syntax that matches your case. The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples section. @Sendable public func updateBudget(_ input: UpdateBudgetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateBudgetResponse { @@ -455,8 +493,7 @@ extension Budgets { ) } - /// Lists the budget names and notifications that are associated with an account. - /// + /// Lists the budget names and notifications that are associated with an account. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/Budgets/Budgets_shapes.swift b/Sources/Soto/Services/Budgets/Budgets_shapes.swift index c55c1910f9..f5d7cb6792 100644 --- a/Sources/Soto/Services/Budgets/Budgets_shapes.swift +++ b/Sources/Soto/Services/Budgets/Budgets_shapes.swift @@ -138,16 +138,16 @@ extension Budgets { public let actionId: String /// The trigger threshold of the action. public let actionThreshold: ActionThreshold - /// The type of action. This defines the type of tasks that can be carried out by this action. - /// This field also determines the format for definition. + /// The type of action. This defines the type of tasks that can be carried out by this + /// action. This field also determines the format for definition. public let actionType: ActionType /// This specifies if the action needs manual or automatic approval. public let approvalModel: ApprovalModel public let budgetName: String /// Where you specify all of the type-specific parameters. public let definition: Definition - /// The role passed for action execution and reversion. Roles and actions must be in the same - /// account. + /// The role passed for action execution and reversion. Roles and actions must be in the + /// same account. public let executionRoleArn: String public let notificationType: NotificationType /// The status of the action. @@ -184,8 +184,8 @@ extension Budgets { public struct ActionHistory: AWSDecodableShape { /// The description of the details for the event. 
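// A minimal usage sketch for the new Budgets tagging operations added above. The ARN is a
// hypothetical example, `budgets` stands for an existing Budgets service client, and the
// member names of ListTagsForResourceRequest, TagResourceRequest, and ResourceTag are
// assumptions based on the usual Soto shape conventions; their definitions sit outside this hunk.
let budgetArn = "arn:aws:budgets::123456789012:budget/example-budget"   // hypothetical resource ARN
let tags = try await budgets.listTagsForResource(ListTagsForResourceRequest(resourceARN: budgetArn))
try await budgets.tagResource(TagResourceRequest(resourceARN: budgetArn, resourceTags: [ResourceTag(key: "team", value: "finance")]))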
public let actionHistoryDetails: ActionHistoryDetails - /// This distinguishes between whether the events are triggered by the user or are generated by - /// the system. + /// This distinguishes between whether the events are triggered by the user or are + /// generated by the system. public let eventType: EventType /// The status of action at the time of the event. public let status: ActionStatus @@ -243,9 +243,11 @@ extension Budgets { } public struct AutoAdjustData: AWSEncodableShape & AWSDecodableShape { - /// The string that defines whether your budget auto-adjusts based on historical or forecasted data. + /// The string that defines whether your budget auto-adjusts based on historical or + /// forecasted data. public let autoAdjustType: AutoAdjustType - /// The parameters that define or describe the historical data that your auto-adjusting budget is based on. + /// The parameters that define or describe the historical data that your auto-adjusting + /// budget is based on. public let historicalOptions: HistoricalOptions? /// The last time that your budget was auto-adjusted. public let lastAutoAdjustTime: Date? @@ -270,36 +272,46 @@ extension Budgets { public struct Budget: AWSEncodableShape & AWSDecodableShape { /// The parameters that determine the budget amount for an auto-adjusting budget. public let autoAdjustData: AutoAdjustData? - /// The total amount of cost, usage, RI utilization, RI coverage, Savings Plans utilization, or - /// Savings Plans coverage that you want to track with your budget. BudgetLimit is required for cost or usage budgets, but optional for RI or + /// The total amount of cost, usage, RI utilization, RI coverage, Savings Plans + /// utilization, or Savings Plans coverage that you want to track with your budget. BudgetLimit is required for cost or usage budgets, but optional for RI or /// Savings Plans utilization or coverage budgets. RI and Savings Plans utilization or /// coverage budgets default to 100. This is the only valid value for RI or /// Savings Plans utilization or coverage budgets. You can't use BudgetLimit /// with PlannedBudgetLimits for CreateBudget and /// UpdateBudget actions. public let budgetLimit: Spend? - /// The name of a budget. The name must be unique within an account. The : and - /// \ characters, and the "/action/" substring, aren't allowed in + /// The name of a budget. The name must be unique within an account. The : + /// and \ characters, and the "/action/" substring, aren't allowed in /// BudgetName. public let budgetName: String - /// Specifies whether this budget tracks costs, usage, RI utilization, RI coverage, Savings - /// Plans utilization, or Savings Plans coverage. + /// Specifies whether this budget tracks costs, usage, RI utilization, RI coverage, + /// Savings Plans utilization, or Savings Plans coverage. public let budgetType: BudgetType /// The actual and forecasted cost or usage that the budget tracks. public let calculatedSpend: CalculatedSpend? - /// The cost filters, such as Region, Service, member account, Tag, or Cost Category, that are applied to a budget. Amazon Web Services Budgets supports the following services as a Service filter for RI budgets: Amazon EC2 Amazon Redshift Amazon Relational Database Service Amazon ElastiCache Amazon OpenSearch Service + /// The cost filters, such as Region, Service, + /// LinkedAccount, Tag, or CostCategory, that are + /// applied to a budget. 
Amazon Web Services Budgets supports the following services as a Service filter for RI budgets: Amazon EC2 Amazon Redshift Amazon Relational Database Service Amazon ElastiCache Amazon OpenSearch Service public let costFilters: [String: [String]]? - /// The types of costs that are included in this COST budget. USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE budgets do not have CostTypes. + /// The types of costs that are included in this COST budget. USAGE, RI_UTILIZATION, RI_COVERAGE, + /// SAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE + /// budgets do not have CostTypes. public let costTypes: CostTypes? /// The last time that you updated this budget. public let lastUpdatedTime: Date? - /// A map containing multiple BudgetLimit, including current or future limits. PlannedBudgetLimits is available for cost or usage budget and supports both - /// monthly and quarterly TimeUnit. For monthly budgets, provide 12 months of PlannedBudgetLimits values. This must start from the current month and include the next 11 months. The key is the start of the month, UTC in epoch seconds. For quarterly budgets, provide four quarters of PlannedBudgetLimits value + /// A map containing multiple BudgetLimit, including current or future + /// limits. PlannedBudgetLimits is available for cost or usage budget and supports + /// both monthly and quarterly TimeUnit. For monthly budgets, provide 12 months of PlannedBudgetLimits values. + /// This must start from the current month and include the next 11 months. The + /// key is the start of the month, UTC in epoch seconds. For quarterly budgets, provide four quarters of PlannedBudgetLimits value /// entries in standard calendar quarter increments. This must start from the current /// quarter and include the next three quarters. The key is the start of the - /// quarter, UTC in epoch seconds. If the planned budget expires before 12 months for monthly or four quarters for quarterly, - /// provide the PlannedBudgetLimits values only for the remaining - /// periods. If the budget begins at a date in the future, provide PlannedBudgetLimits values from the start date of the budget. After all of the BudgetLimit values in PlannedBudgetLimits are used, the budget continues to use the last limit as the BudgetLimit. At that point, the planned budget provides the same experience as a fixed budget. DescribeBudget and DescribeBudgets response along with + /// quarter, UTC in epoch seconds. If the planned budget expires before 12 months for monthly or four quarters for + /// quarterly, provide the PlannedBudgetLimits values only for the remaining + /// periods. If the budget begins at a date in the future, provide PlannedBudgetLimits + /// values from the start date of the budget. After all of the BudgetLimit values in PlannedBudgetLimits + /// are used, the budget continues to use the last limit as the BudgetLimit. At + /// that point, the planned budget provides the same experience as a fixed budget. DescribeBudget and DescribeBudgets response along with /// PlannedBudgetLimits also contain BudgetLimit representing /// the current month or quarter limit present in PlannedBudgetLimits. This /// only applies to budgets that are created with PlannedBudgetLimits. Budgets @@ -307,15 +319,15 @@ extension Budgets { /// BudgetLimit. They don't contain /// PlannedBudgetLimits. public let plannedBudgetLimits: [String: Spend]? - /// The period of time that's covered by a budget. You setthe start date and end date. 
The start - /// date must come before the end date. The end date must come before 06/15/87 00:00 - /// UTC. If you create your budget and don't specify a start date, Amazon Web Services defaults to the - /// start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, - /// if you created your budget on January 24, 2018, chose DAILY, and didn't set - /// a start date, Amazon Web Services set your start date to 01/24/18 00:00 UTC. - /// If you chose MONTHLY, Amazon Web Services set your start date to - /// 01/01/18 00:00 UTC. If you didn't specify an end date, Amazon Web Services set your end date to 06/15/87 00:00 UTC. The defaults are the same for - /// the Billing and Cost Management console and the API. You can change either date with the UpdateBudget operation. After the end date, Amazon Web Services deletes the budget and all the associated + /// The period of time that's covered by a budget. You setthe start date and end date. The + /// start date must come before the end date. The end date must come before 06/15/87 + /// 00:00 UTC. If you create your budget and don't specify a start date, Amazon Web Services defaults + /// to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For + /// example, if you created your budget on January 24, 2018, chose DAILY, and + /// didn't set a start date, Amazon Web Services set your start date to 01/24/18 00:00 + /// UTC. If you chose MONTHLY, Amazon Web Services set your start + /// date to 01/01/18 00:00 UTC. If you didn't specify an end date, Amazon Web Services set your end date to 06/15/87 00:00 UTC. The defaults are + /// the same for the Billing and Cost Management console and the API. You can change either date with the UpdateBudget operation. After the end date, Amazon Web Services deletes the budget and all the associated /// notifications and subscribers. public let timePeriod: TimePeriod? /// The length of time until a budget resets the actual and forecasted spend. @@ -340,7 +352,7 @@ extension Budgets { try self.budgetLimit?.validate(name: "\(name).budgetLimit") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.calculatedSpend?.validate(name: "\(name).calculatedSpend") try self.costFilters?.forEach { try validate($0.key, name: "costFilters.key", parent: name, max: 2147483647) @@ -384,8 +396,8 @@ extension Budgets { } public struct BudgetPerformanceHistory: AWSDecodableShape { - /// A list of amounts of cost or usage that you created budgets for, which are compared to your - /// actual costs or usage. + /// A list of amounts of cost or usage that you created budgets for, which are compared to + /// your actual costs or usage. public let budgetedAndActualAmountsList: [BudgetedAndActualAmounts]? public let budgetName: String? public let budgetType: BudgetType? @@ -523,9 +535,11 @@ extension Budgets { /// The role passed for action execution and reversion. Roles and actions must be in the same account. public let executionRoleArn: String public let notificationType: NotificationType + /// An optional list of tags to associate with the specified budget action. 
Each tag consists of a key and a value, and each key must be unique for the resource. + public let resourceTags: [ResourceTag]? public let subscribers: [Subscriber] - public init(accountId: String, actionThreshold: ActionThreshold, actionType: ActionType, approvalModel: ApprovalModel, budgetName: String, definition: Definition, executionRoleArn: String, notificationType: NotificationType, subscribers: [Subscriber]) { + public init(accountId: String, actionThreshold: ActionThreshold, actionType: ActionType, approvalModel: ApprovalModel, budgetName: String, definition: Definition, executionRoleArn: String, notificationType: NotificationType, resourceTags: [ResourceTag]? = nil, subscribers: [Subscriber]) { self.accountId = accountId self.actionThreshold = actionThreshold self.actionType = actionType @@ -534,6 +548,7 @@ extension Budgets { self.definition = definition self.executionRoleArn = executionRoleArn self.notificationType = notificationType + self.resourceTags = resourceTags self.subscribers = subscribers } @@ -544,11 +559,15 @@ extension Budgets { try self.actionThreshold.validate(name: "\(name).actionThreshold") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.definition.validate(name: "\(name).definition") try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 618) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 32) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") + try self.resourceTags?.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) try self.subscribers.forEach { try $0.validate(name: "\(name).subscribers[]") } @@ -565,6 +584,7 @@ extension Budgets { case definition = "Definition" case executionRoleArn = "ExecutionRoleArn" case notificationType = "NotificationType" + case resourceTags = "ResourceTags" case subscribers = "Subscribers" } } @@ -595,11 +615,14 @@ extension Budgets { public let budget: Budget /// A notification that you want to associate with a budget. A budget can have up to five notifications, and each notification can have one SNS subscriber and up to 10 email subscribers. If you include notifications and subscribers in your CreateBudget call, Amazon Web Services creates the notifications and subscribers for you. public let notificationsWithSubscribers: [NotificationWithSubscribers]? + /// An optional list of tags to associate with the specified budget. Each tag consists of a key and a value, and each key must be unique for the resource. + public let resourceTags: [ResourceTag]? - public init(accountId: String, budget: Budget, notificationsWithSubscribers: [NotificationWithSubscribers]? = nil) { + public init(accountId: String, budget: Budget, notificationsWithSubscribers: [NotificationWithSubscribers]? = nil, resourceTags: [ResourceTag]? 
= nil) { self.accountId = accountId self.budget = budget self.notificationsWithSubscribers = notificationsWithSubscribers + self.resourceTags = resourceTags } public func validate(name: String) throws { @@ -611,12 +634,17 @@ extension Budgets { try $0.validate(name: "\(name).notificationsWithSubscribers[]") } try self.validate(self.notificationsWithSubscribers, name: "notificationsWithSubscribers", parent: name, max: 10) + try self.resourceTags?.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) } private enum CodingKeys: String, CodingKey { case accountId = "AccountId" case budget = "Budget" case notificationsWithSubscribers = "NotificationsWithSubscribers" + case resourceTags = "ResourceTags" } } @@ -647,7 +675,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.notification.validate(name: "\(name).notification") try self.subscribers.forEach { try $0.validate(name: "\(name).subscribers[]") @@ -691,7 +719,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.notification.validate(name: "\(name).notification") try self.subscriber.validate(name: "\(name).subscriber") } @@ -756,7 +784,7 @@ extension Budgets { try self.validate(self.actionId, name: "actionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") } private enum CodingKeys: String, CodingKey { @@ -801,7 +829,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") } private enum CodingKeys: String, CodingKey { @@ -834,7 +862,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try 
self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.notification.validate(name: "\(name).notification") } @@ -872,7 +900,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.notification.validate(name: "\(name).notification") try self.subscriber.validate(name: "\(name).subscriber") } @@ -916,7 +944,7 @@ extension Budgets { try self.validate(self.actionId, name: "actionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2147483647) @@ -970,7 +998,7 @@ extension Budgets { try self.validate(self.actionId, name: "actionId", parent: name, pattern: "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") } private enum CodingKeys: String, CodingKey { @@ -1062,7 +1090,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2147483647) @@ -1095,8 +1123,8 @@ extension Budgets { public struct DescribeBudgetNotificationsForAccountRequest: AWSEncodableShape { public let accountId: String - /// An integer that represents how many budgets a paginated response contains. The default is - /// 50. 
+ /// An integer that represents how many budgets a paginated response contains. The + /// default is 50. public let maxResults: Int? public let nextToken: String? @@ -1124,8 +1152,7 @@ extension Budgets { } public struct DescribeBudgetNotificationsForAccountResponse: AWSDecodableShape { - /// A list of budget names and associated notifications for an account. - /// + /// A list of budget names and associated notifications for an account. public let budgetNotificationsForAccount: [BudgetNotificationsForAccount]? public let nextToken: String? @@ -1162,7 +1189,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2147483647) @@ -1211,7 +1238,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") } private enum CodingKeys: String, CodingKey { @@ -1304,7 +1331,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2147483647) @@ -1362,7 +1389,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2147483647) @@ -1420,7 +1447,7 @@ extension Budgets { try self.validate(self.actionId, name: "actionId", parent: name, pattern: 
"^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") } private enum CodingKeys: String, CodingKey { @@ -1455,9 +1482,24 @@ extension Budgets { } public struct HistoricalOptions: AWSEncodableShape & AWSDecodableShape { - /// The number of budget periods included in the moving-average calculation that determines your auto-adjusted budget amount. The maximum value depends on the TimeUnit granularity of the budget: For the DAILY granularity, the maximum value is 60. For the MONTHLY granularity, the maximum value is 12. For the QUARTERLY granularity, the maximum value is 4. For the ANNUALLY granularity, the maximum value is 1. + /// The number of budget periods included in the moving-average calculation that + /// determines your auto-adjusted budget amount. The maximum value depends on the + /// TimeUnit granularity of the budget: For the DAILY granularity, the maximum value is + /// 60. For the MONTHLY granularity, the maximum value is + /// 12. For the QUARTERLY granularity, the maximum value is + /// 4. For the ANNUALLY granularity, the maximum value is + /// 1. public let budgetAdjustmentPeriod: Int - /// The integer that describes how many budget periods in your BudgetAdjustmentPeriod are included in the calculation of your current BudgetLimit. If the first budget period in your BudgetAdjustmentPeriod has no cost data, then that budget period isn’t included in the average that determines your budget limit. For example, if you set BudgetAdjustmentPeriod as 4 quarters, but your account had no cost data in the first quarter, then only the last three quarters are included in the calculation. In this scenario, LookBackAvailablePeriods returns 3. You can’t set your own LookBackAvailablePeriods. The value is automatically calculated from the BudgetAdjustmentPeriod and your historical cost data. + /// The integer that describes how many budget periods in your + /// BudgetAdjustmentPeriod are included in the calculation of your current + /// BudgetLimit. If the first budget period in your + /// BudgetAdjustmentPeriod has no cost data, then that budget period isn’t + /// included in the average that determines your budget limit. For example, if you set BudgetAdjustmentPeriod as 4 + /// quarters, but your account had no cost data in the first quarter, then only the last + /// three quarters are included in the calculation. In this scenario, + /// LookBackAvailablePeriods returns 3. You can’t set your own LookBackAvailablePeriods. The value is + /// automatically calculated from the BudgetAdjustmentPeriod and your + /// historical cost data. public let lookBackAvailablePeriods: Int? public init(budgetAdjustmentPeriod: Int, lookBackAvailablePeriods: Int? = nil) { @@ -1530,20 +1572,59 @@ extension Budgets { } } + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The unique identifier for the resource. 
+ public let resourceARN: String + + public init(resourceARN: String) { + self.resourceARN = resourceARN + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The tags associated with the resource. + public let resourceTags: [ResourceTag]? + + public init(resourceTags: [ResourceTag]? = nil) { + self.resourceTags = resourceTags + } + + private enum CodingKeys: String, CodingKey { + case resourceTags = "ResourceTags" + } + } + public struct Notification: AWSEncodableShape & AWSDecodableShape { /// The comparison that's used for this notification. public let comparisonOperator: ComparisonOperator /// Specifies whether this notification is in alarm. If a budget notification is in the /// ALARM state, you passed the set threshold for the budget. public let notificationState: NotificationState? - /// Specifies whether the notification is for how much you have spent (ACTUAL) or - /// for how much that you're forecasted to spend (FORECASTED). + /// Specifies whether the notification is for how much you have spent + /// (ACTUAL) or for how much that you're forecasted to spend + /// (FORECASTED). public let notificationType: NotificationType - /// The threshold that's associated with a notification. Thresholds are always a percentage, and - /// many customers find value being alerted between 50% - 200% of the budgeted amount. The - /// maximum limit for your threshold is 1,000,000% above the budgeted amount. + /// The threshold that's associated with a notification. Thresholds are always a + /// percentage, and many customers find value being alerted between 50% - 200% of the + /// budgeted amount. The maximum limit for your threshold is 1,000,000% above the budgeted + /// amount. public let threshold: Double - /// The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, Amazon Web Services notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, Amazon Web Services notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, Amazon Web Services notifies you when you go over 160 dollars. + /// The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, + /// Amazon Web Services notifies you when you go over or are forecasted to go over your + /// total cost threshold. For + /// PERCENTAGE thresholds, Amazon Web Services notifies you when you go over + /// or are forecasted to go over a certain percentage of your forecasted spend. For example, + /// if you have a budget for 200 dollars and you have a PERCENTAGE threshold of + /// 80%, Amazon Web Services notifies you when you go over 160 dollars. public let thresholdType: ThresholdType? public init(comparisonOperator: ComparisonOperator, notificationState: NotificationState? = nil, notificationType: NotificationType, threshold: Double, thresholdType: ThresholdType? = nil) { @@ -1594,6 +1675,29 @@ extension Budgets { } } + public struct ResourceTag: AWSEncodableShape & AWSDecodableShape { + /// The key that's associated with the tag. + public let key: String + /// The value that's associated with the tag. 
+ public let value: String + + public init(key: String, value: String) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.value, name: "value", parent: name, max: 256) + } + + private enum CodingKeys: String, CodingKey { + case key = "Key" + case value = "Value" + } + } + public struct ScpActionDefinition: AWSEncodableShape & AWSDecodableShape { /// The policy ID attached. public let policyId: String @@ -1625,8 +1729,8 @@ extension Budgets { } public struct Spend: AWSEncodableShape & AWSDecodableShape { - /// The cost or usage amount that's associated with a budget forecast, actual spend, or budget - /// threshold. + /// The cost or usage amount that's associated with a budget forecast, actual spend, or + /// budget threshold. public let amount: String /// The unit of measurement that's used for the budget forecast, actual spend, or budget /// threshold. @@ -1687,7 +1791,9 @@ extension Budgets { } public struct Subscriber: AWSEncodableShape & AWSDecodableShape { - /// The address that Amazon Web Services sends budget notifications to, either an SNS topic or an email. When you create a subscriber, the value of Address can't contain line breaks. + /// The address that Amazon Web Services sends budget notifications to, either an SNS topic + /// or an email. When you create a subscriber, the value of Address can't contain line + /// breaks. public let address: String /// The type of notification that Amazon Web Services sends to a subscriber. public let subscriptionType: SubscriptionType @@ -1709,12 +1815,50 @@ extension Budgets { } } + public struct TagResourceRequest: AWSEncodableShape { + /// The unique identifier for the resource. + public let resourceARN: String + /// The tags associated with the resource. + public let resourceTags: [ResourceTag] + + public init(resourceARN: String, resourceTags: [ResourceTag]) { + self.resourceARN = resourceARN + self.resourceTags = resourceTags + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.resourceTags.forEach { + try $0.validate(name: "\(name).resourceTags[]") + } + try self.validate(self.resourceTags, name: "resourceTags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + case resourceTags = "ResourceTags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct TimePeriod: AWSEncodableShape & AWSDecodableShape { - /// The end date for a budget. If you didn't specify an end date, Amazon Web Services set your end date to 06/15/87 00:00 UTC. The defaults are the same for the Billing and Cost Management console and the API. After the end date, Amazon Web Services deletes the budget and all the associated + /// The end date for a budget. If you didn't specify an end date, Amazon Web Services set + /// your end date to 06/15/87 00:00 UTC. The defaults are the same for the + /// Billing and Cost Management console and the API. After the end date, Amazon Web Services deletes the budget and all the associated /// notifications and subscribers. You can change your end date with the /// UpdateBudget operation. public let end: Date? - /// The start date for a budget. 
If you created your budget and didn't specify a start date, Amazon Web Services defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY, and didn't set a start date, Amazon Web Services set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, Amazon Web Services set your start date to 01/01/18 00:00 UTC. The defaults are the same for the Billing and Cost Management console and the API. You can change your start date with the UpdateBudget operation. + /// The start date for a budget. If you created your budget and didn't specify a start + /// date, Amazon Web Services defaults to the start of your chosen time period (DAILY, + /// MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, + /// 2018, chose DAILY, and didn't set a start date, Amazon Web Services set your + /// start date to 01/24/18 00:00 UTC. If you chose MONTHLY, + /// Amazon Web Services set your start date to 01/01/18 00:00 UTC. The + /// defaults are the same for the Billing and Cost Management console and the API. You can change your start date with the UpdateBudget operation. public let start: Date? public init(end: Date? = nil, start: Date? = nil) { @@ -1728,6 +1872,37 @@ extension Budgets { } } + public struct UntagResourceRequest: AWSEncodableShape { + /// The unique identifier for the resource. + public let resourceARN: String + /// The key that's associated with the tag. + public let resourceTagKeys: [String] + + public init(resourceARN: String, resourceTagKeys: [String]) { + self.resourceARN = resourceARN + self.resourceTagKeys = resourceTagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.resourceTagKeys.forEach { + try validate($0, name: "resourceTagKeys[]", parent: name, max: 128) + try validate($0, name: "resourceTagKeys[]", parent: name, min: 1) + } + try self.validate(self.resourceTagKeys, name: "resourceTagKeys", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + case resourceTagKeys = "ResourceTagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateBudgetActionRequest: AWSEncodableShape { public let accountId: String /// A system-generated universally unique identifier (UUID) for the action. 
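Illustrative sketch (not itself part of this patch): the ResourceTag, TagResource, UntagResource, and ListTagsForResource shapes added above suggest usage along the following lines against a Budgets client. The tagResource/listTagsForResource operation names on the client and the budget ARN format are assumptions based on this model update, not confirmed by the hunks shown here.

func tagBudget(_ budgets: Budgets, accountId: String, budgetName: String) async throws {
    // Assumed ARN format for a budget resource.
    let budgetArn = "arn:aws:budgets::\(accountId):budget/\(budgetName)"
    // ResourceTag keys are limited to 1-128 characters and values to 256 (see the validate above).
    let tags = [Budgets.ResourceTag(key: "team", value: "finance")]
    _ = try await budgets.tagResource(.init(resourceARN: budgetArn, resourceTags: tags))
    // Read the tags back; ListTagsForResourceResponse.resourceTags is optional.
    let listed = try await budgets.listTagsForResource(.init(resourceARN: budgetArn))
    print(listed.resourceTags ?? [])
}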
@@ -1764,7 +1939,7 @@ extension Budgets { try self.actionThreshold?.validate(name: "\(name).actionThreshold") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.definition?.validate(name: "\(name).definition") try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 618) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 32) @@ -1863,7 +2038,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.newNotification.validate(name: "\(name).newNotification") try self.oldNotification.validate(name: "\(name).oldNotification") } @@ -1906,7 +2081,7 @@ extension Budgets { try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.budgetName, name: "budgetName", parent: name, max: 100) try self.validate(self.budgetName, name: "budgetName", parent: name, min: 1) - try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/)[^:\\\\]+$") + try self.validate(self.budgetName, name: "budgetName", parent: name, pattern: "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$") try self.newSubscriber.validate(name: "\(name).newSubscriber") try self.notification.validate(name: "\(name).notification") try self.oldSubscriber.validate(name: "\(name).oldSubscriber") @@ -1940,6 +2115,7 @@ public struct BudgetsErrorType: AWSErrorType { case invalidParameterException = "InvalidParameterException" case notFoundException = "NotFoundException" case resourceLockedException = "ResourceLockedException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" case throttlingException = "ThrottlingException" } @@ -1977,8 +2153,10 @@ public struct BudgetsErrorType: AWSErrorType { public static var invalidParameterException: Self { .init(.invalidParameterException) } /// We can’t locate the resource that you specified. public static var notFoundException: Self { .init(.notFoundException) } - /// The request was received and recognized by the server, but the server rejected that particular method for the requested resource. + /// The request was received and recognized by the server, but the server rejected that particular method for the requested resource. public static var resourceLockedException: Self { .init(.resourceLockedException) } + /// You've reached the limit on the number of tags you can associate with a resource. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } /// The number of API requests has exceeded the maximum allowed API request throttling limit for the account. 
public static var throttlingException: Self { .init(.throttlingException) } } diff --git a/Sources/Soto/Services/Chatbot/Chatbot_api.swift b/Sources/Soto/Services/Chatbot/Chatbot_api.swift index feb4903896..8f8f6181c8 100644 --- a/Sources/Soto/Services/Chatbot/Chatbot_api.swift +++ b/Sources/Soto/Services/Chatbot/Chatbot_api.swift @@ -321,6 +321,45 @@ public struct Chatbot: AWSService { ) } + /// Retrieves the list of tags applied to a configuration. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/list-tags-for-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Applies the supplied tags to a configuration. + @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + return try await self.client.execute( + operation: "TagResource", + path: "/tag-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Removes the supplied tags from a configuration + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + return try await self.client.execute( + operation: "UntagResource", + path: "/untag-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update Chatbot account level preferences @Sendable public func updateAccountPreferences(_ input: UpdateAccountPreferencesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAccountPreferencesResult { diff --git a/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift b/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift index 944ed73d36..76edafadf1 100644 --- a/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift +++ b/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift @@ -56,15 +56,18 @@ extension Chatbot { public let loggingLevel: String? /// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// A list of tags applied to the configuration. + public let tags: [Tag]? /// Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html. public let webhookDescription: String - public init(chatConfigurationArn: String, configurationName: String? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], webhookDescription: String) { + public init(chatConfigurationArn: String, configurationName: String? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], tags: [Tag]? = nil, webhookDescription: String) { self.chatConfigurationArn = chatConfigurationArn self.configurationName = configurationName self.iamRoleArn = iamRoleArn self.loggingLevel = loggingLevel self.snsTopicArns = snsTopicArns + self.tags = tags self.webhookDescription = webhookDescription } @@ -74,6 +77,7 @@ extension Chatbot { case iamRoleArn = "IamRoleArn" case loggingLevel = "LoggingLevel" case snsTopicArns = "SnsTopicArns" + case tags = "Tags" case webhookDescription = "WebhookDescription" } } @@ -108,16 +112,19 @@ extension Chatbot { public let loggingLevel: String? 
/// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// A list of tags to apply to the configuration. + public let tags: [Tag]? /// Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html. public let webhookDescription: String /// URL for the Chime webhook. public let webhookUrl: String - public init(configurationName: String, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], webhookDescription: String, webhookUrl: String) { + public init(configurationName: String, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], tags: [Tag]? = nil, webhookDescription: String, webhookUrl: String) { self.configurationName = configurationName self.iamRoleArn = iamRoleArn self.loggingLevel = loggingLevel self.snsTopicArns = snsTopicArns + self.tags = tags self.webhookDescription = webhookDescription self.webhookUrl = webhookUrl } @@ -137,6 +144,9 @@ extension Chatbot { try validate($0, name: "snsTopicArns[]", parent: name, min: 12) try validate($0, name: "snsTopicArns[]", parent: name, pattern: "^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$") } + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } try self.validate(self.webhookDescription, name: "webhookDescription", parent: name, max: 255) try self.validate(self.webhookDescription, name: "webhookDescription", parent: name, min: 1) try self.validate(self.webhookUrl, name: "webhookUrl", parent: name, max: 255) @@ -149,6 +159,7 @@ extension Chatbot { case iamRoleArn = "IamRoleArn" case loggingLevel = "LoggingLevel" case snsTopicArns = "SnsTopicArns" + case tags = "Tags" case webhookDescription = "WebhookDescription" case webhookUrl = "WebhookUrl" } @@ -184,10 +195,12 @@ extension Chatbot { public let slackTeamId: String /// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String]? + /// A list of tags to apply to the configuration. + public let tags: [Tag]? /// Enables use of a user role requirement in your chat configuration. public let userAuthorizationRequired: Bool? - public init(configurationName: String, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, slackChannelId: String, slackChannelName: String? = nil, slackTeamId: String, snsTopicArns: [String]? = nil, userAuthorizationRequired: Bool? = nil) { + public init(configurationName: String, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, slackChannelId: String, slackChannelName: String? = nil, slackTeamId: String, snsTopicArns: [String]? = nil, tags: [Tag]? = nil, userAuthorizationRequired: Bool? 
= nil) { self.configurationName = configurationName self.guardrailPolicyArns = guardrailPolicyArns self.iamRoleArn = iamRoleArn @@ -196,6 +209,7 @@ extension Chatbot { self.slackChannelName = slackChannelName self.slackTeamId = slackTeamId self.snsTopicArns = snsTopicArns + self.tags = tags self.userAuthorizationRequired = userAuthorizationRequired } @@ -227,6 +241,9 @@ extension Chatbot { try validate($0, name: "snsTopicArns[]", parent: name, min: 12) try validate($0, name: "snsTopicArns[]", parent: name, pattern: "^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$") } + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } } private enum CodingKeys: String, CodingKey { @@ -238,6 +255,7 @@ extension Chatbot { case slackChannelName = "SlackChannelName" case slackTeamId = "SlackTeamId" case snsTopicArns = "SnsTopicArns" + case tags = "Tags" case userAuthorizationRequired = "UserAuthorizationRequired" } } @@ -270,6 +288,8 @@ extension Chatbot { public let loggingLevel: String? /// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String]? + /// A list of tags to apply to the configuration. + public let tags: [Tag]? /// The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide. public let teamId: String /// The name of the Microsoft Teams Team. @@ -279,7 +299,7 @@ extension Chatbot { /// Enables use of a user role requirement in your chat configuration. public let userAuthorizationRequired: Bool? - public init(channelId: String, channelName: String? = nil, configurationName: String, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String]? = nil, teamId: String, teamName: String? = nil, tenantId: String, userAuthorizationRequired: Bool? = nil) { + public init(channelId: String, channelName: String? = nil, configurationName: String, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String]? = nil, tags: [Tag]? = nil, teamId: String, teamName: String? = nil, tenantId: String, userAuthorizationRequired: Bool? 
= nil) { self.channelId = channelId self.channelName = channelName self.configurationName = configurationName @@ -287,6 +307,7 @@ extension Chatbot { self.iamRoleArn = iamRoleArn self.loggingLevel = loggingLevel self.snsTopicArns = snsTopicArns + self.tags = tags self.teamId = teamId self.teamName = teamName self.tenantId = tenantId @@ -319,6 +340,9 @@ extension Chatbot { try validate($0, name: "snsTopicArns[]", parent: name, min: 12) try validate($0, name: "snsTopicArns[]", parent: name, pattern: "^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$") } + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } try self.validate(self.teamId, name: "teamId", parent: name, max: 36) try self.validate(self.teamId, name: "teamId", parent: name, min: 36) try self.validate(self.teamId, name: "teamId", parent: name, pattern: "^[0-9A-Fa-f]{8}(?:-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$") @@ -338,6 +362,7 @@ extension Chatbot { case iamRoleArn = "IamRoleArn" case loggingLevel = "LoggingLevel" case snsTopicArns = "SnsTopicArns" + case tags = "Tags" case teamId = "TeamId" case teamName = "TeamName" case tenantId = "TenantId" @@ -869,6 +894,38 @@ extension Chatbot { } } + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The ARN of the configuration. + public let resourceARN: String + + public init(resourceARN: String) { + self.resourceARN = resourceARN + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$") + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// A list of tags applied to the configuration. + public let tags: [Tag]? + + public init(tags: [Tag]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + public struct ListTeamsChannelConfigurationsRequest: AWSEncodableShape { /// The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. public let maxResults: Int? @@ -939,10 +996,12 @@ extension Chatbot { public let slackTeamName: String /// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// A list of tags applied to the configuration. + public let tags: [Tag]? /// Enables use of a user role requirement in your chat configuration. public let userAuthorizationRequired: Bool? - public init(chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, slackChannelId: String, slackChannelName: String, slackTeamId: String, slackTeamName: String, snsTopicArns: [String], userAuthorizationRequired: Bool? = nil) { + public init(chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? 
= nil, slackChannelId: String, slackChannelName: String, slackTeamId: String, slackTeamName: String, snsTopicArns: [String], tags: [Tag]? = nil, userAuthorizationRequired: Bool? = nil) { self.chatConfigurationArn = chatConfigurationArn self.configurationName = configurationName self.guardrailPolicyArns = guardrailPolicyArns @@ -953,6 +1012,7 @@ extension Chatbot { self.slackTeamId = slackTeamId self.slackTeamName = slackTeamName self.snsTopicArns = snsTopicArns + self.tags = tags self.userAuthorizationRequired = userAuthorizationRequired } @@ -967,6 +1027,7 @@ extension Chatbot { case slackTeamId = "SlackTeamId" case slackTeamName = "SlackTeamName" case snsTopicArns = "SnsTopicArns" + case tags = "Tags" case userAuthorizationRequired = "UserAuthorizationRequired" } } @@ -1017,6 +1078,61 @@ extension Chatbot { } } + public struct Tag: AWSEncodableShape & AWSDecodableShape { + /// The tag key. + public let tagKey: String + /// The tag value. + public let tagValue: String + + public init(tagKey: String, tagValue: String) { + self.tagKey = tagKey + self.tagValue = tagValue + } + + public func validate(name: String) throws { + try self.validate(self.tagKey, name: "tagKey", parent: name, max: 128) + try self.validate(self.tagKey, name: "tagKey", parent: name, min: 1) + try self.validate(self.tagValue, name: "tagValue", parent: name, max: 256) + try self.validate(self.tagValue, name: "tagValue", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case tagKey = "TagKey" + case tagValue = "TagValue" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The ARN of the configuration. + public let resourceARN: String + /// A list of tags to apply to the configuration. + public let tags: [Tag] + + public init(resourceARN: String, tags: [Tag]) { + self.resourceARN = resourceARN + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$") + try self.tags.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + case tags = "Tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct TeamsChannelConfiguration: AWSDecodableShape { /// The ID of the Microsoft Teams channel. public let channelId: String @@ -1034,6 +1150,8 @@ extension Chatbot { public let loggingLevel: String? /// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// A list of tags applied to the configuration. + public let tags: [Tag]? /// The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide. public let teamId: String /// The name of the Microsoft Teams Team. @@ -1043,7 +1161,7 @@ extension Chatbot { /// Enables use of a user role requirement in your chat configuration. 
public let userAuthorizationRequired: Bool? - public init(channelId: String, channelName: String? = nil, chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], teamId: String, teamName: String? = nil, tenantId: String, userAuthorizationRequired: Bool? = nil) { + public init(channelId: String, channelName: String? = nil, chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], tags: [Tag]? = nil, teamId: String, teamName: String? = nil, tenantId: String, userAuthorizationRequired: Bool? = nil) { self.channelId = channelId self.channelName = channelName self.chatConfigurationArn = chatConfigurationArn @@ -1052,6 +1170,7 @@ extension Chatbot { self.iamRoleArn = iamRoleArn self.loggingLevel = loggingLevel self.snsTopicArns = snsTopicArns + self.tags = tags self.teamId = teamId self.teamName = teamName self.tenantId = tenantId @@ -1067,6 +1186,7 @@ extension Chatbot { case iamRoleArn = "IamRoleArn" case loggingLevel = "LoggingLevel" case snsTopicArns = "SnsTopicArns" + case tags = "Tags" case teamId = "TeamId" case teamName = "TeamName" case tenantId = "TenantId" @@ -1111,6 +1231,38 @@ extension Chatbot { } } + public struct UntagResourceRequest: AWSEncodableShape { + /// The ARN of the configuration. + public let resourceARN: String + /// A list of tag keys to remove from the configuration. + public let tagKeys: [String] + + public init(resourceARN: String, tagKeys: [String]) { + self.resourceARN = resourceARN + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$") + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceARN = "ResourceARN" + case tagKeys = "TagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + public struct UpdateAccountPreferencesRequest: AWSEncodableShape { /// Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot’s AI technologies. public let trainingDataCollectionEnabled: Bool? 
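Illustrative sketch (not itself part of this patch): the Chatbot tagging operations and the Tag, TagResourceRequest, and UntagResourceRequest shapes added above can be exercised roughly as follows. The configuration ARN here is a placeholder.

func manageChatbotTags(_ chatbot: Chatbot, configurationArn: String) async throws {
    // Apply a tag to an existing channel configuration (TagKey 1-128 chars, TagValue 1-256 chars).
    let tags = [Chatbot.Tag(tagKey: "team", tagValue: "ops")]
    _ = try await chatbot.tagResource(.init(resourceARN: configurationArn, tags: tags))
    // List the tags currently applied to the configuration.
    let listed = try await chatbot.listTagsForResource(.init(resourceARN: configurationArn))
    print(listed.tags ?? [])
    // Remove the tag again by key.
    _ = try await chatbot.untagResource(.init(resourceARN: configurationArn, tagKeys: ["team"]))
}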
@@ -1395,6 +1547,7 @@ public struct ChatbotErrorType: AWSErrorType { case describeSlackWorkspacesException = "DescribeSlackWorkspacesException" case getAccountPreferencesException = "GetAccountPreferencesException" case getTeamsChannelConfigurationException = "GetTeamsChannelConfigurationException" + case internalServiceError = "InternalServiceError" case invalidParameterException = "InvalidParameterException" case invalidRequestException = "InvalidRequestException" case limitExceededException = "LimitExceededException" @@ -1402,6 +1555,8 @@ public struct ChatbotErrorType: AWSErrorType { case listMicrosoftTeamsUserIdentitiesException = "ListMicrosoftTeamsUserIdentitiesException" case listTeamsChannelConfigurationsException = "ListTeamsChannelConfigurationsException" case resourceNotFoundException = "ResourceNotFoundException" + case serviceUnavailableException = "ServiceUnavailableException" + case tooManyTagsException = "TooManyTagsException" case updateAccountPreferencesException = "UpdateAccountPreferencesException" case updateChimeWebhookConfigurationException = "UpdateChimeWebhookConfigurationException" case updateSlackChannelConfigurationException = "UpdateSlackChannelConfigurationException" @@ -1460,6 +1615,8 @@ public struct ChatbotErrorType: AWSErrorType { public static var getAccountPreferencesException: Self { .init(.getAccountPreferencesException) } /// We can’t process your request right now because of a server issue. Try again later. public static var getTeamsChannelConfigurationException: Self { .init(.getTeamsChannelConfigurationException) } + /// Customer/consumer-facing internal service exception. https://w.amazon.com/index.php/AWS/API_Standards/Exceptions#InternalServiceError + public static var internalServiceError: Self { .init(.internalServiceError) } /// Your request input doesn't meet the constraints that AWS Chatbot requires. public static var invalidParameterException: Self { .init(.invalidParameterException) } /// Your request input doesn't meet the constraints that AWS Chatbot requires. @@ -1475,6 +1632,10 @@ public struct ChatbotErrorType: AWSErrorType { /// We were not able to find the resource for your request. public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } /// We can’t process your request right now because of a server issue. Try again later. + public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) } + /// The supplied list of tags contains too many tags. + public static var tooManyTagsException: Self { .init(.tooManyTagsException) } + /// We can’t process your request right now because of a server issue. Try again later. public static var updateAccountPreferencesException: Self { .init(.updateAccountPreferencesException) } /// We can’t process your request right now because of a server issue. Try again later. 
public static var updateChimeWebhookConfigurationException: Self { .init(.updateChimeWebhookConfigurationException) } diff --git a/Sources/Soto/Services/Cloud9/Cloud9_api.swift b/Sources/Soto/Services/Cloud9/Cloud9_api.swift index 3fe8c1c714..29e528e5b5 100644 --- a/Sources/Soto/Services/Cloud9/Cloud9_api.swift +++ b/Sources/Soto/Services/Cloud9/Cloud9_api.swift @@ -60,6 +60,7 @@ public struct Cloud9: AWSService { serviceProtocol: .json(version: "1.1"), apiVersion: "2017-09-23", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: Cloud9ErrorType.self, middleware: middleware, timeout: timeout, @@ -71,6 +72,33 @@ public struct Cloud9: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "af-south-1": "cloud9.af-south-1.api.aws", + "ap-east-1": "cloud9.ap-east-1.api.aws", + "ap-northeast-1": "cloud9.ap-northeast-1.api.aws", + "ap-northeast-2": "cloud9.ap-northeast-2.api.aws", + "ap-northeast-3": "cloud9.ap-northeast-3.api.aws", + "ap-south-1": "cloud9.ap-south-1.api.aws", + "ap-southeast-1": "cloud9.ap-southeast-1.api.aws", + "ap-southeast-2": "cloud9.ap-southeast-2.api.aws", + "ca-central-1": "cloud9.ca-central-1.api.aws", + "eu-central-1": "cloud9.eu-central-1.api.aws", + "eu-north-1": "cloud9.eu-north-1.api.aws", + "eu-south-1": "cloud9.eu-south-1.api.aws", + "eu-west-1": "cloud9.eu-west-1.api.aws", + "eu-west-2": "cloud9.eu-west-2.api.aws", + "eu-west-3": "cloud9.eu-west-3.api.aws", + "il-central-1": "cloud9.il-central-1.api.aws", + "me-south-1": "cloud9.me-south-1.api.aws", + "sa-east-1": "cloud9.sa-east-1.api.aws", + "us-east-1": "cloud9.us-east-1.api.aws", + "us-east-2": "cloud9.us-east-2.api.aws", + "us-west-1": "cloud9.us-west-1.api.aws", + "us-west-2": "cloud9.us-west-2.api.aws" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift b/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift index c36ab8349d..8c282fb562 100644 --- a/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift +++ b/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift @@ -124,6 +124,12 @@ extension CloudFormation { public var description: String { return self.rawValue } } + public enum DeletionMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case forceDeleteStack = "FORCE_DELETE_STACK" + case standard = "STANDARD" + public var description: String { return self.rawValue } + } + public enum DeprecatedStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case deprecated = "DEPRECATED" case live = "LIVE" @@ -1572,6 +1578,8 @@ extension CloudFormation { public struct DeleteStackInput: AWSEncodableShape { /// A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that CloudFormation successfully received them. All events initiated by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. 
Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. public let clientRequestToken: String? + /// Specifies the deletion mode for the stack. Possible values are: STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter. FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure. + public let deletionMode: DeletionMode? /// For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, CloudFormation deletes the stack but doesn't delete the retained resources. Retaining resources is useful when you can't delete a resource, such as a non-empty S3 bucket, but you want to delete the stack. @OptionalCustomCoding> public var retainResources: [String]? @@ -1580,8 +1588,9 @@ extension CloudFormation { /// The name or the unique stack ID that's associated with the stack. public let stackName: String? - public init(clientRequestToken: String? = nil, retainResources: [String]? = nil, roleARN: String? = nil, stackName: String? = nil) { + public init(clientRequestToken: String? = nil, deletionMode: DeletionMode? = nil, retainResources: [String]? = nil, roleARN: String? = nil, stackName: String? = nil) { self.clientRequestToken = clientRequestToken + self.deletionMode = deletionMode self.retainResources = retainResources self.roleARN = roleARN self.stackName = stackName @@ -1597,6 +1606,7 @@ extension CloudFormation { private enum CodingKeys: String, CodingKey { case clientRequestToken = "ClientRequestToken" + case deletionMode = "DeletionMode" case retainResources = "RetainResources" case roleARN = "RoleARN" case stackName = "StackName" @@ -3670,7 +3680,7 @@ extension CloudFormation { public struct ListStackInstanceResourceDriftsOutput: AWSDecodableShape { /// If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null. public let nextToken: String? - /// A list of StackInstanceResourceDriftSummary structures that contain information about the specified stack instances. + /// A list of StackInstanceResourceDriftsSummary structures that contain information about the specified stack instances. @OptionalCustomCoding> public var summaries: [StackInstanceResourceDriftsSummary]? @@ -5269,6 +5279,8 @@ extension CloudFormation { public let changeSetId: String? /// The time at which the stack was created. public let creationTime: Date? + /// Specifies the deletion mode for the stack. Possible values are: STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter. FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure. + public let deletionMode: DeletionMode? /// The time the stack was deleted. public let deletionTime: Date? /// A user-defined description associated with the stack. 
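Illustrative sketch (not itself part of this patch): the new DeletionMode parameter on DeleteStackInput allows retrying deletion of a stack stuck in DELETE_FAILED. The stack name below is a placeholder, and the deleteStack operation is assumed to be the existing Soto CloudFormation API.

func forceDelete(_ cloudFormation: CloudFormation, stackName: String) async throws {
    // FORCE_DELETE_STACK only applies to stacks in the DELETE_FAILED state;
    // STANDARD (or omitting deletionMode) keeps the existing behaviour.
    _ = try await cloudFormation.deleteStack(
        .init(deletionMode: .forceDeleteStack, stackName: stackName)
    )
}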
@@ -5316,10 +5328,11 @@ extension CloudFormation { /// The amount of time within which stack creation should complete. public let timeoutInMinutes: Int? - public init(capabilities: [Capability]? = nil, changeSetId: String? = nil, creationTime: Date? = nil, deletionTime: Date? = nil, description: String? = nil, detailedStatus: DetailedStatus? = nil, disableRollback: Bool? = nil, driftInformation: StackDriftInformation? = nil, enableTerminationProtection: Bool? = nil, lastUpdatedTime: Date? = nil, notificationARNs: [String]? = nil, outputs: [Output]? = nil, parameters: [Parameter]? = nil, parentId: String? = nil, retainExceptOnCreate: Bool? = nil, roleARN: String? = nil, rollbackConfiguration: RollbackConfiguration? = nil, rootId: String? = nil, stackId: String? = nil, stackName: String? = nil, stackStatus: StackStatus? = nil, stackStatusReason: String? = nil, tags: [Tag]? = nil, timeoutInMinutes: Int? = nil) { + public init(capabilities: [Capability]? = nil, changeSetId: String? = nil, creationTime: Date? = nil, deletionMode: DeletionMode? = nil, deletionTime: Date? = nil, description: String? = nil, detailedStatus: DetailedStatus? = nil, disableRollback: Bool? = nil, driftInformation: StackDriftInformation? = nil, enableTerminationProtection: Bool? = nil, lastUpdatedTime: Date? = nil, notificationARNs: [String]? = nil, outputs: [Output]? = nil, parameters: [Parameter]? = nil, parentId: String? = nil, retainExceptOnCreate: Bool? = nil, roleARN: String? = nil, rollbackConfiguration: RollbackConfiguration? = nil, rootId: String? = nil, stackId: String? = nil, stackName: String? = nil, stackStatus: StackStatus? = nil, stackStatusReason: String? = nil, tags: [Tag]? = nil, timeoutInMinutes: Int? = nil) { self.capabilities = capabilities self.changeSetId = changeSetId self.creationTime = creationTime + self.deletionMode = deletionMode self.deletionTime = deletionTime self.description = description self.detailedStatus = detailedStatus @@ -5347,6 +5360,7 @@ extension CloudFormation { case capabilities = "Capabilities" case changeSetId = "ChangeSetId" case creationTime = "CreationTime" + case deletionMode = "DeletionMode" case deletionTime = "DeletionTime" case description = "Description" case detailedStatus = "DetailedStatus" @@ -6119,7 +6133,7 @@ extension CloudFormation { } public struct StackSetOperationPreferences: AWSEncodableShape & AWSDecodableShape { - /// Specifies how the concurrency level behaves during the operation execution. STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of MaxConcurrentCount +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior. If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar. SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual concurrency. This allows stack set operations to run at the concurrency level set by the MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of failures. + /// Specifies how the concurrency level behaves during the operation execution. STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the number of failed accounts never exceeds the value of FailureToleranceCount +1. 
The initial actual concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of FailureToleranceCount +1. The actual concurrency is then reduced proportionally by the number of failures. This is the default behavior. If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar. SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual concurrency. This allows stack set operations to run at the concurrency level set by the MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of failures. public let concurrencyMode: ConcurrencyMode? /// The number of accounts, per Region, for which this operation can fail before CloudFormation stops the operation in that Region. If the operation is stopped in a Region, CloudFormation doesn't attempt the operation in any subsequent Regions. Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage (but not both). By default, 0 is specified. public let failureToleranceCount: Int? diff --git a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift index 878267753b..8e67e902ef 100644 --- a/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift +++ b/Sources/Soto/Services/CloudHSMV2/CloudHSMV2_shapes.swift @@ -44,6 +44,12 @@ extension CloudHSMV2 { public var description: String { return self.rawValue } } + public enum ClusterMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fips = "FIPS" + case nonFips = "NON_FIPS" + public var description: String { return self.rawValue } + } + public enum ClusterState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case createInProgress = "CREATE_IN_PROGRESS" @@ -81,6 +87,10 @@ extension CloudHSMV2 { public let createTimestamp: Date? /// The date and time when the backup will be permanently deleted. public let deleteTimestamp: Date? + /// The HSM type of the cluster that was backed up. + public let hsmType: String? + /// The mode of the cluster that was backed up. + public let mode: ClusterMode? /// Specifies whether the service should exempt a backup from the retention policy for the cluster. True exempts a backup from the retention policy. False means the service applies the backup retention policy defined at the cluster. public let neverExpires: Bool? /// The identifier (ID) of the source backup from which the new backup was copied. @@ -92,13 +102,15 @@ extension CloudHSMV2 { /// The list of tags for the backup. public let tagList: [Tag]? - public init(backupId: String, backupState: BackupState? = nil, clusterId: String? = nil, copyTimestamp: Date? = nil, createTimestamp: Date? = nil, deleteTimestamp: Date? = nil, neverExpires: Bool? = nil, sourceBackup: String? = nil, sourceCluster: String? = nil, sourceRegion: String? = nil, tagList: [Tag]? = nil) { + public init(backupId: String, backupState: BackupState? = nil, clusterId: String? = nil, copyTimestamp: Date? = nil, createTimestamp: Date? = nil, deleteTimestamp: Date? = nil, hsmType: String? = nil, mode: ClusterMode? = nil, neverExpires: Bool? = nil, sourceBackup: String? = nil, sourceCluster: String? = nil, sourceRegion: String? = nil, tagList: [Tag]? 
= nil) { self.backupId = backupId self.backupState = backupState self.clusterId = clusterId self.copyTimestamp = copyTimestamp self.createTimestamp = createTimestamp self.deleteTimestamp = deleteTimestamp + self.hsmType = hsmType + self.mode = mode self.neverExpires = neverExpires self.sourceBackup = sourceBackup self.sourceCluster = sourceCluster @@ -113,6 +125,8 @@ extension CloudHSMV2 { case copyTimestamp = "CopyTimestamp" case createTimestamp = "CreateTimestamp" case deleteTimestamp = "DeleteTimestamp" + case hsmType = "HsmType" + case mode = "Mode" case neverExpires = "NeverExpires" case sourceBackup = "SourceBackup" case sourceCluster = "SourceCluster" @@ -188,6 +202,8 @@ extension CloudHSMV2 { public let hsms: [Hsm]? /// The type of HSM that the cluster contains. public let hsmType: String? + /// The mode of the cluster. + public let mode: ClusterMode? /// The default password for the cluster's Pre-Crypto Officer (PRECO) user. public let preCoPassword: String? /// The identifier (ID) of the cluster's security group. @@ -205,7 +221,7 @@ extension CloudHSMV2 { /// The identifier (ID) of the virtual private cloud (VPC) that contains the cluster. public let vpcId: String? - public init(backupPolicy: BackupPolicy? = nil, backupRetentionPolicy: BackupRetentionPolicy? = nil, certificates: Certificates? = nil, clusterId: String? = nil, createTimestamp: Date? = nil, hsms: [Hsm]? = nil, hsmType: String? = nil, preCoPassword: String? = nil, securityGroup: String? = nil, sourceBackupId: String? = nil, state: ClusterState? = nil, stateMessage: String? = nil, subnetMapping: [String: String]? = nil, tagList: [Tag]? = nil, vpcId: String? = nil) { + public init(backupPolicy: BackupPolicy? = nil, backupRetentionPolicy: BackupRetentionPolicy? = nil, certificates: Certificates? = nil, clusterId: String? = nil, createTimestamp: Date? = nil, hsms: [Hsm]? = nil, hsmType: String? = nil, mode: ClusterMode? = nil, preCoPassword: String? = nil, securityGroup: String? = nil, sourceBackupId: String? = nil, state: ClusterState? = nil, stateMessage: String? = nil, subnetMapping: [String: String]? = nil, tagList: [Tag]? = nil, vpcId: String? = nil) { self.backupPolicy = backupPolicy self.backupRetentionPolicy = backupRetentionPolicy self.certificates = certificates @@ -213,6 +229,7 @@ extension CloudHSMV2 { self.createTimestamp = createTimestamp self.hsms = hsms self.hsmType = hsmType + self.mode = mode self.preCoPassword = preCoPassword self.securityGroup = securityGroup self.sourceBackupId = sourceBackupId @@ -231,6 +248,7 @@ extension CloudHSMV2 { case createTimestamp = "CreateTimestamp" case hsms = "Hsms" case hsmType = "HsmType" + case mode = "Mode" case preCoPassword = "PreCoPassword" case securityGroup = "SecurityGroup" case sourceBackupId = "SourceBackupId" @@ -289,8 +307,10 @@ extension CloudHSMV2 { public struct CreateClusterRequest: AWSEncodableShape { /// A policy that defines how the service retains backups. public let backupRetentionPolicy: BackupRetentionPolicy? - /// The type of HSM to use in the cluster. Currently the only allowed value is hsm1.medium. + /// The type of HSM to use in the cluster. The allowed values are hsm1.medium and hsm2m.medium. public let hsmType: String + /// The mode to use in the cluster. The allowed values are FIPS and NON_FIPS. + public let mode: ClusterMode? /// The identifier (ID) of the cluster backup to restore. Use this value to restore the cluster from a backup instead of creating a new cluster. To find the backup ID, use DescribeBackups. 
public let sourceBackupId: String? /// The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria: All subnets must be in the same virtual private cloud (VPC). You can specify only one subnet per Availability Zone. @@ -298,9 +318,10 @@ extension CloudHSMV2 { /// Tags to apply to the CloudHSM cluster during creation. public let tagList: [Tag]? - public init(backupRetentionPolicy: BackupRetentionPolicy? = nil, hsmType: String, sourceBackupId: String? = nil, subnetIds: [String], tagList: [Tag]? = nil) { + public init(backupRetentionPolicy: BackupRetentionPolicy? = nil, hsmType: String, mode: ClusterMode? = nil, sourceBackupId: String? = nil, subnetIds: [String], tagList: [Tag]? = nil) { self.backupRetentionPolicy = backupRetentionPolicy self.hsmType = hsmType + self.mode = mode self.sourceBackupId = sourceBackupId self.subnetIds = subnetIds self.tagList = tagList @@ -308,7 +329,8 @@ extension CloudHSMV2 { public func validate(name: String) throws { try self.backupRetentionPolicy?.validate(name: "\(name).backupRetentionPolicy") - try self.validate(self.hsmType, name: "hsmType", parent: name, pattern: "^(hsm1\\.medium)$") + try self.validate(self.hsmType, name: "hsmType", parent: name, max: 32) + try self.validate(self.hsmType, name: "hsmType", parent: name, pattern: "^((p|)hsm[0-9][a-z.]*\\.[a-zA-Z]+)$") try self.validate(self.sourceBackupId, name: "sourceBackupId", parent: name, pattern: "^backup-[2-7a-zA-Z]{11,16}$") try self.subnetIds.forEach { try validate($0, name: "subnetIds[]", parent: name, pattern: "^subnet-[0-9a-fA-F]{8,17}$") @@ -325,6 +347,7 @@ extension CloudHSMV2 { private enum CodingKeys: String, CodingKey { case backupRetentionPolicy = "BackupRetentionPolicy" case hsmType = "HsmType" + case mode = "Mode" case sourceBackupId = "SourceBackupId" case subnetIds = "SubnetIds" case tagList = "TagList" @@ -510,6 +533,7 @@ extension CloudHSMV2 { try self.filters?.forEach { try validate($0.key, name: "filters.key", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } + try self.validate(self.filters, name: "filters", parent: name, max: 30) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 256) @@ -559,6 +583,7 @@ extension CloudHSMV2 { try self.filters?.forEach { try validate($0.key, name: "filters.key", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } + try self.validate(self.filters, name: "filters", parent: name, max: 30) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 256) @@ -671,9 +696,9 @@ extension CloudHSMV2 { public func validate(name: String) throws { try self.validate(self.clusterId, name: "clusterId", parent: name, pattern: "^cluster-[2-7a-zA-Z]{11,16}$") - try self.validate(self.signedCert, name: "signedCert", parent: name, max: 5000) + try self.validate(self.signedCert, name: "signedCert", parent: name, max: 20000) try self.validate(self.signedCert, name: "signedCert", parent: name, pattern: "^[a-zA-Z0-9+-/=\\s]*$") - try self.validate(self.trustAnchor, name: "trustAnchor", parent: name, max: 5000) + try self.validate(self.trustAnchor, name: "trustAnchor", parent: name, max: 20000) try 
self.validate(self.trustAnchor, name: "trustAnchor", parent: name, pattern: "^[a-zA-Z0-9+-/=\\s]*$") } diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift index 8cee416989..5e8d64ff9d 100644 --- a/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift +++ b/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift @@ -516,7 +516,7 @@ public struct CloudTrail: AWSService { ) } - /// Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events for trails in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide. + /// Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. 
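Usage sketch tying together the CloudHSMV2 changes above, namely the relaxed hsmType pattern and the new mode field (editorial example, not part of the generated diff). It assumes cloudHSMV2 is an existing CloudHSMV2 service instance and that the response carries the usual optional cluster payload; the subnet ID is a placeholder that matches the ^subnet-[0-9a-fA-F]{8,17}$ pattern.

    // Create a FIPS-mode cluster with the hsm2m.medium type that the updated
    // hsmType validation now accepts.
    let request = CloudHSMV2.CreateClusterRequest(
        hsmType: "hsm2m.medium",
        mode: .fips,
        subnetIds: ["subnet-0123456789abcdef0"]   // placeholder subnet ID
    )
    let response = try await cloudHSMV2.createCluster(request)
    // The new fields are echoed back on the created cluster (cluster payload assumed).
    print(response.cluster?.mode?.rawValue ?? "mode not reported")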
Example You create an event selector for a trail and specify that you want write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide. @Sendable public func putEventSelectors(_ input: PutEventSelectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutEventSelectorsResponse { return try await self.client.execute( @@ -607,7 +607,7 @@ public struct CloudTrail: AWSService { ) } - /// Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more considerations about importing trail events, see Considerations. When you start a new import, the Destinations and ImportSource parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket. When you retry an import, the ImportID parameter is required. If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization. + /// Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more considerations about importing trail events, see Considerations for copying trail events in the CloudTrail User Guide. When you start a new import, the Destinations and ImportSource parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket. When you retry an import, the ImportID parameter is required. 
If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization. @Sendable public func startImport(_ input: StartImportRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartImportResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift index 2a639087d5..58cca8cdb9 100644 --- a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift +++ b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift @@ -192,7 +192,7 @@ extension CloudTrail { public let endsWith: [String]? /// An operator that includes events that match the exact value of the event record field specified as the value of Field. This is the only valid operator that you can use with the readOnly, eventCategory, and resources.type fields. public let equals: [String]? - /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource. For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory. readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events. eventSource - For filtering management events only. This can be set to NotEquals kms.amazonaws.com or NotEquals rdsdata.amazonaws.com. eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas. eventCategory - This is required and must be set to Equals. For CloudTrail management events, the value must be Management. For CloudTrail data events, the value must be Data. The following are used only for event data stores: For CloudTrail Insights events, the value must be Insight. For Config configuration items, the value must be ConfigurationItem. For Audit Manager evidence, the value must be Evidence. For non-Amazon Web Services events, the value must be ActivityAuditLog. resources.type - This field is required for CloudTrail data events. 
resources.type can only use the Equals operator, and the value can be one of the following: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object AWS::AppConfig::Configuration AWS::B2BI::Transformer AWS::Bedrock::AgentAlias AWS::Bedrock::KnowledgeBase AWS::Cassandra::Table AWS::CloudFront::KeyValueStore AWS::CloudTrail::Channel AWS::CodeWhisperer::Customization AWS::CodeWhisperer::Profile AWS::Cognito::IdentityPool AWS::DynamoDB::Stream AWS::EC2::Snapshot AWS::EMRWAL::Workspace AWS::FinSpace::Environment AWS::Glue::Table AWS::GreengrassV2::ComponentVersion AWS::GreengrassV2::Deployment AWS::GuardDuty::Detector AWS::IoT::Certificate AWS::IoT::Thing AWS::IoTSiteWise::Asset AWS::IoTSiteWise::TimeSeries AWS::IoTTwinMaker::Entity AWS::IoTTwinMaker::Workspace AWS::KendraRanking::ExecutionPlan AWS::KinesisVideo::Stream AWS::ManagedBlockchain::Network AWS::ManagedBlockchain::Node AWS::MedicalImaging::Datastore AWS::NeptuneGraph::Graph AWS::PCAConnectorAD::Connector AWS::QBusiness::Application AWS::QBusiness::DataSource AWS::QBusiness::Index AWS::QBusiness::WebExperience AWS::RDS::DBCluster AWS::S3::AccessPoint AWS::S3ObjectLambda::AccessPoint AWS::S3Outposts::Object AWS::SageMaker::Endpoint AWS::SageMaker::ExperimentTrialComponent AWS::SageMaker::FeatureGroup AWS::ServiceDiscovery::Namespace AWS::ServiceDiscovery::Service AWS::SCN::Instance AWS::SNS::PlatformEndpoint AWS::SNS::Topic AWS::SWF::Domain AWS::SQS::Queue AWS::SSMMessages::ControlChannel AWS::ThinClient::Device AWS::ThinClient::Environment AWS::Timestream::Database AWS::Timestream::Table AWS::VerifiedPermissions::PolicyStore You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector. resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value. The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information. 
arn::s3:::/ arn::s3:::// When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::dynamodb:::table/ When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::lambda:::function: When resources.type equals AWS::AppConfig::Configuration, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::appconfig:::application//environment//configuration/ When resources.type equals AWS::B2BI::Transformer, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::b2bi:::transformer/ When resources.type equals AWS::Bedrock::AgentAlias, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::bedrock:::agent-alias// When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::bedrock:::knowledge-base/ When resources.type equals AWS::Cassandra::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cassandra:::/keyspace//table/ When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cloudfront:::key-value-store/ When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cloudtrail:::channel/ When resources.type equals AWS::CodeWhisperer::Customization, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::codewhisperer:::customization/ When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::codewhisperer:::profile/ When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cognito-identity:::identitypool/ When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::dynamodb:::table//stream/ When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::ec2:::snapshot/ When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::emrwal:::workspace/ When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::finspace:::environment/ When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::glue:::table// When resources.type equals AWS::GreengrassV2::ComponentVersion, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::greengrass:::components/ When resources.type equals AWS::GreengrassV2::Deployment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::greengrass:::deployments/ When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::guardduty:::detector/ When resources.type equals AWS::IoT::Certificate, and the operator is 
set to Equals or NotEquals, the ARN must be in the following format: arn::iot:::cert/ When resources.type equals AWS::IoT::Thing, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iot:::thing/ When resources.type equals AWS::IoTSiteWise::Asset, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iotsitewise:::asset/ When resources.type equals AWS::IoTSiteWise::TimeSeries, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iotsitewise:::timeseries/ When resources.type equals AWS::IoTTwinMaker::Entity, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iottwinmaker:::workspace//entity/ When resources.type equals AWS::IoTTwinMaker::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iottwinmaker:::workspace/ When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::kendra-ranking:::rescore-execution-plan/ When resources.type equals AWS::KinesisVideo::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::kinesisvideo:::stream// When resources.type equals AWS::ManagedBlockchain::Network, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::managedblockchain:::networks/ When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::managedblockchain:::nodes/ When resources.type equals AWS::MedicalImaging::Datastore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::medical-imaging:::datastore/ When resources.type equals AWS::NeptuneGraph::Graph, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::neptune-graph:::graph/ When resources.type equals AWS::PCAConnectorAD::Connector, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::pca-connector-ad:::connector/ When resources.type equals AWS::QBusiness::Application, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application/ When resources.type equals AWS::QBusiness::DataSource, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//index//data-source/ When resources.type equals AWS::QBusiness::Index, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//index/ When resources.type equals AWS::QBusiness::WebExperience, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//web-experience/ When resources.type equals AWS::RDS::DBCluster, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::rds:::cluster/ When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators. 
arn::s3:::accesspoint/ arn::s3:::accesspoint//object/ When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::s3-object-lambda:::accesspoint/ When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::s3-outposts::: When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::endpoint/ When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::experiment-trial-component/ When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::feature-group/ When resources.type equals AWS::SCN::Instance, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::scn:::instance/ When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::servicediscovery:::namespace/ When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::servicediscovery:::service/ When resources.type equals AWS::SNS::PlatformEndpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sns:::endpoint/// When resources.type equals AWS::SNS::Topic, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sns::: When resources.type equals AWS::SWF::Domain, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::swf:::domain/ When resources.type equals AWS::SQS::Queue, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sqs::: When resources.type equals AWS::SSMMessages::ControlChannel, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::ssmmessages:::control-channel/ When resources.type equals AWS::ThinClient::Device, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::thinclient:::device/ When resources.type equals AWS::ThinClient::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::thinclient:::environment/ When resources.type equals AWS::Timestream::Database, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::timestream:::database/ When resources.type equals AWS::Timestream::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::timestream:::database//table/ When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::verifiedpermissions:::policy-store/ + /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. 
For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource. For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory. readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events. eventSource - For filtering management events only. This can be set to NotEquals kms.amazonaws.com or NotEquals rdsdata.amazonaws.com. eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas. eventCategory - This is required and must be set to Equals. For CloudTrail management events, the value must be Management. For CloudTrail data events, the value must be Data. The following are used only for event data stores: For CloudTrail Insights events, the value must be Insight. For Config configuration items, the value must be ConfigurationItem. For Audit Manager evidence, the value must be Evidence. For non-Amazon Web Services events, the value must be ActivityAuditLog. resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object AWS::AppConfig::Configuration AWS::B2BI::Transformer AWS::Bedrock::AgentAlias AWS::Bedrock::KnowledgeBase AWS::Cassandra::Table AWS::CloudFront::KeyValueStore AWS::CloudTrail::Channel AWS::CodeWhisperer::Customization AWS::CodeWhisperer::Profile AWS::Cognito::IdentityPool AWS::DynamoDB::Stream AWS::EC2::Snapshot AWS::EMRWAL::Workspace AWS::FinSpace::Environment AWS::Glue::Table AWS::GreengrassV2::ComponentVersion AWS::GreengrassV2::Deployment AWS::GuardDuty::Detector AWS::IoT::Certificate AWS::IoT::Thing AWS::IoTSiteWise::Asset AWS::IoTSiteWise::TimeSeries AWS::IoTTwinMaker::Entity AWS::IoTTwinMaker::Workspace AWS::KendraRanking::ExecutionPlan AWS::KinesisVideo::Stream AWS::ManagedBlockchain::Network AWS::ManagedBlockchain::Node AWS::MedicalImaging::Datastore AWS::NeptuneGraph::Graph AWS::PCAConnectorAD::Connector AWS::QApps:QApp AWS::QBusiness::Application AWS::QBusiness::DataSource AWS::QBusiness::Index AWS::QBusiness::WebExperience AWS::RDS::DBCluster AWS::S3::AccessPoint AWS::S3ObjectLambda::AccessPoint AWS::S3Outposts::Object AWS::SageMaker::Endpoint AWS::SageMaker::ExperimentTrialComponent AWS::SageMaker::FeatureGroup AWS::ServiceDiscovery::Namespace AWS::ServiceDiscovery::Service AWS::SCN::Instance AWS::SNS::PlatformEndpoint AWS::SNS::Topic AWS::SQS::Queue AWS::SSM::ManagedNode AWS::SSMMessages::ControlChannel AWS::SWF::Domain AWS::ThinClient::Device AWS::ThinClient::Environment AWS::Timestream::Database AWS::Timestream::Table AWS::VerifiedPermissions::PolicyStore AWS::XRay::Trace You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector. 
resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. You can't use the resources.ARN field to filter resource types that do not have ARNs. The resources.ARN field can be set one of the following. If resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value. The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information. arn::s3:::/ arn::s3:::// When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::dynamodb:::table/ When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::lambda:::function: When resources.type equals AWS::AppConfig::Configuration, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::appconfig:::application//environment//configuration/ When resources.type equals AWS::B2BI::Transformer, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::b2bi:::transformer/ When resources.type equals AWS::Bedrock::AgentAlias, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::bedrock:::agent-alias// When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::bedrock:::knowledge-base/ When resources.type equals AWS::Cassandra::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cassandra:::/keyspace//table/ When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cloudfront:::key-value-store/ When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cloudtrail:::channel/ When resources.type equals AWS::CodeWhisperer::Customization, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::codewhisperer:::customization/ When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::codewhisperer:::profile/ When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cognito-identity:::identitypool/ When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::dynamodb:::table//stream/ When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::ec2:::snapshot/ When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::emrwal:::workspace/ When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: 
arn::finspace:::environment/ When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::glue:::table// When resources.type equals AWS::GreengrassV2::ComponentVersion, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::greengrass:::components/ When resources.type equals AWS::GreengrassV2::Deployment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::greengrass:::deployments/ When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::guardduty:::detector/ When resources.type equals AWS::IoT::Certificate, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iot:::cert/ When resources.type equals AWS::IoT::Thing, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iot:::thing/ When resources.type equals AWS::IoTSiteWise::Asset, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iotsitewise:::asset/ When resources.type equals AWS::IoTSiteWise::TimeSeries, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iotsitewise:::timeseries/ When resources.type equals AWS::IoTTwinMaker::Entity, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iottwinmaker:::workspace//entity/ When resources.type equals AWS::IoTTwinMaker::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iottwinmaker:::workspace/ When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::kendra-ranking:::rescore-execution-plan/ When resources.type equals AWS::KinesisVideo::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::kinesisvideo:::stream// When resources.type equals AWS::ManagedBlockchain::Network, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::managedblockchain:::networks/ When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::managedblockchain:::nodes/ When resources.type equals AWS::MedicalImaging::Datastore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::medical-imaging:::datastore/ When resources.type equals AWS::NeptuneGraph::Graph, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::neptune-graph:::graph/ When resources.type equals AWS::PCAConnectorAD::Connector, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::pca-connector-ad:::connector/ When resources.type equals AWS::QApps:QApp, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qapps:::application//qapp/ When resources.type equals AWS::QBusiness::Application, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application/ When resources.type equals AWS::QBusiness::DataSource, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//index//data-source/ When resources.type equals 
AWS::QBusiness::Index, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//index/ When resources.type equals AWS::QBusiness::WebExperience, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//web-experience/ When resources.type equals AWS::RDS::DBCluster, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::rds:::cluster/ When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators. arn::s3:::accesspoint/ arn::s3:::accesspoint//object/ When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::s3-object-lambda:::accesspoint/ When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::s3-outposts::: When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::endpoint/ When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::experiment-trial-component/ When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::feature-group/ When resources.type equals AWS::SCN::Instance, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::scn:::instance/ When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::servicediscovery:::namespace/ When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::servicediscovery:::service/ When resources.type equals AWS::SNS::PlatformEndpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sns:::endpoint/// When resources.type equals AWS::SNS::Topic, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sns::: When resources.type equals AWS::SQS::Queue, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sqs::: When resources.type equals AWS::SSM::ManagedNode, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats: arn::ssm:::managed-instance/ arn::ec2:::instance/ When resources.type equals AWS::SSMMessages::ControlChannel, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::ssmmessages:::control-channel/ When resources.type equals AWS::SWF::Domain, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::swf:::domain/ When resources.type equals AWS::ThinClient::Device, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::thinclient:::device/ When resources.type equals AWS::ThinClient::Environment, and the operator is set to 
Equals or NotEquals, the ARN must be in the following format: arn::thinclient:::environment/ When resources.type equals AWS::Timestream::Database, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::timestream:::database/ When resources.type equals AWS::Timestream::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::timestream:::database//table/ When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::verifiedpermissions:::policy-store/ public let field: String /// An operator that excludes events that match the last few characters of the event record field specified as the value of Field. public let notEndsWith: [String]? @@ -403,7 +403,7 @@ extension CloudTrail { } public struct CreateEventDataStoreRequest: AWSEncodableShape { - /// The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store. For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide. For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide. For more information about how to use advanced event selectors to include non-Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide. + /// The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store. For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide. For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide. For more information about how to use advanced event selectors to include events outside of Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide. public let advancedEventSelectors: [AdvancedEventSelector]? /// The billing mode for the event data store determines the cost for ingesting events and the default and maximum retention period for the event data store. The following are the possible values: EXTENDABLE_RETENTION_PRICING - This billing mode is generally recommended if you want a flexible retention period of up to 3653 days (about 10 years). The default retention period for this billing mode is 366 days. FIXED_RETENTION_PRICING - This billing mode is recommended if you expect to ingest more than 25 TB of event data per month and need a retention period of up to 2557 days (about 7 years). The default retention period for this billing mode is 2557 days. The default value is EXTENDABLE_RETENTION_PRICING. For more information about CloudTrail pricing, see CloudTrail Pricing and Managing CloudTrail Lake costs. public let billingMode: BillingMode? @@ -545,7 +545,7 @@ extension CloudTrail { public let kmsKeyId: String? /// Specifies the name of the trail. 
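Usage sketch for the expanded resources.type list above, using the newly supported AWS::SSM::ManagedNode type as an example (editorial example, not part of the generated diff). The AdvancedEventSelector, AdvancedFieldSelector, and PutEventSelectorsRequest initializers are assumed to follow the usual generated conventions; cloudTrail and the trail name are placeholders.

    // Log data events for the newly supported AWS::SSM::ManagedNode resource type.
    let selector = CloudTrail.AdvancedEventSelector(
        fieldSelectors: [
            .init(equals: ["Data"], field: "eventCategory"),
            .init(equals: ["AWS::SSM::ManagedNode"], field: "resources.type")
        ],
        name: "SSM managed node data events"
    )
    _ = try await cloudTrail.putEventSelectors(
        .init(advancedEventSelectors: [selector], trailName: "my-trail")   // placeholder trail
    )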
The name must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid. Not be in IP address format (for example, 192.168.5.4) public let name: String - /// Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements. + /// Specifies the name of the Amazon S3 bucket designated for publishing log files. For information about bucket naming rules, see Bucket naming rules in the Amazon Simple Storage Service User Guide. public let s3BucketName: String /// Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters. public let s3KeyPrefix: String? @@ -672,7 +672,7 @@ extension CloudTrail { public struct DataResource: AWSEncodableShape & AWSDecodableShape { /// The resource type in which you want to log data events. You can specify the following basic event selector resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object Additional resource types are available through advanced event selectors. For more information about these additional resource types, see AdvancedFieldSelector. public let type: String? - /// An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects. To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3. This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix. To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda. This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account. To log data events for a specific Lambda function, specify the function ARN. Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2. To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb. + /// An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type. To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3. 
This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix. To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda. This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account. To log data events for a specific Lambda function, specify the function ARN. Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2. To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb. public let values: [String]? public init(type: String? = nil, values: [String]? = nil) { @@ -1233,6 +1233,8 @@ extension CloudTrail { public let name: String? /// Indicates whether an event data store is collecting logged events for an organization in Organizations. public let organizationEnabled: Bool? + /// The partition keys for the event data store. To improve query performance and efficiency, CloudTrail Lake organizes event data into partitions based on values derived from partition keys. + public let partitionKeys: [PartitionKey]? /// The retention period of the event data store, in days. public let retentionPeriod: Int? /// The status of an event data store. @@ -1242,7 +1244,7 @@ extension CloudTrail { /// Shows the time that an event data store was updated, if applicable. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp. public let updatedTimestamp: Date? - public init(advancedEventSelectors: [AdvancedEventSelector]? = nil, billingMode: BillingMode? = nil, createdTimestamp: Date? = nil, eventDataStoreArn: String? = nil, federationRoleArn: String? = nil, federationStatus: FederationStatus? = nil, kmsKeyId: String? = nil, multiRegionEnabled: Bool? = nil, name: String? = nil, organizationEnabled: Bool? = nil, retentionPeriod: Int? = nil, status: EventDataStoreStatus? = nil, terminationProtectionEnabled: Bool? = nil, updatedTimestamp: Date? = nil) { + public init(advancedEventSelectors: [AdvancedEventSelector]? = nil, billingMode: BillingMode? = nil, createdTimestamp: Date? = nil, eventDataStoreArn: String? = nil, federationRoleArn: String? = nil, federationStatus: FederationStatus? = nil, kmsKeyId: String? = nil, multiRegionEnabled: Bool? = nil, name: String? = nil, organizationEnabled: Bool? = nil, partitionKeys: [PartitionKey]? = nil, retentionPeriod: Int? = nil, status: EventDataStoreStatus? = nil, terminationProtectionEnabled: Bool? = nil, updatedTimestamp: Date? 
= nil) { self.advancedEventSelectors = advancedEventSelectors self.billingMode = billingMode self.createdTimestamp = createdTimestamp @@ -1253,6 +1255,7 @@ extension CloudTrail { self.multiRegionEnabled = multiRegionEnabled self.name = name self.organizationEnabled = organizationEnabled + self.partitionKeys = partitionKeys self.retentionPeriod = retentionPeriod self.status = status self.terminationProtectionEnabled = terminationProtectionEnabled @@ -1270,6 +1273,7 @@ extension CloudTrail { case multiRegionEnabled = "MultiRegionEnabled" case name = "Name" case organizationEnabled = "OrganizationEnabled" + case partitionKeys = "PartitionKeys" case retentionPeriod = "RetentionPeriod" case status = "Status" case terminationProtectionEnabled = "TerminationProtectionEnabled" @@ -1584,11 +1588,11 @@ extension CloudTrail { public let latestDeliveryAttemptSucceeded: String? /// This field is no longer in use. public let latestDeliveryAttemptTime: String? - /// Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference. This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket. + /// Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference. This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket. public let latestDeliveryError: String? /// Specifies the date and time that CloudTrail last delivered log files to an account's Amazon S3 bucket. public let latestDeliveryTime: Date? - /// Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference. This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket. + /// Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference. This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket. public let latestDigestDeliveryError: String? /// Specifies the date and time that CloudTrail last delivered a digest file to an account's Amazon S3 bucket. public let latestDigestDeliveryTime: Date? @@ -2322,6 +2326,23 @@ extension CloudTrail { } } + public struct PartitionKey: AWSDecodableShape { + /// The name of the partition key. + public let name: String + /// The data type of the partition key. 
For example, bigint or string. + public let type: String + + public init(name: String, type: String) { + self.name = name + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + case type = "Type" + } + } + public struct PublicKey: AWSDecodableShape { /// The fingerprint of the public key. public let fingerprint: String? @@ -3096,7 +3117,7 @@ extension CloudTrail { public let logFileValidationEnabled: Bool? /// Name of the trail set by calling CreateTrail. The maximum length is 128 characters. public let name: String? - /// Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements. + /// Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket naming rules. public let s3BucketName: String? /// Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters. public let s3KeyPrefix: String? @@ -3391,7 +3412,7 @@ extension CloudTrail { public let kmsKeyId: String? /// Specifies the name of the trail or trail ARN. If Name is a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid. Not be in IP address format (for example, 192.168.5.4) If Name is a trail ARN, it must be in the following format. arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail public let name: String - /// Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements. + /// Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket naming rules. public let s3BucketName: String? /// Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters. public let s3KeyPrefix: String? @@ -3638,7 +3659,7 @@ public struct CloudTrailErrorType: AWSErrorType { public static var channelNotFoundException: Self { .init(.channelNotFoundException) } /// This exception is thrown when an operation is called with an ARN that is not valid. The following is the format of a trail ARN: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail The following is the format of an event data store ARN: arn:aws:cloudtrail:us-east-2:123456789012:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE The following is the format of a channel ARN: arn:aws:cloudtrail:us-east-2:123456789012:channel/01234567890 public static var cloudTrailARNInvalidException: Self { .init(.cloudTrailARNInvalidException) } - /// This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see Enabling Trusted Access with Other Amazon Web Services Services and Prepare For Creating a Trail For Your Organization. + /// This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. 
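// A minimal sketch of the two CloudTrail pieces above: building a DataResource
// for S3 data events as described in the doc text, and summarizing the new
// PartitionKeys field added in this hunk. DataResource(type:values:) and
// PartitionKey(name:type:) are taken from the shapes shown here; the module
// name SotoCloudTrail and the bucket name "bucket-1" are assumptions used only
// for illustration.
import SotoCloudTrail

// Log data events for every object in one S3 bucket: the bucket ARN plus an
// empty object prefix selects all objects in that bucket.
let allObjectsInBucket = CloudTrail.DataResource(
    type: "AWS::S3::Object",
    values: ["arn:aws:s3:::bucket-1/"]
)

// Summarize the partition keys CloudTrail Lake reports for an event data store,
// as decoded into the new partitionKeys array.
func describe(partitionKeys: [CloudTrail.PartitionKey]?) -> String {
    (partitionKeys ?? [])
        .map { "\($0.name) (\($0.type))" }
        .joined(separator: ", ")
}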
For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide. public static var cloudTrailAccessNotEnabledException: Self { .init(.cloudTrailAccessNotEnabledException) } /// This exception is thrown when a call results in the InvalidClientTokenId error code. This can occur when you are creating or updating a trail to send notifications to an Amazon SNS topic that is in a suspended Amazon Web Services account. public static var cloudTrailInvalidClientTokenIdException: Self { .init(.cloudTrailInvalidClientTokenIdException) } @@ -3746,7 +3767,7 @@ public struct CloudTrailErrorType: AWSErrorType { public static var noManagementAccountSLRExistsException: Self { .init(.noManagementAccountSLRExistsException) } /// This exception is thrown when the account making the request is not the organization's management account. public static var notOrganizationManagementAccountException: Self { .init(.notOrganizationManagementAccountException) } - /// This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Create an event data store. + /// This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores. public static var notOrganizationMasterAccountException: Self { .init(.notOrganizationMasterAccountException) } /// This exception is thrown when the requested operation is not permitted. public static var operationNotPermittedException: Self { .init(.operationNotPermittedException) } diff --git a/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift b/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift index b9a61c60b7..5a45c1dca3 100644 --- a/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift +++ b/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS CodeArtifact service. /// -/// CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client. CodeArtifact concepts Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools such as the npm CLI or the Maven CLI ( mvn ). For a list of supported package managers, see the CodeArtifact User Guide. Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. 
A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS). Each repository is a member of a single domain and can't be moved to a different domain. The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages. Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization. Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, and generic package formats. For more information about the supported package formats and how to use CodeArtifact with them, see the CodeArtifact User Guide. In CodeArtifact, a package consists of: A name (for example, webpack is the name of a popular npm package) An optional namespace (for example, @types in @types/node) A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.) Package-level metadata (for example, npm tags) Package group: A group of packages that match a specified definition. Package groups can be used to apply configuration to multiple packages that match a defined pattern using package format, package namespace, and package name. You can use package groups to more conveniently configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing of new package versions, which protects users from malicious actions known as dependency substitution attacks. Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets. Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories. Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files. CodeArtifact supported API operations AssociateExternalConnection: Adds an existing external connection to a repository. CopyPackageVersions: Copies package versions from one repository to another repository in the same domain. CreateDomain: Creates a domain. CreatePackageGroup: Creates a package group. CreateRepository: Creates a CodeArtifact repository in a domain. DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories. DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain. DeletePackage: Deletes a package and all associated package versions. DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group. DeletePackageVersions: Deletes versions of a package. 
After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage. DeleteRepository: Deletes a repository. DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository. DescribeDomain: Returns a DomainDescription object that contains information about the requested domain. DescribePackage: Returns a PackageDescription object that contains details about a package. DescribePackageGroup: Returns a PackageGroup object that contains details about a package group. DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version. DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository. DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because they have been permanently removed from storage. DisassociateExternalConnection: Removes an existing external connection from a repository. GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package. GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours. GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain. GetPackageVersionAsset: Returns the contents of an asset that is in a package version. GetPackageVersionReadme: Gets the readme file or descriptive text for a package version. GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format: generic maven npm nuget pypi ruby swift GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository. ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES. ListAssociatedPackages: Returns a list of packages associated with the requested package group. ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain. ListPackages: Lists the packages in a repository. ListPackageGroups: Returns a list of package groups in the requested domain. ListPackageVersionAssets: Lists the assets for a given package version. ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version. ListPackageVersions: Returns a list of package versions for a specified package in a repository. ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method. ListRepositoriesInDomain: Returns a list of the repositories in a domain. ListSubPackageGroups: Returns a list of direct children of the specified package group. PublishPackageVersion: Creates a new package version containing one or more assets. PutDomainPermissionsPolicy: Attaches a resource policy to a domain. PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determine how new versions of the package can be added to a specific repository. PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it. UpdatePackageGroup: Updates a package group. 
This API cannot be used to update a package group's origin configuration or pattern. UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group. UpdatePackageVersionsStatus: Updates the status of one or more versions of a package. UpdateRepository: Updates the properties of a repository. +/// CodeArtifact is a fully managed artifact repository compatible with language-native package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to share packages with development teams and pull packages. Packages can be pulled from both public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact repository and another repository, which effectively merges their contents from the point of view of a package manager client. CodeArtifact concepts Repository: A CodeArtifact repository contains a set of package versions, each of which maps to a set of assets, or files. Repositories are polyglot, so a single repository can contain packages of any supported type. Each repository exposes endpoints for fetching and publishing packages using tools such as the npm CLI or the Maven CLI ( mvn ). For a list of supported package managers, see the CodeArtifact User Guide. Domain: Repositories are aggregated into a higher-level entity known as a domain. All package assets and metadata are stored in the domain, but are consumed through repositories. A given package asset, such as a Maven JAR file, is stored once per domain, no matter how many repositories it's present in. All of the assets and metadata in a domain are encrypted with the same customer master key (CMK) stored in Key Management Service (KMS). Each repository is a member of a single domain and can't be moved to a different domain. The domain allows organizational policy to be applied across multiple repositories, such as which accounts can access repositories in the domain, and which public repositories can be used as sources of packages. Although an organization can have multiple domains, we recommend a single production domain that contains all published artifacts so that teams can find and share packages across their organization. Package: A package is a bundle of software and the metadata required to resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, Cargo, and generic package formats. For more information about the supported package formats and how to use CodeArtifact with them, see the CodeArtifact User Guide. In CodeArtifact, a package consists of: A name (for example, webpack is the name of a popular npm package) An optional namespace (for example, @types in @types/node) A set of versions (for example, 1.0.0, 1.0.1, 1.0.2, etc.) Package-level metadata (for example, npm tags) Package group: A group of packages that match a specified definition. Package groups can be used to apply configuration to multiple packages that match a defined pattern using package format, package namespace, and package name. You can use package groups to more conveniently configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing of new package versions, which protects users from malicious actions known as dependency substitution attacks. Package version: A version of a package, such as @types/node 12.6.9. The version number format and semantics vary for different package formats. 
For example, npm package versions must conform to the Semantic Versioning specification. In CodeArtifact, a package version consists of the version identifier, metadata at the package version level, and a set of assets. Upstream repository: One repository is upstream of another when the package versions in it can be accessed from the repository endpoint of the downstream repository, effectively merging the contents of the two repositories from the point of view of a client. CodeArtifact allows creating an upstream relationship between two repositories. Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm .tgz file or Maven POM and JAR files. CodeArtifact supported API operations AssociateExternalConnection: Adds an existing external connection to a repository. CopyPackageVersions: Copies package versions from one repository to another repository in the same domain. CreateDomain: Creates a domain. CreatePackageGroup: Creates a package group. CreateRepository: Creates a CodeArtifact repository in a domain. DeleteDomain: Deletes a domain. You cannot delete a domain that contains repositories. DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain. DeletePackage: Deletes a package and all associated package versions. DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group. DeletePackageVersions: Deletes versions of a package. After a package has been deleted, it can be republished, but its assets and metadata cannot be restored because they have been permanently removed from storage. DeleteRepository: Deletes a repository. DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository. DescribeDomain: Returns a DomainDescription object that contains information about the requested domain. DescribePackage: Returns a PackageDescription object that contains details about a package. DescribePackageGroup: Returns a PackageGroup object that contains details about a package group. DescribePackageVersion: Returns a PackageVersionDescription object that contains details about a package version. DescribeRepository: Returns a RepositoryDescription object that contains detailed information about the requested repository. DisposePackageVersions: Disposes versions of a package. A package version with the status Disposed cannot be restored because they have been permanently removed from storage. DisassociateExternalConnection: Removes an existing external connection from a repository. GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package. GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires the authorization period has passed. The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours. GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to the specified domain. GetPackageVersionAsset: Returns the contents of an asset that is in a package version. GetPackageVersionReadme: Gets the readme file or descriptive text for a package version. GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format: cargo generic maven npm nuget pypi ruby swift GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository. 
ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES. ListAssociatedPackages: Returns a list of packages associated with the requested package group. ListDomains: Returns a list of DomainSummary objects. Each returned DomainSummary object contains information about a domain. ListPackages: Lists the packages in a repository. ListPackageGroups: Returns a list of package groups in the requested domain. ListPackageVersionAssets: Lists the assets for a given package version. ListPackageVersionDependencies: Returns a list of the direct dependencies for a package version. ListPackageVersions: Returns a list of package versions for a specified package in a repository. ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method. ListRepositoriesInDomain: Returns a list of the repositories in a domain. ListSubPackageGroups: Returns a list of direct children of the specified package group. PublishPackageVersion: Creates a new package version containing one or more assets. PutDomainPermissionsPolicy: Attaches a resource policy to a domain. PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determine how new versions of the package can be added to a specific repository. PutRepositoryPermissionsPolicy: Sets the resource policy on a repository that specifies permissions to access it. UpdatePackageGroup: Updates a package group. This API cannot be used to update a package group's origin configuration or pattern. UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group. UpdatePackageVersionsStatus: Updates the status of one or more versions of a package. UpdateRepository: Updates the properties of a repository. public struct CodeArtifact: AWSService { // MARK: Member variables @@ -385,7 +385,7 @@ public struct CodeArtifact: AWSService { ) } - /// Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format: generic maven npm nuget pypi ruby swift + /// Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format: cargo generic maven npm nuget pypi ruby swift @Sendable public func getRepositoryEndpoint(_ input: GetRepositoryEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRepositoryEndpointResult { return try await self.client.execute( diff --git a/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift b/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift index d9df1144a8..6e7d06d471 100644 --- a/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift +++ b/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift @@ -58,6 +58,7 @@ extension CodeArtifact { } public enum PackageFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cargo = "cargo" case generic = "generic" case maven = "maven" case npm = "npm" @@ -155,7 +156,7 @@ extension CodeArtifact { public let domain: String /// The 12-digit account number of the Amazon Web Services account that owns the domain. It does not include dashes or spaces. public let domainOwner: String? - /// The name of the external connection to add to the repository. The following values are supported: public:npmjs - for the npm public repository. public:nuget-org - for the NuGet Gallery. public:pypi - for the Python Package Index. 
public:maven-central - for Maven Central. public:maven-googleandroid - for the Google Android repository. public:maven-gradleplugins - for the Gradle plugins repository. public:maven-commonsware - for the CommonsWare Android repository. public:maven-clojars - for the Clojars repository. + /// The name of the external connection to add to the repository. The following values are supported: public:npmjs - for the npm public repository. public:nuget-org - for the NuGet Gallery. public:pypi - for the Python Package Index. public:maven-central - for Maven Central. public:maven-googleandroid - for the Google Android repository. public:maven-gradleplugins - for the Gradle plugins repository. public:maven-commonsware - for the CommonsWare Android repository. public:maven-clojars - for the Clojars repository. public:ruby-gems-org - for RubyGems.org. public:crates-io - for Crates.io. public let externalConnection: String /// The name of the repository to which the external connection is added. public let repository: String @@ -212,7 +213,7 @@ extension CodeArtifact { public let associationType: PackageGroupAssociationType? /// A format that specifies the type of the associated package. public let format: PackageFormat? - /// The namespace of the associated package. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the associated package. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the associated package. public let package: String? @@ -245,7 +246,7 @@ extension CodeArtifact { public let format: PackageFormat /// Set to true to copy packages from repositories that are upstream from the source repository to the destination repository. The default setting is false. For more information, see Working with upstream repositories. public let includeFromUpstream: Bool? - /// The namespace of the package versions to be copied. The package component that specifies its namespace depends on its type. For example: The namespace is required when copying package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package versions to be copied. The package component that specifies its namespace depends on its type. For example: The namespace is required when copying package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. 
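// A hedged usage sketch for the Cargo support added above, assuming an existing
// AWSClient. getRepositoryEndpoint(_:) and PackageFormat.cargo come from the
// hunks shown here; the GetRepositoryEndpointRequest member names (domain,
// format, repository), the repositoryEndpoint result field, and the service
// initializer follow the usual Soto conventions and are assumptions, not
// confirmed by this diff.
import SotoCodeArtifact

func cargoEndpoint(client: AWSClient, domain: String, repository: String) async throws -> String? {
    let codeArtifact = CodeArtifact(client: client, region: .useast1)
    let result = try await codeArtifact.getRepositoryEndpoint(
        .init(domain: domain, format: .cargo, repository: repository)
    )
    return result.repositoryEndpoint
}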
Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package that contains the versions to be copied. public let package: String @@ -697,7 +698,7 @@ extension CodeArtifact { public let domainOwner: String? /// The format of the requested package to delete. public let format: PackageFormat - /// The namespace of the package to delete. The package component that specifies its namespace depends on its type. For example: The namespace is required when deleting packages of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package to delete. The package component that specifies its namespace depends on its type. For example: The namespace is required when deleting packages of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package to delete. public let package: String @@ -766,7 +767,7 @@ extension CodeArtifact { public let expectedStatus: PackageVersionStatus? /// The format of the package versions to delete. public let format: PackageFormat - /// The namespace of the package versions to be deleted. The package component that specifies its namespace depends on its type. For example: The namespace is required when deleting package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package versions to be deleted. The package component that specifies its namespace depends on its type. For example: The namespace is required when deleting package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package with the versions to delete. public let package: String @@ -1052,7 +1053,7 @@ extension CodeArtifact { public let domainOwner: String? /// A format that specifies the type of the requested package. public let format: PackageFormat - /// The namespace of the requested package. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting packages of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. 
The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the requested package. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting packages of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the requested package. public let package: String @@ -1120,7 +1121,7 @@ extension CodeArtifact { public let domainOwner: String? /// A format that specifies the type of the requested package version. public let format: PackageFormat - /// The namespace of the requested package version. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the requested package version. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the requested package version. public let package: String @@ -1304,7 +1305,7 @@ extension CodeArtifact { public let expectedStatus: PackageVersionStatus? /// A format that specifies the type of package versions you want to dispose. public let format: PackageFormat - /// The namespace of the package versions to be disposed. The package component that specifies its namespace depends on its type. For example: The namespace is required when disposing package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package versions to be disposed. The package component that specifies its namespace depends on its type. For example: The namespace is required when disposing package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. 
Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package with the versions you want to dispose. public let package: String @@ -1499,7 +1500,7 @@ extension CodeArtifact { public let domainOwner: String? /// The format of the package from which to get the associated package group. public let format: PackageFormat - /// The namespace of the package from which to get the associated package group. The package component that specifies its namespace depends on its type. For example: The namespace is required when getting associated package groups from packages of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package from which to get the associated package group. The package component that specifies its namespace depends on its type. For example: The namespace is required when getting associated package groups from packages of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The package from which to get the associated package group. public let package: String @@ -1662,7 +1663,7 @@ extension CodeArtifact { public let domainOwner: String? /// A format that specifies the type of the package version with the requested asset file. public let format: PackageFormat - /// The namespace of the package version with the requested asset file. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting assets from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version with the requested asset file. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting assets from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package that contains the requested asset. public let package: String @@ -1766,7 +1767,7 @@ extension CodeArtifact { public let domainOwner: String? /// A format that specifies the type of the package version with the requested readme file. 
public let format: PackageFormat - /// The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting the readme from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example: The namespace is required when requesting the readme from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package version that contains the requested readme file. public let package: String @@ -1824,7 +1825,7 @@ extension CodeArtifact { public struct GetPackageVersionReadmeResult: AWSDecodableShape { /// The format of the package with the requested readme file. public let format: PackageFormat? - /// The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package that contains the returned readme file. public let package: String? @@ -2234,7 +2235,7 @@ extension CodeArtifact { public let format: PackageFormat /// The maximum number of results to return per page. public let maxResults: Int? - /// The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example: The namespace is required requesting assets from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version that contains the requested package version assets. 
The package component that specifies its namespace depends on its type. For example: The namespace is required requesting assets from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. public let nextToken: String? @@ -2305,7 +2306,7 @@ extension CodeArtifact { public let assets: [AssetSummary]? /// The format of the package that contains the requested package version assets. public let format: PackageFormat? - /// The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// If there are additional results, this is the token for the next set of results. public let nextToken: String? @@ -2344,7 +2345,7 @@ extension CodeArtifact { public let domainOwner: String? /// The format of the package with the requested dependencies. public let format: PackageFormat - /// The namespace of the package version with the requested dependencies. The package component that specifies its namespace depends on its type. For example: The namespace is required when listing dependencies from package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version with the requested dependencies. The package component that specifies its namespace depends on its type. For example: The namespace is required when listing dependencies from package versions of the following formats: Maven The namespace of a Maven package version is its groupId. The namespace of an npm package version is its scope. Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. 
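// A sketch of the nextToken pagination pattern described in these doc comments.
// The listPackageVersionAssets operation is listed in this file's API overview
// and AssetSummary appears in the result hunk above; the request initializer's
// member names and order below follow the fields visible here plus the usual
// Soto conventions, and should be read as assumptions rather than the exact
// generated signature.
import SotoCodeArtifact

func collectAssets(
    codeArtifact: CodeArtifact,
    domain: String,
    repository: String,
    package: String,
    packageVersion: String
) async throws -> [CodeArtifact.AssetSummary] {
    var assets: [CodeArtifact.AssetSummary] = []
    var nextToken: String?
    repeat {
        // Pass the token returned by the previous page to fetch the next one.
        let page = try await codeArtifact.listPackageVersionAssets(
            .init(
                domain: domain,
                format: .npm,
                nextToken: nextToken,
                package: package,
                packageVersion: packageVersion,
                repository: repository
            )
        )
        assets.append(contentsOf: page.assets ?? [])
        nextToken = page.nextToken
    } while nextToken != nil
    return assets
}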
public let nextToken: String? @@ -2411,7 +2412,7 @@ extension CodeArtifact { public let dependencies: [PackageDependency]? /// A format that specifies the type of the package that contains the returned dependencies. public let format: PackageFormat? - /// The namespace of the package version that contains the returned dependencies. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version that contains the returned dependencies. The package component that specifies its namespace depends on its type. For example: The namespace is required when listing dependencies from package versions of the following formats: Maven The namespace of a Maven package version is its groupId. The namespace of an npm package version is its scope. Python and NuGet package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. public let nextToken: String? @@ -2452,7 +2453,7 @@ extension CodeArtifact { public let format: PackageFormat /// The maximum number of results to return per page. public let maxResults: Int? - /// The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example: The namespace is required when deleting package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example: The namespace is required when deleting package versions of the following formats: Maven Swift generic The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. public let nextToken: String? @@ -2528,7 +2529,7 @@ extension CodeArtifact { public let defaultDisplayVersion: String? /// A format of the package. public let format: PackageFormat? - /// The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. 
Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// If there are additional results, this is the token for the next set of results. public let nextToken: String? @@ -2565,7 +2566,7 @@ extension CodeArtifact { public let format: PackageFormat? /// The maximum number of results to return per page. public let maxResults: Int? - /// The namespace prefix used to filter requested packages. Only packages with a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior. Each package format uses namespace as follows: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace prefix used to filter requested packages. Only packages with a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior. Each package format uses namespace as follows: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. public let nextToken: String? @@ -2882,7 +2883,7 @@ extension CodeArtifact { public struct PackageDependency: AWSDecodableShape { /// The type of a package dependency. The possible values depend on the package type. npm: regular, dev, peer, optional maven: optional, parent, compile, runtime, test, system, provided. Note that parent is not a regular Maven dependency type; instead this is extracted from the element if one is defined in the package version's POM file. nuget: The dependencyType field is never set for NuGet packages. pypi: Requires-Dist public let dependencyType: String? - /// The namespace of the package that this package depends on. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package that this package depends on. 
The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package that this package depends on. public let package: String? @@ -2909,7 +2910,7 @@ extension CodeArtifact { public let format: PackageFormat? /// The name of the package. public let name: String? - /// The namespace of the package. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The package origin configuration for the package. public let originConfiguration: PackageOriginConfiguration? @@ -3130,7 +3131,7 @@ extension CodeArtifact { public struct PackageSummary: AWSDecodableShape { /// The format of the package. public let format: PackageFormat? - /// The namespace of the package. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// A PackageOriginConfiguration object that contains a PackageOriginRestrictions object that contains information about the upstream and publish package origin restrictions. public let originConfiguration: PackageOriginConfiguration? @@ -3161,7 +3162,7 @@ extension CodeArtifact { public let homePage: String? /// Information about licenses associated with the package version. public let licenses: [LicenseInfo]? - /// The namespace of the package version. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. 
Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// A PackageVersionOrigin object that contains information about how the package version was added to the repository. public let origin: PackageVersionOrigin? @@ -3454,7 +3455,7 @@ extension CodeArtifact { public let domainOwner: String? /// A format that specifies the type of the package to be updated. public let format: PackageFormat - /// The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package to be updated. public let package: String @@ -3981,7 +3982,7 @@ extension CodeArtifact { public let expectedStatus: PackageVersionStatus? /// A format that specifies the type of the package with the statuses to update. public let format: PackageFormat - /// The namespace of the package version to be updated. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions of those formats do not have a namespace. + /// The namespace of the package version to be updated. The package component that specifies its namespace depends on its type. For example: The namespace of a Maven package version is its groupId. The namespace of an npm or Swift package version is its scope. The namespace of a generic package is its namespace. Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, package versions of those formats do not have a namespace. public let namespace: String? /// The name of the package with the version statuses to update. 
public let package: String diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift index a94a102b09..4d24b9333c 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift @@ -149,6 +149,7 @@ extension CodeBuild { } public enum FleetContextCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case actionRequired = "ACTION_REQUIRED" case createFailed = "CREATE_FAILED" case updateFailed = "UPDATE_FAILED" public var description: String { return self.rawValue } @@ -370,6 +371,12 @@ extension CodeBuild { public var description: String { return self.rawValue } } + public enum WebhookScopeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case githubGlobal = "GITHUB_GLOBAL" + case githubOrganization = "GITHUB_ORGANIZATION" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct BatchDeleteBuildsInput: AWSEncodableShape { @@ -721,7 +728,7 @@ extension CodeBuild { public let sourceVersion: String? /// When the build process started, expressed in Unix time format. public let startTime: Date? - /// How long, in minutes, for CodeBuild to wait before timing out this build if it does not get marked as completed. + /// How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out this build if it does not get marked as completed. public let timeoutInMinutes: Int? /// If your CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID. public let vpcConfig: VpcConfig? @@ -1237,27 +1244,33 @@ extension CodeBuild { public let computeType: ComputeType /// The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide. public let environmentType: EnvironmentType + /// The service role associated with the compute fleet. 
For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide. + public let fleetServiceRole: String? /// The name of the compute fleet. public let name: String - /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. + /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. public let overflowBehavior: FleetOverflowBehavior? /// The scaling configuration of the compute fleet. public let scalingConfiguration: ScalingConfigurationInput? /// A list of tag key and value pairs associated with this compute fleet. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. public let tags: [Tag]? + public let vpcConfig: VpcConfig? - public init(baseCapacity: Int, computeType: ComputeType, environmentType: EnvironmentType, name: String, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil) { + public init(baseCapacity: Int, computeType: ComputeType, environmentType: EnvironmentType, fleetServiceRole: String? = nil, name: String, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { self.baseCapacity = baseCapacity self.computeType = computeType self.environmentType = environmentType + self.fleetServiceRole = fleetServiceRole self.name = name self.overflowBehavior = overflowBehavior self.scalingConfiguration = scalingConfiguration self.tags = tags + self.vpcConfig = vpcConfig } public func validate(name: String) throws { try self.validate(self.baseCapacity, name: "baseCapacity", parent: name, min: 1) + try self.validate(self.fleetServiceRole, name: "fleetServiceRole", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, min: 2) try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,127}$") @@ -1266,16 +1279,19 @@ extension CodeBuild { try $0.validate(name: "\(name).tags[]") } try self.validate(self.tags, name: "tags", parent: name, max: 50) + try self.vpcConfig?.validate(name: "\(name).vpcConfig") } private enum CodingKeys: String, CodingKey { case baseCapacity = "baseCapacity" case computeType = "computeType" case environmentType = "environmentType" + case fleetServiceRole = "fleetServiceRole" case name = "name" case overflowBehavior = "overflowBehavior" case scalingConfiguration = "scalingConfiguration" case tags = "tags" + case vpcConfig = "vpcConfig" } } @@ -1327,13 +1343,13 @@ extension CodeBuild { public let serviceRole: String /// Information about the build input source code for the build project. public let source: ProjectSource - /// A version of the build input to be built for this project. 
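A minimal sketch of how the new fleetServiceRole and vpcConfig members of the fleet-creation input above might be used from Soto. The shape name CreateFleetInput, the camel-cased enum cases, and the ARN, subnet, security-group, and region values are illustrative assumptions, not taken from this patch.

import SotoCodeBuild

// Assumes an AWSClient named `awsClient` has been configured elsewhere.
let codeBuild = CodeBuild(client: awsClient, region: .useast1)

// Placeholder identifiers: the service role must grant CodeBuild the fleet
// permissions described above, and the VPC values must reference real resources.
let fleetInput = CodeBuild.CreateFleetInput(
    baseCapacity: 2,
    computeType: .buildGeneral1Small,
    environmentType: .linuxContainer,
    fleetServiceRole: "arn:aws:iam::123456789012:role/CodeBuildFleetServiceRole",
    name: "example-vpc-fleet",
    overflowBehavior: .queue,
    vpcConfig: CodeBuild.VpcConfig(
        securityGroupIds: ["sg-0123456789abcdef0"],
        subnets: ["subnet-0123456789abcdef0"],
        vpcId: "vpc-0123456789abcdef0"
    )
)
let createFleetResponse = try await codeBuild.createFleet(fleetInput)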
If not specified, the latest version is used. If specified, it must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level). For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. + /// A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For GitLab: the commit ID, branch, or Git tag to use. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level). For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. public let sourceVersion: String? /// A list of tag key and value pairs associated with this build project. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. public let tags: [Tag]? - /// How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes. + /// How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes. public let timeoutInMinutes: Int? - /// VpcConfig enables CodeBuild to access resources in an Amazon VPC. + /// VpcConfig enables CodeBuild to access resources in an Amazon VPC. If you're using compute fleets during project creation, do not provide vpcConfig. public let vpcConfig: VpcConfig? public init(artifacts: ProjectArtifacts, badgeEnabled: Bool? = nil, buildBatchConfig: ProjectBuildBatchConfig? = nil, cache: ProjectCache? = nil, concurrentBuildLimit: Int? = nil, description: String? = nil, encryptionKey: String? = nil, environment: ProjectEnvironment, fileSystemLocations: [ProjectFileSystemLocation]? = nil, logsConfig: LogsConfig? 
= nil, name: String, queuedTimeoutInMinutes: Int? = nil, secondaryArtifacts: [ProjectArtifacts]? = nil, secondarySources: [ProjectSource]? = nil, secondarySourceVersions: [ProjectSourceVersion]? = nil, serviceRole: String, source: ProjectSource, sourceVersion: String? = nil, tags: [Tag]? = nil, timeoutInMinutes: Int? = nil, vpcConfig: VpcConfig? = nil) { @@ -1365,9 +1381,9 @@ extension CodeBuild { try self.validate(self.description, name: "description", parent: name, max: 255) try self.validate(self.encryptionKey, name: "encryptionKey", parent: name, min: 1) try self.environment.validate(name: "\(name).environment") - try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, max: 150) try self.validate(self.name, name: "name", parent: name, min: 2) - try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,254}$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,149}$") try self.validate(self.queuedTimeoutInMinutes, name: "queuedTimeoutInMinutes", parent: name, max: 480) try self.validate(self.queuedTimeoutInMinutes, name: "queuedTimeoutInMinutes", parent: name, min: 5) try self.validate(self.secondaryArtifacts, name: "secondaryArtifacts", parent: name, max: 12) @@ -1382,7 +1398,7 @@ extension CodeBuild { try $0.validate(name: "\(name).tags[]") } try self.validate(self.tags, name: "tags", parent: name, max: 50) - try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, max: 480) + try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, max: 2160) try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, min: 5) try self.vpcConfig?.validate(name: "\(name).vpcConfig") } @@ -1480,27 +1496,35 @@ extension CodeBuild { public let buildType: WebhookBuildType? /// An array of arrays of WebhookFilter objects used to determine which webhooks are triggered. At least one WebhookFilter in the array must specify EVENT as its type. For a build to be triggered, at least one filter group in the filterGroups array must pass. For a filter group to pass, each of its filters must pass. public let filterGroups: [[WebhookFilter]]? + /// If manualCreation is true, CodeBuild doesn't create a webhook in GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and secret values in the output can be used to manually create a webhook within GitHub. manualCreation is only available for GitHub webhooks. + public let manualCreation: Bool? /// The name of the CodeBuild project. public let projectName: String + /// The scope configuration for global or organization webhooks. Global or organization webhooks are only available for GitHub and Github Enterprise webhooks. + public let scopeConfiguration: ScopeConfiguration? - public init(branchFilter: String? = nil, buildType: WebhookBuildType? = nil, filterGroups: [[WebhookFilter]]? = nil, projectName: String) { + public init(branchFilter: String? = nil, buildType: WebhookBuildType? = nil, filterGroups: [[WebhookFilter]]? = nil, manualCreation: Bool? = nil, projectName: String, scopeConfiguration: ScopeConfiguration? 
= nil) { self.branchFilter = branchFilter self.buildType = buildType self.filterGroups = filterGroups + self.manualCreation = manualCreation self.projectName = projectName + self.scopeConfiguration = scopeConfiguration } public func validate(name: String) throws { - try self.validate(self.projectName, name: "projectName", parent: name, max: 255) + try self.validate(self.projectName, name: "projectName", parent: name, max: 150) try self.validate(self.projectName, name: "projectName", parent: name, min: 2) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,254}$") + try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,149}$") } private enum CodingKeys: String, CodingKey { case branchFilter = "branchFilter" case buildType = "buildType" case filterGroups = "filterGroups" + case manualCreation = "manualCreation" case projectName = "projectName" + case scopeConfiguration = "scopeConfiguration" } } @@ -1720,9 +1744,9 @@ extension CodeBuild { } public func validate(name: String) throws { - try self.validate(self.projectName, name: "projectName", parent: name, max: 255) + try self.validate(self.projectName, name: "projectName", parent: name, max: 150) try self.validate(self.projectName, name: "projectName", parent: name, min: 2) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,254}$") + try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,149}$") } private enum CodingKeys: String, CodingKey { @@ -1953,13 +1977,15 @@ extension CodeBuild { public let created: Date? /// The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide. public let environmentType: EnvironmentType? + /// The service role associated with the compute fleet. For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide. + public let fleetServiceRole: String? /// The ID of the compute fleet. public let id: String? /// The time at which the compute fleet was last modified. public let lastModified: Date? 
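The manualCreation flag and scope configuration added to the webhook input above could be exercised roughly as follows to register an organization-level GitHub webhook and then wire it up by hand in GitHub. The inits for the webhook input, ScopeConfiguration, and WebhookFilter follow the signatures in this patch; the project name, organization name, the enum case spellings, and the assumption that the response exposes a webhook member are illustrative.

// Continues with the `codeBuild` client from the earlier sketch.
let webhookInput = CodeBuild.CreateWebhookInput(
    filterGroups: [[
        CodeBuild.WebhookFilter(pattern: "PUSH", type: .event),
        CodeBuild.WebhookFilter(pattern: "^refs/heads/main$", type: .headRef)
    ]],
    manualCreation: true, // return payloadUrl/secret instead of creating the webhook in GitHub
    projectName: "example-project",
    scopeConfiguration: CodeBuild.ScopeConfiguration(
        name: "example-github-org",
        scope: .githubOrganization
    )
)
let webhookResponse = try await codeBuild.createWebhook(webhookInput)
// With manualCreation enabled, these values are pasted into GitHub's webhook settings.
print(webhookResponse.webhook?.payloadUrl ?? "", webhookResponse.webhook?.secret ?? "")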
/// The name of the compute fleet. public let name: String? - /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. + /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. public let overflowBehavior: FleetOverflowBehavior? /// The scaling configuration of the compute fleet. public let scalingConfiguration: ScalingConfigurationOutput? @@ -1967,13 +1993,15 @@ extension CodeBuild { public let status: FleetStatus? /// A list of tag key and value pairs associated with this compute fleet. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. public let tags: [Tag]? + public let vpcConfig: VpcConfig? - public init(arn: String? = nil, baseCapacity: Int? = nil, computeType: ComputeType? = nil, created: Date? = nil, environmentType: EnvironmentType? = nil, id: String? = nil, lastModified: Date? = nil, name: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationOutput? = nil, status: FleetStatus? = nil, tags: [Tag]? = nil) { + public init(arn: String? = nil, baseCapacity: Int? = nil, computeType: ComputeType? = nil, created: Date? = nil, environmentType: EnvironmentType? = nil, fleetServiceRole: String? = nil, id: String? = nil, lastModified: Date? = nil, name: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationOutput? = nil, status: FleetStatus? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { self.arn = arn self.baseCapacity = baseCapacity self.computeType = computeType self.created = created self.environmentType = environmentType + self.fleetServiceRole = fleetServiceRole self.id = id self.lastModified = lastModified self.name = name @@ -1981,6 +2009,7 @@ extension CodeBuild { self.scalingConfiguration = scalingConfiguration self.status = status self.tags = tags + self.vpcConfig = vpcConfig } private enum CodingKeys: String, CodingKey { @@ -1989,6 +2018,7 @@ extension CodeBuild { case computeType = "computeType" case created = "created" case environmentType = "environmentType" + case fleetServiceRole = "fleetServiceRole" case id = "id" case lastModified = "lastModified" case name = "name" @@ -1996,6 +2026,7 @@ extension CodeBuild { case scalingConfiguration = "scalingConfiguration" case status = "status" case tags = "tags" + case vpcConfig = "vpcConfig" } } @@ -2108,13 +2139,13 @@ extension CodeBuild { } public struct ImportSourceCredentialsInput: AWSEncodableShape { - /// The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console. + /// The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket repository. 
An OAUTH connection is not supported by the API and must be created using the CodeBuild console. Note that CODECONNECTIONS is only valid for GitLab and GitLab Self Managed. public let authType: AuthType /// The source provider used for this project. public let serverType: ServerType /// Set to false to prevent overwriting the repository source credentials. Set to true to overwrite the repository source credentials. The default value is true. public let shouldOverwrite: Bool? - /// For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. + /// For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the authType CODECONNECTIONS, this is the connectionArn. public let token: String /// The Bitbucket username when the authType is BASIC_AUTH. This parameter is not valid for other types of source providers or connections. public let username: String? @@ -2851,11 +2882,11 @@ extension CodeBuild { public let serviceRole: String? /// Information about the build input source code for this build project. public let source: ProjectSource? - /// A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level). For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. + /// A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For GitLab: the commit ID, branch, or Git tag to use. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level). For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. 
public let sourceVersion: String? /// A list of tag key and value pairs associated with this build project. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. public let tags: [Tag]? - /// How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes. + /// How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes. public let timeoutInMinutes: Int? /// Information about the VPC configuration that CodeBuild accesses. public let vpcConfig: VpcConfig? @@ -3197,7 +3228,7 @@ extension CodeBuild { public struct ProjectSourceVersion: AWSEncodableShape & AWSDecodableShape { /// An identifier for a source in the build project. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length. public let sourceIdentifier: String - /// The source version for the corresponding source identifier. If specified, must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. + /// The source version for the corresponding source identifier. If specified, must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For GitLab: the commit ID, branch, or Git tag to use. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. public let sourceVersion: String public init(sourceIdentifier: String, sourceVersion: String) { @@ -3640,6 +3671,27 @@ extension CodeBuild { } } + public struct ScopeConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The domain of the GitHub Enterprise organization. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE + public let domain: String? 
+ /// The name of either the enterprise or organization that will send webhook events to CodeBuild, depending on if the webhook is a global or organization webhook respectively. + public let name: String + /// The type of scope for a GitHub webhook. + public let scope: WebhookScopeType + + public init(domain: String? = nil, name: String, scope: WebhookScopeType) { + self.domain = domain + self.name = name + self.scope = scope + } + + private enum CodingKeys: String, CodingKey { + case domain = "domain" + case name = "name" + case scope = "scope" + } + } + public struct SourceAuth: AWSEncodableShape & AWSDecodableShape { /// The resource value that applies to the specified authorization type. public let resource: String? @@ -3782,7 +3834,7 @@ extension CodeBuild { public func validate(name: String) throws { try self.buildBatchConfigOverride?.validate(name: "\(name).buildBatchConfigOverride") - try self.validate(self.buildTimeoutInMinutesOverride, name: "buildTimeoutInMinutesOverride", parent: name, max: 480) + try self.validate(self.buildTimeoutInMinutesOverride, name: "buildTimeoutInMinutesOverride", parent: name, max: 2160) try self.validate(self.buildTimeoutInMinutesOverride, name: "buildTimeoutInMinutesOverride", parent: name, min: 5) try self.validate(self.encryptionKeyOverride, name: "encryptionKeyOverride", parent: name, min: 1) try self.environmentVariablesOverride?.forEach { @@ -3907,15 +3959,15 @@ extension CodeBuild { public let secondarySourcesVersionOverride: [ProjectSourceVersion]? /// The name of a service role for this build that overrides the one specified in the build project. public let serviceRoleOverride: String? - /// An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket or GitHub. + /// An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket, GitHub, GitLab, or GitLab Self Managed. public let sourceAuthOverride: SourceAuth? /// A location that overrides, for this build, the source location for the one defined in the build project. public let sourceLocationOverride: String? /// A source input type, for this build, that overrides the source input defined in the build project. public let sourceTypeOverride: SourceType? - /// The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider: CodeCommit The commit ID, branch, or Git tag to use. GitHub The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. Bitbucket The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. Amazon S3 The version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence. For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. 
+ /// The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider: CodeCommit The commit ID, branch, or Git tag to use. GitHub The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. GitLab The commit ID, branch, or Git tag to use. Bitbucket The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. Amazon S3 The version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence. For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. public let sourceVersion: String? - /// The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this build only, the latest setting already defined in the build project. + /// The number of build timeout minutes, from 5 to 2160 (36 hours), that overrides, for this build only, the latest setting already defined in the build project. public let timeoutInMinutesOverride: Int? public init(artifactsOverride: ProjectArtifacts? = nil, buildspecOverride: String? = nil, buildStatusConfigOverride: BuildStatusConfig? = nil, cacheOverride: ProjectCache? = nil, certificateOverride: String? = nil, computeTypeOverride: ComputeType? = nil, debugSessionEnabled: Bool? = nil, encryptionKeyOverride: String? = nil, environmentTypeOverride: EnvironmentType? = nil, environmentVariablesOverride: [EnvironmentVariable]? = nil, fleetOverride: ProjectFleet? = nil, gitCloneDepthOverride: Int? = nil, gitSubmodulesConfigOverride: GitSubmodulesConfig? = nil, idempotencyToken: String? = nil, imageOverride: String? = nil, imagePullCredentialsTypeOverride: ImagePullCredentialsType? = nil, insecureSslOverride: Bool? = nil, logsConfigOverride: LogsConfig? = nil, privilegedModeOverride: Bool? = nil, projectName: String, queuedTimeoutInMinutesOverride: Int? = nil, registryCredentialOverride: RegistryCredential? = nil, reportBuildStatusOverride: Bool? = nil, secondaryArtifactsOverride: [ProjectArtifacts]? = nil, secondarySourcesOverride: [ProjectSource]? = nil, secondarySourcesVersionOverride: [ProjectSourceVersion]? = nil, serviceRoleOverride: String? = nil, sourceAuthOverride: SourceAuth? = nil, sourceLocationOverride: String? = nil, sourceTypeOverride: SourceType? = nil, sourceVersion: String? = nil, timeoutInMinutesOverride: Int? 
= nil) { @@ -3971,7 +4023,7 @@ extension CodeBuild { try self.validate(self.secondarySourcesOverride, name: "secondarySourcesOverride", parent: name, max: 12) try self.validate(self.secondarySourcesVersionOverride, name: "secondarySourcesVersionOverride", parent: name, max: 12) try self.validate(self.serviceRoleOverride, name: "serviceRoleOverride", parent: name, min: 1) - try self.validate(self.timeoutInMinutesOverride, name: "timeoutInMinutesOverride", parent: name, max: 480) + try self.validate(self.timeoutInMinutesOverride, name: "timeoutInMinutesOverride", parent: name, max: 2160) try self.validate(self.timeoutInMinutesOverride, name: "timeoutInMinutesOverride", parent: name, min: 5) } @@ -4213,31 +4265,38 @@ extension CodeBuild { public let computeType: ComputeType? /// The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide. public let environmentType: EnvironmentType? - /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. + /// The service role associated with the compute fleet. For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide. + public let fleetServiceRole: String? + /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. public let overflowBehavior: FleetOverflowBehavior? /// The scaling configuration of the compute fleet. public let scalingConfiguration: ScalingConfigurationInput? /// A list of tag key and value pairs associated with this compute fleet. 
These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. public let tags: [Tag]? + public let vpcConfig: VpcConfig? - public init(arn: String, baseCapacity: Int? = nil, computeType: ComputeType? = nil, environmentType: EnvironmentType? = nil, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil) { + public init(arn: String, baseCapacity: Int? = nil, computeType: ComputeType? = nil, environmentType: EnvironmentType? = nil, fleetServiceRole: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { self.arn = arn self.baseCapacity = baseCapacity self.computeType = computeType self.environmentType = environmentType + self.fleetServiceRole = fleetServiceRole self.overflowBehavior = overflowBehavior self.scalingConfiguration = scalingConfiguration self.tags = tags + self.vpcConfig = vpcConfig } public func validate(name: String) throws { try self.validate(self.arn, name: "arn", parent: name, min: 1) try self.validate(self.baseCapacity, name: "baseCapacity", parent: name, min: 1) + try self.validate(self.fleetServiceRole, name: "fleetServiceRole", parent: name, min: 1) try self.scalingConfiguration?.validate(name: "\(name).scalingConfiguration") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") } try self.validate(self.tags, name: "tags", parent: name, max: 50) + try self.vpcConfig?.validate(name: "\(name).vpcConfig") } private enum CodingKeys: String, CodingKey { @@ -4245,9 +4304,11 @@ extension CodeBuild { case baseCapacity = "baseCapacity" case computeType = "computeType" case environmentType = "environmentType" + case fleetServiceRole = "fleetServiceRole" case overflowBehavior = "overflowBehavior" case scalingConfiguration = "scalingConfiguration" case tags = "tags" + case vpcConfig = "vpcConfig" } } @@ -4298,11 +4359,11 @@ extension CodeBuild { public let serviceRole: String? /// Information to be changed about the build input source code for the build project. public let source: ProjectSource? - /// A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level). For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. + /// A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of: For CodeCommit: the commit ID, branch, or Git tag to use. 
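An existing fleet could be reconfigured the same way; a sketch using the UpdateFleet input whose init appears above, with placeholder ARNs and network identifiers, and the assumption that the Soto operation is named updateFleet.

// Attach the new service role and VPC settings to an existing fleet,
// continuing with the `codeBuild` client from the earlier sketches.
let updateFleetInput = CodeBuild.UpdateFleetInput(
    arn: "arn:aws:codebuild:us-east-1:123456789012:fleet/example-vpc-fleet",
    fleetServiceRole: "arn:aws:iam::123456789012:role/CodeBuildFleetServiceRole",
    overflowBehavior: .onDemand, // on-demand overflow with a VPC fleet needs the extra VPC permissions noted above
    vpcConfig: CodeBuild.VpcConfig(
        securityGroupIds: ["sg-0123456789abcdef0"],
        subnets: ["subnet-0123456789abcdef0"],
        vpcId: "vpc-0123456789abcdef0"
    )
)
_ = try await codeBuild.updateFleet(updateFleetInput)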
For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For GitLab: the commit ID, branch, or Git tag to use. For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used. For Amazon S3: the version ID of the object that represents the build input ZIP file to use. If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level). For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide. public let sourceVersion: String? /// An updated list of tag key and value pairs associated with this build project. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. public let tags: [Tag]? - /// The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. + /// The replacement value in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. public let timeoutInMinutes: Int? /// VpcConfig enables CodeBuild to access resources in an Amazon VPC. public let vpcConfig: VpcConfig? @@ -4351,7 +4412,7 @@ extension CodeBuild { try $0.validate(name: "\(name).tags[]") } try self.validate(self.tags, name: "tags", parent: name, max: 50) - try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, max: 480) + try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, max: 2160) try self.validate(self.timeoutInMinutes, name: "timeoutInMinutes", parent: name, min: 5) try self.vpcConfig?.validate(name: "\(name).vpcConfig") } @@ -4503,9 +4564,9 @@ extension CodeBuild { } public func validate(name: String) throws { - try self.validate(self.projectName, name: "projectName", parent: name, max: 255) + try self.validate(self.projectName, name: "projectName", parent: name, max: 150) try self.validate(self.projectName, name: "projectName", parent: name, min: 2) - try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,254}$") + try self.validate(self.projectName, name: "projectName", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,149}$") } private enum CodingKeys: String, CodingKey { @@ -4572,19 +4633,25 @@ extension CodeBuild { public let filterGroups: [[WebhookFilter]]? /// A timestamp that indicates the last time a repository's secret token was modified. public let lastModifiedSecret: Date? + /// If manualCreation is true, CodeBuild doesn't create a webhook in GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and secret values in the output can be used to manually create a webhook within GitHub. manualCreation is only available for GitHub webhooks. + public let manualCreation: Bool? /// The CodeBuild endpoint where webhook events are sent. public let payloadUrl: String? + /// The scope configuration for global or organization webhooks. 
Global or organization webhooks are only available for GitHub and Github Enterprise webhooks. + public let scopeConfiguration: ScopeConfiguration? /// The secret token of the associated repository. A Bitbucket webhook does not support secret. public let secret: String? /// The URL to the webhook. public let url: String? - public init(branchFilter: String? = nil, buildType: WebhookBuildType? = nil, filterGroups: [[WebhookFilter]]? = nil, lastModifiedSecret: Date? = nil, payloadUrl: String? = nil, secret: String? = nil, url: String? = nil) { + public init(branchFilter: String? = nil, buildType: WebhookBuildType? = nil, filterGroups: [[WebhookFilter]]? = nil, lastModifiedSecret: Date? = nil, manualCreation: Bool? = nil, payloadUrl: String? = nil, scopeConfiguration: ScopeConfiguration? = nil, secret: String? = nil, url: String? = nil) { self.branchFilter = branchFilter self.buildType = buildType self.filterGroups = filterGroups self.lastModifiedSecret = lastModifiedSecret + self.manualCreation = manualCreation self.payloadUrl = payloadUrl + self.scopeConfiguration = scopeConfiguration self.secret = secret self.url = url } @@ -4594,7 +4661,9 @@ extension CodeBuild { case buildType = "buildType" case filterGroups = "filterGroups" case lastModifiedSecret = "lastModifiedSecret" + case manualCreation = "manualCreation" case payloadUrl = "payloadUrl" + case scopeConfiguration = "scopeConfiguration" case secret = "secret" case url = "url" } @@ -4605,7 +4674,7 @@ extension CodeBuild { public let excludeMatchedPattern: Bool? /// For a WebHookFilter that uses EVENT type, a comma-separated string that specifies one or more events. For example, the webhook filter PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED allows all push, pull request created, and pull request updated events to trigger a build. For a WebHookFilter that uses any of the other filter types, a regular expression pattern. For example, a WebHookFilter that uses HEAD_REF for its type and the pattern ^refs/heads/ triggers a build when the head reference is a branch with a reference name refs/heads/branch-name. public let pattern: String - /// The type of webhook filter. There are nine webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, COMMIT_MESSAGE, TAG_NAME, RELEASE_NAME, and WORKFLOW_NAME. EVENT A webhook event triggers a build when the provided pattern matches one of nine event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, PULL_REQUEST_MERGED, RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events. The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only. The RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED work with GitHub only. ACTOR_ACCOUNT_ID A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern. HEAD_REF A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name. Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events. BASE_REF A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name. Works with pull request events only. 
FILE_PATH A webhook triggers a build when the path of a changed file matches the regular expression pattern. Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. COMMIT_MESSAGE A webhook triggers a build when the head commit message matches the regular expression pattern. Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. TAG_NAME A webhook triggers a build when the tag name of the release matches the regular expression pattern. Works with RELEASED and PRERELEASED events only. RELEASE_NAME A webhook triggers a build when the release name matches the regular expression pattern. Works with RELEASED and PRERELEASED events only. WORKFLOW_NAME A webhook triggers a build when the workflow name matches the regular expression pattern. Works with WORKFLOW_JOB_QUEUED events only. + /// The type of webhook filter. There are nine webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, COMMIT_MESSAGE, TAG_NAME, RELEASE_NAME, and WORKFLOW_NAME. EVENT A webhook event triggers a build when the provided pattern matches one of nine event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, PULL_REQUEST_MERGED, RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events. Types PULL_REQUEST_REOPENED and WORKFLOW_JOB_QUEUED work with GitHub and GitHub Enterprise only. Types RELEASED and PRERELEASED work with GitHub only. ACTOR_ACCOUNT_ID A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern. HEAD_REF A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name. Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events. BASE_REF A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name. Works with pull request events only. FILE_PATH A webhook triggers a build when the path of a changed file matches the regular expression pattern. Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. COMMIT_MESSAGE A webhook triggers a build when the head commit message matches the regular expression pattern. Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. TAG_NAME A webhook triggers a build when the tag name of the release matches the regular expression pattern. Works with RELEASED and PRERELEASED events only. RELEASE_NAME A webhook triggers a build when the release name matches the regular expression pattern. Works with RELEASED and PRERELEASED events only. REPOSITORY_NAME A webhook triggers a build when the repository name matches the regular expression pattern. Works with GitHub global or organization webhooks only. 
WORKFLOW_NAME A webhook triggers a build when the workflow name matches the regular expression pattern. Works with WORKFLOW_JOB_QUEUED events only. public let type: WebhookFilterType public init(excludeMatchedPattern: Bool? = nil, pattern: String, type: WebhookFilterType) { diff --git a/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_api.swift b/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_api.swift index 92732c7079..55e09e4018 100644 --- a/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_api.swift +++ b/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_api.swift @@ -73,7 +73,7 @@ public struct CodeGuruSecurity: AWSService { // MARK: API Calls - /// Returns a list of all requested findings. + /// Returns a list of requested findings from standard scans. @Sendable public func batchGetFindings(_ input: BatchGetFindingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchGetFindingsResponse { return try await self.client.execute( @@ -86,7 +86,7 @@ public struct CodeGuruSecurity: AWSService { ) } - /// Use to create a scan using code uploaded to an S3 bucket. + /// Use to create a scan using code uploaded to an Amazon S3 bucket. @Sendable public func createScan(_ input: CreateScanRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateScanResponse { return try await self.client.execute( @@ -99,7 +99,7 @@ public struct CodeGuruSecurity: AWSService { ) } - /// Generates a pre-signed URL and request headers used to upload a code resource. You can upload your code resource to the URL and add the request headers using any HTTP client. + /// Generates a pre-signed URL, request headers used to upload a code resource, and code artifact identifier for the uploaded resource. You can upload your code resource to the URL with the request headers using any HTTP client. @Sendable public func createUploadUrl(_ input: CreateUploadUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUploadUrlResponse { return try await self.client.execute( @@ -112,7 +112,7 @@ public struct CodeGuruSecurity: AWSService { ) } - /// Use to get account level configuration. + /// Use to get the encryption configuration for an account. @Sendable public func getAccountConfiguration(_ input: GetAccountConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAccountConfigurationResponse { return try await self.client.execute( @@ -138,7 +138,7 @@ public struct CodeGuruSecurity: AWSService { ) } - /// Returns top level metrics about an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings. + /// Returns a summary of metrics for an account from a specified date, including number of open findings, the categories with most findings, the scans with most open findings, and scans with most open critical findings. @Sendable public func getMetricsSummary(_ input: GetMetricsSummaryRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMetricsSummaryResponse { return try await self.client.execute( @@ -177,7 +177,7 @@ public struct CodeGuruSecurity: AWSService { ) } - /// Returns a list of all the standard scans in an account. Does not return express scans. + /// Returns a list of all scans in an account. Does not return EXPRESS scans. 
@Sendable public func listScans(_ input: ListScansRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListScansResponse { return try await self.client.execute( @@ -229,7 +229,7 @@ public struct CodeGuruSecurity: AWSService { ) } - /// Use to update account-level configuration with an encryption key. + /// Use to update the encryption configuration for an account. @Sendable public func updateAccountConfiguration(_ input: UpdateAccountConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAccountConfigurationResponse { return try await self.client.execute( @@ -294,7 +294,7 @@ extension CodeGuruSecurity { ) } - /// Returns a list of all the standard scans in an account. Does not return express scans. + /// Returns a list of all scans in an account. Does not return EXPRESS scans. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_shapes.swift b/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_shapes.swift index 51c148186b..95dba79e7c 100644 --- a/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_shapes.swift +++ b/Sources/Soto/Services/CodeGuruSecurity/CodeGuruSecurity_shapes.swift @@ -73,15 +73,15 @@ extension CodeGuruSecurity { // MARK: Shapes public struct AccountFindingsMetric: AWSDecodableShape { - /// The number of closed findings of each severity in an account on the specified date. + /// The number of closed findings of each severity on the specified date. public let closedFindings: FindingMetricsValuePerSeverity? - /// The date from which the finding metrics were retrieved. + /// The date from which the findings metrics were retrieved. public let date: Date? - /// The average time it takes to close findings of each severity in days. + /// The average time in days it takes to close findings of each severity as of a specified date. public let meanTimeToClose: FindingMetricsValuePerSeverity? - /// The number of new findings of each severity in account on the specified date. + /// The number of new findings of each severity on the specified date. public let newFindings: FindingMetricsValuePerSeverity? - /// The number of open findings of each severity in an account as of the specified date. + /// The number of open findings of each severity as of the specified date. public let openFindings: FindingMetricsValuePerSeverity? public init(closedFindings: FindingMetricsValuePerSeverity? = nil, date: Date? = nil, meanTimeToClose: FindingMetricsValuePerSeverity? = nil, newFindings: FindingMetricsValuePerSeverity? = nil, openFindings: FindingMetricsValuePerSeverity? = nil) { @@ -147,7 +147,7 @@ extension CodeGuruSecurity { public struct BatchGetFindingsResponse: AWSDecodableShape { /// A list of errors for individual findings which were not fetched. Each BatchGetFindingsError contains the scanName, findingId, errorCode and error message. public let failedFindings: [BatchGetFindingsError] - /// A list of all requested findings. + /// A list of all findings which were successfully fetched. public let findings: [Finding] public init(failedFindings: [BatchGetFindingsError], findings: [Finding]) { @@ -200,9 +200,9 @@ extension CodeGuruSecurity { public let analysisType: AnalysisType? /// The idempotency token for the request. Amazon CodeGuru Security uses this value to prevent the accidental creation of duplicate scans if there are failures and retries. public let clientToken: String? - /// The identifier for an input resource used to create a scan. 
+ /// The identifier for the resource object to be scanned. public let resourceId: ResourceId - /// The unique name that CodeGuru Security uses to track revisions across multiple scans of the same resource. Only allowed for a STANDARD scan type. If not specified, it will be auto generated. + /// The unique name that CodeGuru Security uses to track revisions across multiple scans of the same resource. Only allowed for a STANDARD scan type. public let scanName: String /// The type of scan, either Standard or Express. Defaults to Standard type if missing. Express scans run on limited resources and use a limited set of detectors to analyze your code in near-real time. Standard scans have standard resource limits and use the full set of detectors to analyze your code. public let scanType: ScanType? @@ -293,11 +293,11 @@ extension CodeGuruSecurity { } public struct CreateUploadUrlResponse: AWSDecodableShape { - /// The identifier for the uploaded code resource. + /// The identifier for the uploaded code resource. Pass this to CreateScan to use the uploaded resources. public let codeArtifactId: String /// A set of key-value pairs that contain the required headers when uploading your resource. public let requestHeaders: [String: String] - /// A pre-signed S3 URL. You can upload the code file you want to scan and add the required requestHeaders using any HTTP client. + /// A pre-signed S3 URL. You can upload the code file you want to scan with the required requestHeaders using any HTTP client. public let s3Url: String public init(codeArtifactId: String, requestHeaders: [String: String], s3Url: String) { @@ -314,7 +314,7 @@ extension CodeGuruSecurity { } public struct EncryptionConfig: AWSEncodableShape & AWSDecodableShape { - /// The KMS key ARN to use for encryption. This must be provided as a header when uploading your code resource. + /// The KMS key ARN that is used for encryption. If an AWS-managed key is used for encryption, returns empty. public let kmsKeyArn: String? public init(kmsKeyArn: String? = nil) { @@ -372,7 +372,7 @@ extension CodeGuruSecurity { public let detectorName: String? /// One or more tags or categorizations that are associated with a detector. These tags are defined by type, programming language, or other classification such as maintainability or consistency. public let detectorTags: [String]? - /// The identifier for the component that generated a finding such as AWSCodeGuruSecurity or AWSInspector. + /// The identifier for the component that generated a finding such as AmazonCodeGuruSecurity. public let generatorId: String? /// The identifier for a finding. public let id: String? @@ -382,7 +382,7 @@ extension CodeGuruSecurity { public let resource: Resource? /// The identifier for the rule that generated the finding. public let ruleId: String? - /// The severity of the finding. + /// The severity of the finding. Severity can be critical, high, medium, low, or informational. For information on severity levels, see Finding severity in the Amazon CodeGuru Security User Guide. public let severity: Severity? /// The status of the finding. A finding status can be open or closed. public let status: Status? @@ -452,15 +452,15 @@ extension CodeGuruSecurity { } public struct FindingMetricsValuePerSeverity: AWSDecodableShape { - /// The severity of the finding is critical and should be addressed immediately. + /// A numeric value corresponding to a critical finding. public let critical: Double? 
- /// The severity of the finding is high and should be addressed as a near-term priority. + /// A numeric value corresponding to a high severity finding. public let high: Double? - /// The finding is related to quality or readability improvements and not considered actionable. + /// A numeric value corresponding to an informational finding. public let info: Double? - /// The severity of the finding is low and does require action on its own. + /// A numeric value corresponding to a low severity finding. public let low: Double? - /// The severity of the finding is medium and should be addressed as a mid-term priority. + /// A numeric value corresponding to a medium severity finding. public let medium: Double? public init(critical: Double? = nil, high: Double? = nil, info: Double? = nil, low: Double? = nil, medium: Double? = nil) { @@ -485,7 +485,7 @@ extension CodeGuruSecurity { } public struct GetAccountConfigurationResponse: AWSDecodableShape { - /// An EncryptionConfig object that contains the KMS key ARN to use for encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify your own key, call UpdateAccountConfiguration. + /// An EncryptionConfig object that contains the KMS key ARN that is used for encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify your own key, call UpdateAccountConfiguration. If you do not specify a customer-managed key, returns empty. public let encryptionConfig: EncryptionConfig public init(encryptionConfig: EncryptionConfig) { @@ -498,7 +498,7 @@ extension CodeGuruSecurity { } public struct GetFindingsRequest: AWSEncodableShape { - /// The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. + /// The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. If not specified, returns 1000 results. public let maxResults: Int? /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? @@ -553,7 +553,7 @@ extension CodeGuruSecurity { } public struct GetMetricsSummaryRequest: AWSEncodableShape { - /// The date you want to retrieve summary metrics from, rounded to the nearest day. The date must be within the past two years since metrics data is only stored for two years. If a date outside of this range is passed, the response will be empty. + /// The date you want to retrieve summary metrics from, rounded to the nearest day. The date must be within the past two years. public let date: Date public init(date: Date) { @@ -615,6 +615,8 @@ extension CodeGuruSecurity { public let analysisType: AnalysisType /// The time the scan was created. public let createdAt: Date + /// Details about the error that causes a scan to fail to be retrieved. + public let errorMessage: String? /// The number of times a scan has been re-run on a revised resource. public let numberOfRevisions: Int64? /// UUID that identifies the individual scan run. 
@@ -623,14 +625,15 @@ extension CodeGuruSecurity { public let scanName: String /// The ARN for the scan name. public let scanNameArn: String? - /// The current state of the scan. Pass either InProgress, Successful, or Failed. + /// The current state of the scan. Returns either InProgress, Successful, or Failed. public let scanState: ScanState /// The time when the scan was last updated. Only available for STANDARD scan types. public let updatedAt: Date? - public init(analysisType: AnalysisType, createdAt: Date, numberOfRevisions: Int64? = nil, runId: String, scanName: String, scanNameArn: String? = nil, scanState: ScanState, updatedAt: Date? = nil) { + public init(analysisType: AnalysisType, createdAt: Date, errorMessage: String? = nil, numberOfRevisions: Int64? = nil, runId: String, scanName: String, scanNameArn: String? = nil, scanState: ScanState, updatedAt: Date? = nil) { self.analysisType = analysisType self.createdAt = createdAt + self.errorMessage = errorMessage self.numberOfRevisions = numberOfRevisions self.runId = runId self.scanName = scanName @@ -642,6 +645,7 @@ extension CodeGuruSecurity { private enum CodingKeys: String, CodingKey { case analysisType = "analysisType" case createdAt = "createdAt" + case errorMessage = "errorMessage" case numberOfRevisions = "numberOfRevisions" case runId = "runId" case scanName = "scanName" @@ -652,13 +656,13 @@ extension CodeGuruSecurity { } public struct ListFindingsMetricsRequest: AWSEncodableShape { - /// The end date of the interval which you want to retrieve metrics from. + /// The end date of the interval which you want to retrieve metrics from. Round to the nearest day. public let endDate: Date - /// The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. + /// The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. If not specified, returns 1000 results. public let maxResults: Int? /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? - /// The start date of the interval which you want to retrieve metrics from. + /// The start date of the interval which you want to retrieve metrics from. Rounds to the nearest day. public let startDate: Date public init(endDate: Date, maxResults: Int? = nil, nextToken: String? = nil, startDate: Date) { @@ -704,7 +708,7 @@ extension CodeGuruSecurity { } public struct ListScansRequest: AWSEncodableShape { - /// The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. Use nextToken in a subsequent request to retrieve additional results. + /// The maximum number of results to return in the response. Use this parameter when paginating results. If additional results exist beyond the number you specify, the nextToken element is returned in the response. 
Use nextToken in a subsequent request to retrieve additional results. If not specified, returns 100 results. public let maxResults: Int? /// A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request. For subsequent calls, use the nextToken value returned from the previous request to continue listing results after the first page. public let nextToken: String? @@ -748,7 +752,7 @@ extension CodeGuruSecurity { } public struct ListTagsForResourceRequest: AWSEncodableShape { - /// The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan. + /// The ARN of the ScanName object. You can retrieve this ARN by calling CreateScan, ListScans, or GetScan. public let resourceArn: String public init(resourceArn: String) { @@ -784,15 +788,15 @@ extension CodeGuruSecurity { } public struct MetricsSummary: AWSDecodableShape { - /// A list of CategoryWithFindingNum objects for the top 5 finding categories with the most open findings in an account. + /// A list of CategoryWithFindingNum objects for the top 5 finding categories with the most findings. public let categoriesWithMostFindings: [CategoryWithFindingNum]? /// The date from which the metrics summary information was retrieved. public let date: Date? - /// The number of open findings of each severity in an account. + /// The number of open findings of each severity. public let openFindings: FindingMetricsValuePerSeverity? - /// A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open findings in an account. + /// A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open critical findings. public let scansWithMostOpenCriticalFindings: [ScanNameWithFindingNum]? - /// A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open critical findings in an account. + /// A list of ScanNameWithFindingNum objects for the top 3 scans with the most number of open findings. public let scansWithMostOpenFindings: [ScanNameWithFindingNum]? public init(categoriesWithMostFindings: [CategoryWithFindingNum]? = nil, date: Date? = nil, openFindings: FindingMetricsValuePerSeverity? = nil, scansWithMostOpenCriticalFindings: [ScanNameWithFindingNum]? = nil, scansWithMostOpenFindings: [ScanNameWithFindingNum]? = nil) { @@ -847,9 +851,9 @@ extension CodeGuruSecurity { } public struct Resource: AWSDecodableShape { - /// The identifier for the resource. + /// The scanName of the scan that was run on the resource. public let id: String? - /// The identifier for a section of the resource, such as an AWS Lambda layer. + /// The identifier for a section of the resource. public let subResourceId: String? public init(id: String? = nil, subResourceId: String? = nil) { @@ -864,7 +868,7 @@ extension CodeGuruSecurity { } public struct ScanNameWithFindingNum: AWSDecodableShape { - /// The number of open findings generated by a scan. + /// The number of findings generated by a scan. public let findingNumber: Int? /// The name of the scan. public let scanName: String? @@ -914,7 +918,7 @@ extension CodeGuruSecurity { } public struct SuggestedFix: AWSDecodableShape { - /// The suggested code to add to your file. + /// The suggested code fix. If applicable, includes code patch to replace your source code. public let code: String? /// A description of the suggested code fix and why it is being suggested. public let description: String? 
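[Editor's note, not part of the generated patch] The MetricsSummary doc updates above describe what GetMetricsSummary now aggregates: open findings per severity, the top finding categories, and the top scans by open and open-critical findings. A minimal, hypothetical Soto sketch of reading that summary follows; the SotoCodeGuruSecurity module name and the metricsSummary member on GetMetricsSummaryResponse are assumptions, since neither appears in this diff.

import Foundation
import SotoCodeGuruSecurity  // assumed Soto product name for this service

// Hypothetical caller, not part of this patch: print today's metrics summary.
func printMetricsSummary(using codeGuruSecurity: CodeGuruSecurity) async throws {
    let response = try await codeGuruSecurity.getMetricsSummary(
        CodeGuruSecurity.GetMetricsSummaryRequest(date: Date())  // the service rounds the date to the nearest day
    )
    // `metricsSummary` as the response member is an assumption; GetMetricsSummaryResponse is not shown in this diff.
    guard let summary = response.metricsSummary else { return }
    print("Open critical findings:", summary.openFindings?.critical ?? 0)
    for scan in summary.scansWithMostOpenCriticalFindings ?? [] {
        print(scan.scanName ?? "unknown scan", scan.findingNumber ?? 0)
    }
}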
@@ -931,7 +935,7 @@ extension CodeGuruSecurity { } public struct TagResourceRequest: AWSEncodableShape { - /// The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan. + /// The ARN of the ScanName object. You can retrieve this ARN by calling CreateScan, ListScans, or GetScan. public let resourceArn: String /// An array of key-value pairs used to tag an existing scan. A tag is a custom attribute label with two parts: A tag key. For example, CostCenter, Environment, or Secret. Tag keys are case sensitive. An optional tag value field. For example, 111122223333, Production, or a team name. Omitting the tag value is the same as using an empty string. Tag values are case sensitive. public let tags: [String: String] @@ -970,7 +974,7 @@ extension CodeGuruSecurity { } public struct UntagResourceRequest: AWSEncodableShape { - /// The ARN of the ScanName object. You can retrieve this ARN by calling ListScans or GetScan. + /// The ARN of the ScanName object. You can retrieve this ARN by calling CreateScan, ListScans, or GetScan. public let resourceArn: String /// A list of keys for each tag you want to remove from a scan. public let tagKeys: [String] @@ -1006,7 +1010,7 @@ extension CodeGuruSecurity { } public struct UpdateAccountConfigurationRequest: AWSEncodableShape { - /// The KMS key ARN you want to use for encryption. Defaults to service-side encryption if missing. + /// The customer-managed KMS key ARN you want to use for encryption. If not specified, CodeGuru Security will use an AWS-managed key for encryption. If you previously specified a customer-managed KMS key and want CodeGuru Security to use an AWS-managed key for encryption instead, pass nothing. public let encryptionConfig: EncryptionConfig public init(encryptionConfig: EncryptionConfig) { @@ -1023,7 +1027,7 @@ extension CodeGuruSecurity { } public struct UpdateAccountConfigurationResponse: AWSDecodableShape { - /// An EncryptionConfig object that contains the KMS key ARN to use for encryption. + /// An EncryptionConfig object that contains the KMS key ARN that is used for encryption. If you did not specify a customer-managed KMS key in the request, returns empty. public let encryptionConfig: EncryptionConfig public init(encryptionConfig: EncryptionConfig) { @@ -1040,13 +1044,22 @@ extension CodeGuruSecurity { public let filePath: FilePath? /// The identifier for the vulnerability. public let id: String? - /// The number of times the vulnerability appears in your code. + /// The number of times the vulnerability appears in your code. public let itemCount: Int? /// One or more URL addresses that contain details about a vulnerability. public let referenceUrls: [String]? /// One or more vulnerabilities that are related to the vulnerability being described. public let relatedVulnerabilities: [String]? + public init(filePath: FilePath? = nil, id: String? = nil, referenceUrls: [String]? = nil, relatedVulnerabilities: [String]? = nil) { + self.filePath = filePath + self.id = id + self.itemCount = nil + self.referenceUrls = referenceUrls + self.relatedVulnerabilities = relatedVulnerabilities + } + + @available(*, deprecated, message: "Members itemCount have been deprecated") public init(filePath: FilePath? = nil, id: String? = nil, itemCount: Int? = nil, referenceUrls: [String]? = nil, relatedVulnerabilities: [String]? 
= nil) { self.filePath = filePath self.id = id @@ -1065,7 +1078,7 @@ extension CodeGuruSecurity { } public struct ResourceId: AWSEncodableShape & AWSDecodableShape { - /// The identifier for the code file uploaded to the resource where a finding was detected. + /// The identifier for the code file uploaded to the resource object. Returned by CreateUploadUrl when you upload resources to be scanned. public let codeArtifactId: String? public init(codeArtifactId: String? = nil) { diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift index 555e300e23..cd82718065 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift @@ -322,7 +322,7 @@ public struct CodePipeline: AWSService { ) } - /// Gets a summary of the most recent executions for a pipeline. + /// Gets a summary of the most recent executions for a pipeline. When applying the filter for pipeline executions that have succeeded in the stage, the operation returns all executions in the current pipeline version beginning on February 1, 2024. @Sendable public func listPipelineExecutions(_ input: ListPipelineExecutionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPipelineExecutionsOutput { return try await self.client.execute( @@ -660,7 +660,7 @@ extension CodePipeline { ) } - /// Gets a summary of the most recent executions for a pipeline. + /// Gets a summary of the most recent executions for a pipeline. When applying the filter for pipeline executions that have succeeded in the stage, the operation returns all executions in the current pipeline version beginning on February 1, 2024. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift index 2d48f80075..3330d1be0b 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift @@ -161,6 +161,7 @@ extension CodePipeline { public enum SourceRevisionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case commitId = "COMMIT_ID" case imageDigest = "IMAGE_DIGEST" + case s3ObjectKey = "S3_OBJECT_KEY" case s3ObjectVersionId = "S3_OBJECT_VERSION_ID" public var description: String { return self.rawValue } } @@ -2333,7 +2334,7 @@ extension CodePipeline { public struct ListActionExecutionsInput: AWSEncodableShape { /// Input information used to filter action execution history. public let filter: ActionExecutionFilter? - /// The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Action execution history is retained for up to 12 months, based on action execution start times. Default value is 100. Detailed execution history is available for executions run on or after February 21, 2019. + /// The maximum number of results to return in a single call. To retrieve the remaining results, make another call with the returned nextToken value. Action execution history is retained for up to 12 months, based on action execution start times. Default value is 100. public let maxResults: Int? /// The token that was returned from the previous ListActionExecutions call, which can be used to return the next set of action executions in the list. public let nextToken: String? 
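[Editor's note, not part of the generated patch] The CodeGuruSecurity doc updates above spell out how CreateUploadUrl and CreateScan fit together: CreateUploadUrl returns a codeArtifactId that is passed back to CreateScan through ResourceId. The sketch below illustrates that flow under stated assumptions: the SotoCodeGuruSecurity module name, the CreateUploadUrlRequest(scanName:) initializer, and the HTTP upload step do not appear in this diff, and the remaining CreateScanRequest members are left at their defaults.

import Foundation
import SotoCodeGuruSecurity  // assumed Soto product name for this service

// Hypothetical upload-then-scan flow, not part of this patch.
func scanUploadedCode(using codeGuruSecurity: CodeGuruSecurity, scanName: String) async throws -> CodeGuruSecurity.CreateScanResponse {
    // 1. Get a pre-signed S3 URL, the required upload headers, and a code artifact identifier.
    //    CreateUploadUrlRequest(scanName:) is assumed; that request shape is not shown in this diff.
    let upload = try await codeGuruSecurity.createUploadUrl(.init(scanName: scanName))

    // 2. PUT the code archive to upload.s3Url with upload.requestHeaders using any HTTP client (omitted here).

    // 3. Pass the returned codeArtifactId to CreateScan via ResourceId to scan the uploaded resource.
    return try await codeGuruSecurity.createScan(
        .init(resourceId: .init(codeArtifactId: upload.codeArtifactId), scanName: scanName)
    )
}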
diff --git a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_api.swift b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_api.swift index 4579349b19..56da470f0e 100644 --- a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_api.swift +++ b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_api.swift @@ -214,6 +214,19 @@ public struct ComputeOptimizer: AWSService { ) } + /// Export optimization recommendations for your Amazon Relational Database Service (Amazon RDS). Recommendations are exported in a comma-separated values (CSV) file, and its metadata in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting Recommendations in the Compute Optimizer User Guide. You can have only one Amazon RDS export job in progress per Amazon Web Services Region. + @Sendable + public func exportRDSDatabaseRecommendations(_ input: ExportRDSDatabaseRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExportRDSDatabaseRecommendationsResponse { + return try await self.client.execute( + operation: "ExportRDSDatabaseRecommendations", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns Auto Scaling group recommendations. Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide. @Sendable public func getAutoScalingGroupRecommendations(_ input: GetAutoScalingGroupRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAutoScalingGroupRecommendationsResponse { @@ -357,6 +370,32 @@ public struct ComputeOptimizer: AWSService { ) } + /// Returns the projected metrics of Amazon RDS recommendations. + @Sendable + public func getRDSDatabaseRecommendationProjectedMetrics(_ input: GetRDSDatabaseRecommendationProjectedMetricsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRDSDatabaseRecommendationProjectedMetricsResponse { + return try await self.client.execute( + operation: "GetRDSDatabaseRecommendationProjectedMetrics", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns Amazon RDS recommendations. Compute Optimizer generates recommendations for Amazon RDS that meet a specific set of requirements. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide. + @Sendable + public func getRDSDatabaseRecommendations(_ input: GetRDSDatabaseRecommendationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRDSDatabaseRecommendationsResponse { + return try await self.client.execute( + operation: "GetRDSDatabaseRecommendations", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns existing recommendation preferences, such as enhanced infrastructure metrics. Use the scope parameter to specify which preferences to return. You can specify to return preferences for an organization, a specific account ID, or a specific EC2 instance or Auto Scaling group Amazon Resource Name (ARN). For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide. 
@Sendable public func getRecommendationPreferences(_ input: GetRecommendationPreferencesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRecommendationPreferencesResponse { diff --git a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift index a25e73c6f8..048602bd3c 100644 --- a/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift +++ b/Sources/Soto/Services/ComputeOptimizer/ComputeOptimizer_shapes.swift @@ -393,6 +393,69 @@ extension ComputeOptimizer { public var description: String { return self.rawValue } } + public enum ExportableRDSDBField: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case accountId = "AccountId" + case currentDbInstanceClass = "CurrentDBInstanceClass" + case currentInstanceOnDemandHourlyPrice = "CurrentInstanceOnDemandHourlyPrice" + case currentStorageConfigurationAllocatedStorage = "CurrentStorageConfigurationAllocatedStorage" + case currentStorageConfigurationIops = "CurrentStorageConfigurationIOPS" + case currentStorageConfigurationMaxAllocatedStorage = "CurrentStorageConfigurationMaxAllocatedStorage" + case currentStorageConfigurationStorageThroughput = "CurrentStorageConfigurationStorageThroughput" + case currentStorageConfigurationStorageType = "CurrentStorageConfigurationStorageType" + case currentStorageOnDemandMonthlyPrice = "CurrentStorageOnDemandMonthlyPrice" + case effectiveRecommendationPreferencesCpuVendorArchitectures = "EffectiveRecommendationPreferencesCpuVendorArchitectures" + case effectiveRecommendationPreferencesEnhancedInfrastructureMetrics = "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics" + case effectiveRecommendationPreferencesLookbackPeriod = "EffectiveRecommendationPreferencesLookBackPeriod" + case effectiveRecommendationPreferencesSavingsEstimationMode = "EffectiveRecommendationPreferencesSavingsEstimationMode" + case engine = "Engine" + case engineVersion = "EngineVersion" + case idle = "Idle" + case instanceFinding = "InstanceFinding" + case instanceFindingReasonCodes = "InstanceFindingReasonCodes" + case instanceRecommendationOptionsDbInstanceClass = "InstanceRecommendationOptionsDBInstanceClass" + case instanceRecommendationOptionsEstimatedMonthlySavingsCurrency = "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrency" + case instanceRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts = "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" + case instanceRecommendationOptionsEstimatedMonthlySavingsValue = "InstanceRecommendationOptionsEstimatedMonthlySavingsValue" + case instanceRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts = "InstanceRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" + case instanceRecommendationOptionsInstanceOnDemandHourlyPrice = "InstanceRecommendationOptionsInstanceOnDemandHourlyPrice" + case instanceRecommendationOptionsPerformanceRisk = "InstanceRecommendationOptionsPerformanceRisk" + case instanceRecommendationOptionsProjectedUtilizationMetricsCpuMaximum = "InstanceRecommendationOptionsProjectedUtilizationMetricsCpuMaximum" + case instanceRecommendationOptionsRank = "InstanceRecommendationOptionsRank" + case instanceRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage = "InstanceRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage" + case instanceRecommendationOptionsSavingsOpportunityPercentage = 
"InstanceRecommendationOptionsSavingsOpportunityPercentage" + case lastRefreshTimestamp = "LastRefreshTimestamp" + case lookbackPeriodInDays = "LookbackPeriodInDays" + case multiAzDbInstance = "MultiAZDBInstance" + case resourceArn = "ResourceArn" + case storageFinding = "StorageFinding" + case storageFindingReasonCodes = "StorageFindingReasonCodes" + case storageRecommendationOptionsAllocatedStorage = "StorageRecommendationOptionsAllocatedStorage" + case storageRecommendationOptionsEstimatedMonthlySavingsCurrency = "StorageRecommendationOptionsEstimatedMonthlySavingsCurrency" + case storageRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts = "StorageRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" + case storageRecommendationOptionsEstimatedMonthlySavingsValue = "StorageRecommendationOptionsEstimatedMonthlySavingsValue" + case storageRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts = "StorageRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" + case storageRecommendationOptionsIops = "StorageRecommendationOptionsIOPS" + case storageRecommendationOptionsMaxAllocatedStorage = "StorageRecommendationOptionsMaxAllocatedStorage" + case storageRecommendationOptionsOnDemandMonthlyPrice = "StorageRecommendationOptionsOnDemandMonthlyPrice" + case storageRecommendationOptionsRank = "StorageRecommendationOptionsRank" + case storageRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage = "StorageRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage" + case storageRecommendationOptionsSavingsOpportunityPercentage = "StorageRecommendationOptionsSavingsOpportunityPercentage" + case storageRecommendationOptionsStorageThroughput = "StorageRecommendationOptionsStorageThroughput" + case storageRecommendationOptionsStorageType = "StorageRecommendationOptionsStorageType" + case tags = "Tags" + case utilizationMetricsCpuMaximum = "UtilizationMetricsCpuMaximum" + case utilizationMetricsDatabaseConnectionsMaximum = "UtilizationMetricsDatabaseConnectionsMaximum" + case utilizationMetricsEbsVolumeReadIopsMaximum = "UtilizationMetricsEBSVolumeReadIOPSMaximum" + case utilizationMetricsEbsVolumeReadThroughputMaximum = "UtilizationMetricsEBSVolumeReadThroughputMaximum" + case utilizationMetricsEbsVolumeStorageSpaceUtilizationMaximum = "UtilizationMetricsEBSVolumeStorageSpaceUtilizationMaximum" + case utilizationMetricsEbsVolumeWriteIopsMaximum = "UtilizationMetricsEBSVolumeWriteIOPSMaximum" + case utilizationMetricsEbsVolumeWriteThroughputMaximum = "UtilizationMetricsEBSVolumeWriteThroughputMaximum" + case utilizationMetricsMemoryMaximum = "UtilizationMetricsMemoryMaximum" + case utilizationMetricsNetworkReceiveThroughputMaximum = "UtilizationMetricsNetworkReceiveThroughputMaximum" + case utilizationMetricsNetworkTransmitThroughputMaximum = "UtilizationMetricsNetworkTransmitThroughputMaximum" + public var description: String { return self.rawValue } + } + public enum ExportableVolumeField: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accountId = "AccountId" case currentConfigurationRootVolume = "CurrentConfigurationRootVolume" @@ -481,6 +544,12 @@ extension ComputeOptimizer { public var description: String { return self.rawValue } } + public enum Idle: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `false` = "False" + case `true` = "True" + public var description: String { return self.rawValue } + } + public enum InferredWorkloadType: String, 
CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case amazonEmr = "AmazonEmr" case apacheCassandra = "ApacheCassandra" @@ -715,6 +784,79 @@ extension ComputeOptimizer { public var description: String { return self.rawValue } } + public enum RDSDBMetricName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cpu = "CPU" + case databaseConnections = "DatabaseConnections" + case ebsVolumeReadIops = "EBSVolumeReadIOPS" + case ebsVolumeReadThroughput = "EBSVolumeReadThroughput" + case ebsVolumeStorageSpaceUtilization = "EBSVolumeStorageSpaceUtilization" + case ebsVolumeWriteIops = "EBSVolumeWriteIOPS" + case ebsVolumeWriteThroughput = "EBSVolumeWriteThroughput" + case memory = "Memory" + case networkReceiveThroughput = "NetworkReceiveThroughput" + case networkTransmitThroughput = "NetworkTransmitThroughput" + public var description: String { return self.rawValue } + } + + public enum RDSDBMetricStatistic: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case average = "Average" + case maximum = "Maximum" + case minimum = "Minimum" + public var description: String { return self.rawValue } + } + + public enum RDSDBRecommendationFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case idle = "Idle" + case instanceFinding = "InstanceFinding" + case instanceFindingReasonCode = "InstanceFindingReasonCode" + case storageFinding = "StorageFinding" + case storageFindingReasonCode = "StorageFindingReasonCode" + public var description: String { return self.rawValue } + } + + public enum RDSInstanceFinding: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case optimized = "Optimized" + case overProvisioned = "Overprovisioned" + case underProvisioned = "Underprovisioned" + public var description: String { return self.rawValue } + } + + public enum RDSInstanceFindingReasonCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cpuOverProvisioned = "CPUOverprovisioned" + case cpuUnderProvisioned = "CPUUnderprovisioned" + case ebsIopsOverProvisioned = "EBSIOPSOverprovisioned" + case ebsThroughputOverProvisioned = "EBSThroughputOverprovisioned" + case ebsThroughputUnderProvisioned = "EBSThroughputUnderprovisioned" + case networkBandwidthOverProvisioned = "NetworkBandwidthOverprovisioned" + case networkBandwidthUnderProvisioned = "NetworkBandwidthUnderprovisioned" + case newEngineVersionAvailable = "NewEngineVersionAvailable" + case newGenerationDbInstanceClassAvailable = "NewGenerationDBInstanceClassAvailable" + public var description: String { return self.rawValue } + } + + public enum RDSSavingsEstimationModeSource: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case costExplorerRightsizing = "CostExplorerRightsizing" + case costOptimizationHub = "CostOptimizationHub" + case publicPricing = "PublicPricing" + public var description: String { return self.rawValue } + } + + public enum RDSStorageFinding: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case optimized = "Optimized" + case overProvisioned = "Overprovisioned" + case underProvisioned = "Underprovisioned" + public var description: String { return self.rawValue } + } + + public enum RDSStorageFindingReasonCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case ebsVolumeAllocatedStorageUnderProvisioned = "EBSVolumeAllocatedStorageUnderprovisioned" + case 
ebsVolumeIopsOverProvisioned = "EBSVolumeIOPSOverprovisioned" + case ebsVolumeThroughputOverProvisioned = "EBSVolumeThroughputOverprovisioned" + case ebsVolumeThroughputUnderProvisioned = "EBSVolumeThroughputUnderprovisioned" + case newGenerationStorageTypeAvailable = "NewGenerationStorageTypeAvailable" + public var description: String { return self.rawValue } + } + public enum RecommendationPreferenceName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case enhancedInfrastructureMetrics = "EnhancedInfrastructureMetrics" case externalMetricsPreference = "ExternalMetricsPreference" @@ -732,6 +874,8 @@ extension ComputeOptimizer { case ecsService = "EcsService" case lambdaFunction = "LambdaFunction" case license = "License" + case rdsDbInstance = "RdsDBInstance" + case rdsDbInstanceStorage = "RdsDBInstanceStorage" public var description: String { return self.rawValue } } @@ -743,6 +887,7 @@ extension ComputeOptimizer { case lambdaFunction = "LambdaFunction" case license = "License" case notApplicable = "NotApplicable" + case rdsDbInstance = "RdsDBInstance" public var description: String { return self.rawValue } } @@ -1039,10 +1184,39 @@ extension ComputeOptimizer { } } + public struct DBStorageConfiguration: AWSDecodableShape { + /// The size of the RDS storage in gigabytes (GB). + public let allocatedStorage: Int? + /// The provisioned IOPs of the RDS storage. + public let iops: Int? + /// The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the RDS instance. + public let maxAllocatedStorage: Int? + /// The storage throughput of the RDS storage. + public let storageThroughput: Int? + /// The type of RDS storage. + public let storageType: String? + + public init(allocatedStorage: Int? = nil, iops: Int? = nil, maxAllocatedStorage: Int? = nil, storageThroughput: Int? = nil, storageType: String? = nil) { + self.allocatedStorage = allocatedStorage + self.iops = iops + self.maxAllocatedStorage = maxAllocatedStorage + self.storageThroughput = storageThroughput + self.storageType = storageType + } + + private enum CodingKeys: String, CodingKey { + case allocatedStorage = "allocatedStorage" + case iops = "iops" + case maxAllocatedStorage = "maxAllocatedStorage" + case storageThroughput = "storageThroughput" + case storageType = "storageType" + } + } + public struct DeleteRecommendationPreferencesRequest: AWSEncodableShape { /// The name of the recommendation preference to delete. public let recommendationPreferenceNames: [RecommendationPreferenceName] - /// The target resource type of the recommendation preference to delete. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group. The valid values for this parameter are Ec2Instance and AutoScalingGroup. + /// The target resource type of the recommendation preference to delete. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group. public let resourceType: ResourceType /// An object that describes the scope of the recommendation preference to delete. You can delete recommendation preferences that are created at the organization level (for management accounts of an organization only), account level, and resource level. 
For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide. public let scope: Scope? @@ -1497,7 +1671,7 @@ extension ComputeOptimizer { } public struct EffectiveRecommendationPreferences: AWSDecodableShape { - /// Describes the CPU vendor and architecture for an instance or Auto Scaling group recommendations. For example, when you specify AWS_ARM64 with: A GetEC2InstanceRecommendations or GetAutoScalingGroupRecommendations request, Compute Optimizer returns recommendations that consist of Graviton2 instance types only. A GetEC2RecommendationProjectedMetrics request, Compute Optimizer returns projected utilization metrics for Graviton2 instance type recommendations only. A ExportEC2InstanceRecommendations or ExportAutoScalingGroupRecommendations request, Compute Optimizer exports recommendations that consist of Graviton2 instance types only. + /// Describes the CPU vendor and architecture for an instance or Auto Scaling group recommendations. For example, when you specify AWS_ARM64 with: A GetEC2InstanceRecommendations or GetAutoScalingGroupRecommendations request, Compute Optimizer returns recommendations that consist of Graviton instance types only. A GetEC2RecommendationProjectedMetrics request, Compute Optimizer returns projected utilization metrics for Graviton instance type recommendations only. A ExportEC2InstanceRecommendations or ExportAutoScalingGroupRecommendations request, Compute Optimizer exports recommendations that consist of Graviton instance types only. public let cpuVendorArchitectures: [CpuVendorArchitecture]? /// Describes the activation status of the enhanced infrastructure metrics preference. A status of Active confirms that the preference is applied in the latest recommendation refresh, and a status of Inactive confirms that it's not yet applied to recommendations. For more information, see Enhanced infrastructure metrics in the Compute Optimizer User Guide. public let enhancedInfrastructureMetrics: EnhancedInfrastructureMetrics? @@ -1884,6 +2058,57 @@ extension ComputeOptimizer { } } + public struct ExportRDSDatabaseRecommendationsRequest: AWSEncodableShape { + /// The Amazon Web Services account IDs for the export Amazon RDS recommendations. If your account is the management account or the delegated administrator of an organization, use this parameter to specify the member account you want to export recommendations to. This parameter can't be specified together with the include member accounts parameter. The parameters are mutually exclusive. If this parameter or the include member accounts parameter is omitted, the recommendations for member accounts aren't included in the export. You can specify multiple account IDs per request. + public let accountIds: [String]? + /// The recommendations data to include in the export file. For more information about the fields that can be exported, see Exported files in the Compute Optimizer User Guide. + public let fieldsToExport: [ExportableRDSDBField]? + /// The format of the export file. The CSV file is the only export file format currently supported. + public let fileFormat: FileFormat? + /// An array of objects to specify a filter that exports a more specific set of Amazon RDS recommendations. + public let filters: [RDSDBRecommendationFilter]? + /// If your account is the management account or the delegated administrator of an organization, this parameter indicates whether to include recommendations for resources in all member accounts of the organization. 
The member accounts must also be opted in to Compute Optimizer, and trusted access for Compute Optimizer must be enabled in the organization account. For more information, see Compute Optimizer and Amazon Web Services Organizations trusted access in the Compute Optimizer User Guide. If this parameter is omitted, recommendations for member accounts of the organization aren't included in the export file. If this parameter or the account ID parameter is omitted, recommendations for member accounts aren't included in the export. + public let includeMemberAccounts: Bool? + public let recommendationPreferences: RecommendationPreferences? + public let s3DestinationConfig: S3DestinationConfig + + public init(accountIds: [String]? = nil, fieldsToExport: [ExportableRDSDBField]? = nil, fileFormat: FileFormat? = nil, filters: [RDSDBRecommendationFilter]? = nil, includeMemberAccounts: Bool? = nil, recommendationPreferences: RecommendationPreferences? = nil, s3DestinationConfig: S3DestinationConfig) { + self.accountIds = accountIds + self.fieldsToExport = fieldsToExport + self.fileFormat = fileFormat + self.filters = filters + self.includeMemberAccounts = includeMemberAccounts + self.recommendationPreferences = recommendationPreferences + self.s3DestinationConfig = s3DestinationConfig + } + + private enum CodingKeys: String, CodingKey { + case accountIds = "accountIds" + case fieldsToExport = "fieldsToExport" + case fileFormat = "fileFormat" + case filters = "filters" + case includeMemberAccounts = "includeMemberAccounts" + case recommendationPreferences = "recommendationPreferences" + case s3DestinationConfig = "s3DestinationConfig" + } + } + + public struct ExportRDSDatabaseRecommendationsResponse: AWSDecodableShape { + /// The identification number of the export job. To view the status of an export job, use the DescribeRecommendationExportJobs action and specify the job ID. + public let jobId: String? + public let s3Destination: S3Destination? + + public init(jobId: String? = nil, s3Destination: S3Destination? = nil) { + self.jobId = jobId + self.s3Destination = s3Destination + } + + private enum CodingKeys: String, CodingKey { + case jobId = "jobId" + case s3Destination = "s3Destination" + } + } + public struct ExternalMetricStatus: AWSDecodableShape { /// The status code for Compute Optimizer's integration with an external metrics provider. public let statusCode: ExternalMetricStatusCode? @@ -2471,6 +2696,109 @@ extension ComputeOptimizer { } } + public struct GetRDSDatabaseRecommendationProjectedMetricsRequest: AWSEncodableShape { + /// The timestamp of the last projected metrics data point to return. + public let endTime: Date + /// The granularity, in seconds, of the projected metrics data points. + public let period: Int + public let recommendationPreferences: RecommendationPreferences? + /// The ARN that identifies the Amazon RDS. The following is the format of the ARN: arn:aws:rds:{region}:{accountId}:db:{resourceName} + public let resourceArn: String + /// The timestamp of the first projected metrics data point to return. + public let startTime: Date + /// The statistic of the projected metrics. + public let stat: MetricStatistic + + public init(endTime: Date, period: Int = 0, recommendationPreferences: RecommendationPreferences? 
= nil, resourceArn: String, startTime: Date, stat: MetricStatistic) { + self.endTime = endTime + self.period = period + self.recommendationPreferences = recommendationPreferences + self.resourceArn = resourceArn + self.startTime = startTime + self.stat = stat + } + + private enum CodingKeys: String, CodingKey { + case endTime = "endTime" + case period = "period" + case recommendationPreferences = "recommendationPreferences" + case resourceArn = "resourceArn" + case startTime = "startTime" + case stat = "stat" + } + } + + public struct GetRDSDatabaseRecommendationProjectedMetricsResponse: AWSDecodableShape { + /// An array of objects that describes the projected metrics. + public let recommendedOptionProjectedMetrics: [RDSDatabaseRecommendedOptionProjectedMetric]? + + public init(recommendedOptionProjectedMetrics: [RDSDatabaseRecommendedOptionProjectedMetric]? = nil) { + self.recommendedOptionProjectedMetrics = recommendedOptionProjectedMetrics + } + + private enum CodingKeys: String, CodingKey { + case recommendedOptionProjectedMetrics = "recommendedOptionProjectedMetrics" + } + } + + public struct GetRDSDatabaseRecommendationsRequest: AWSEncodableShape { + /// Return the Amazon RDS recommendations to the specified Amazon Web Services account IDs. If your account is the management account or the delegated administrator of an organization, use this parameter to return the Amazon RDS recommendations to specific member accounts. You can only specify one account ID per request. + public let accountIds: [String]? + /// An array of objects to specify a filter that returns a more specific list of Amazon RDS recommendations. + public let filters: [RDSDBRecommendationFilter]? + /// The maximum number of Amazon RDS recommendations to return with a single request. To retrieve the remaining results, make another request with the returned nextToken value. + public let maxResults: Int? + /// The token to advance to the next page of Amazon RDS recommendations. + public let nextToken: String? + public let recommendationPreferences: RecommendationPreferences? + /// The ARN that identifies the Amazon RDS. The following is the format of the ARN: arn:aws:rds:{region}:{accountId}:db:{resourceName} The following is the format of a DB Cluster ARN: arn:aws:rds:{region}:{accountId}:cluster:{resourceName} + public let resourceArns: [String]? + + public init(accountIds: [String]? = nil, filters: [RDSDBRecommendationFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, recommendationPreferences: RecommendationPreferences? = nil, resourceArns: [String]? = nil) { + self.accountIds = accountIds + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.recommendationPreferences = recommendationPreferences + self.resourceArns = resourceArns + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case accountIds = "accountIds" + case filters = "filters" + case maxResults = "maxResults" + case nextToken = "nextToken" + case recommendationPreferences = "recommendationPreferences" + case resourceArns = "resourceArns" + } + } + + public struct GetRDSDatabaseRecommendationsResponse: AWSDecodableShape { + /// An array of objects that describe errors of the request. + public let errors: [GetRecommendationError]? 
+ /// The token to advance to the next page of Amazon RDS recommendations. + public let nextToken: String? + /// An array of objects that describe the Amazon RDS recommendations. + public let rdsDBRecommendations: [RDSDBRecommendation]? + + public init(errors: [GetRecommendationError]? = nil, nextToken: String? = nil, rdsDBRecommendations: [RDSDBRecommendation]? = nil) { + self.errors = errors + self.nextToken = nextToken + self.rdsDBRecommendations = rdsDBRecommendations + } + + private enum CodingKeys: String, CodingKey { + case errors = "errors" + case nextToken = "nextToken" + case rdsDBRecommendations = "rdsDBRecommendations" + } + } + public struct GetRecommendationError: AWSDecodableShape { /// The error code. public let code: String? @@ -2497,7 +2825,7 @@ extension ComputeOptimizer { public let maxResults: Int? /// The token to advance to the next page of recommendation preferences. public let nextToken: String? - /// The target resource type of the recommendation preference for which to return preferences. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group. The valid values for this parameter are Ec2Instance and AutoScalingGroup. + /// The target resource type of the recommendation preference for which to return preferences. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group. public let resourceType: ResourceType /// An object that describes the scope of the recommendation preference to return. You can return recommendation preferences that are created at the organization level (for management accounts of an organization only), account level, and resource level. For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide. public let scope: Scope? @@ -2659,7 +2987,7 @@ extension ComputeOptimizer { public let effectiveRecommendationPreferences: EffectiveRecommendationPreferences? /// An object that describes Compute Optimizer's integration status with your external metrics provider. public let externalMetricStatus: ExternalMetricStatus? - /// The finding classification of the instance. Findings for instances include: Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance. Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost. Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. For optimized resources, Compute Optimizer might recommend a new generation instance type. + /// The finding classification of the instance. 
Findings for instances include: Underprovisioned —An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance. Overprovisioned —An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost. Optimized —An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and is not over provisioned. For optimized resources, Compute Optimizer might recommend a new generation instance type. The valid values in your API responses appear as OVER_PROVISIONED, UNDER_PROVISIONED, or OPTIMIZED. public let finding: Finding? /// The reason for the finding classification of the instance. Finding reason codes for instances include: CPUOverprovisioned — The instance’s CPU configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the CPUUtilization metric of the current instance during the look-back period. CPUUnderprovisioned — The instance’s CPU configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better CPU performance. This is identified by analyzing the CPUUtilization metric of the current instance during the look-back period. MemoryOverprovisioned — The instance’s memory configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the memory utilization metric of the current instance during the look-back period. MemoryUnderprovisioned — The instance’s memory configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better memory performance. This is identified by analyzing the memory utilization metric of the current instance during the look-back period. Memory utilization is analyzed only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling memory utilization with the Amazon CloudWatch Agent in the Compute Optimizer User Guide. On Linux instances, Compute Optimizer analyses the mem_used_percent metric in the CWAgent namespace, or the legacy MemoryUtilization metric in the System/Linux namespace. On Windows instances, Compute Optimizer analyses the Memory % Committed Bytes In Use metric in the CWAgent namespace. EBSThroughputOverprovisioned — The instance’s EBS throughput configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the VolumeReadBytes and VolumeWriteBytes metrics of EBS volumes attached to the current instance during the look-back period. EBSThroughputUnderprovisioned — The instance’s EBS throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS throughput performance. This is identified by analyzing the VolumeReadBytes and VolumeWriteBytes metrics of EBS volumes attached to the current instance during the look-back period. 
EBSIOPSOverprovisioned — The instance’s EBS IOPS configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the VolumeReadOps and VolumeWriteOps metrics of EBS volumes attached to the current instance during the look-back period. EBSIOPSUnderprovisioned — The instance’s EBS IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS IOPS performance. This is identified by analyzing the VolumeReadOps and VolumeWriteOps metrics of EBS volumes attached to the current instance during the look-back period. NetworkBandwidthOverprovisioned — The instance’s network bandwidth configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the NetworkIn and NetworkOut metrics of the current instance during the look-back period. NetworkBandwidthUnderprovisioned — The instance’s network bandwidth configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network bandwidth performance. This is identified by analyzing the NetworkIn and NetworkOut metrics of the current instance during the look-back period. This finding reason happens when the NetworkIn or NetworkOut performance of an instance is impacted. NetworkPPSOverprovisioned — The instance’s network PPS (packets per second) configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the NetworkPacketsIn and NetworkPacketsOut metrics of the current instance during the look-back period. NetworkPPSUnderprovisioned — The instance’s network PPS (packets per second) configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network PPS performance. This is identified by analyzing the NetworkPacketsIn and NetworkPacketsOut metrics of the current instance during the look-back period. DiskIOPSOverprovisioned — The instance’s disk IOPS configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the DiskReadOps and DiskWriteOps metrics of the current instance during the look-back period. DiskIOPSUnderprovisioned — The instance’s disk IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk IOPS performance. This is identified by analyzing the DiskReadOps and DiskWriteOps metrics of the current instance during the look-back period. DiskThroughputOverprovisioned — The instance’s disk throughput configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the DiskReadBytes and DiskWriteBytes metrics of the current instance during the look-back period. DiskThroughputUnderprovisioned — The instance’s disk throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk throughput performance. This is identified by analyzing the DiskReadBytes and DiskWriteBytes metrics of the current instance during the look-back period. For more information about instance metrics, see List the available CloudWatch metrics for your instances in the Amazon Elastic Compute Cloud User Guide.
For more information about EBS volume metrics, see Amazon CloudWatch metrics for Amazon EBS in the Amazon Elastic Compute Cloud User Guide. public let findingReasonCodes: [InstanceRecommendationFindingReasonCode]? @@ -3254,7 +3582,7 @@ extension ComputeOptimizer { public let lookBackPeriod: LookBackPeriodPreference? /// The preference to control which resource type values are considered when generating rightsizing recommendations. You can specify this preference as a combination of include and exclude lists. You must specify either an includeList or excludeList. If the preference is an empty set of resource type values, an error occurs. You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types. public let preferredResources: [PreferredResource]? - /// The target resource type of the recommendation preference to create. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group. The valid values for this parameter are Ec2Instance and AutoScalingGroup. + /// The target resource type of the recommendation preference to create. The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group. public let resourceType: ResourceType /// The status of the savings estimation mode preference to create or update. Specify the AfterDiscounts status to activate the preference, or specify BeforeDiscounts to deactivate the preference. Only the account manager or delegated administrator of your organization can activate this preference. For more information, see Savings estimation mode in the Compute Optimizer User Guide. public let savingsEstimationMode: SavingsEstimationMode? @@ -3292,6 +3620,329 @@ extension ComputeOptimizer { public init() {} } + public struct RDSDBInstanceRecommendationOption: AWSDecodableShape { + /// Describes the DB instance class recommendation option for your Amazon RDS instance. + public let dbInstanceClass: String? + /// The performance risk of the RDS instance recommendation option. + public let performanceRisk: Double? + /// An array of objects that describe the projected utilization metrics of the RDS instance recommendation option. + public let projectedUtilizationMetrics: [RDSDBUtilizationMetric]? + /// The rank identifier of the RDS instance recommendation option. + public let rank: Int? + public let savingsOpportunity: SavingsOpportunity? + /// Describes the savings opportunity for Amazon RDS recommendations or for the recommendation option. Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation. + public let savingsOpportunityAfterDiscounts: RDSInstanceSavingsOpportunityAfterDiscounts? + + public init(dbInstanceClass: String? = nil, performanceRisk: Double? = nil, projectedUtilizationMetrics: [RDSDBUtilizationMetric]? = nil, rank: Int? = nil, savingsOpportunity: SavingsOpportunity? = nil, savingsOpportunityAfterDiscounts: RDSInstanceSavingsOpportunityAfterDiscounts? 
= nil) { + self.dbInstanceClass = dbInstanceClass + self.performanceRisk = performanceRisk + self.projectedUtilizationMetrics = projectedUtilizationMetrics + self.rank = rank + self.savingsOpportunity = savingsOpportunity + self.savingsOpportunityAfterDiscounts = savingsOpportunityAfterDiscounts + } + + private enum CodingKeys: String, CodingKey { + case dbInstanceClass = "dbInstanceClass" + case performanceRisk = "performanceRisk" + case projectedUtilizationMetrics = "projectedUtilizationMetrics" + case rank = "rank" + case savingsOpportunity = "savingsOpportunity" + case savingsOpportunityAfterDiscounts = "savingsOpportunityAfterDiscounts" + } + } + + public struct RDSDBRecommendation: AWSDecodableShape { + /// The Amazon Web Services account ID of the Amazon RDS. + public let accountId: String? + /// The DB instance class of the current RDS instance. + public let currentDBInstanceClass: String? + /// The configuration of the current RDS storage. + public let currentStorageConfiguration: DBStorageConfiguration? + /// Describes the effective recommendation preferences for Amazon RDS. + public let effectiveRecommendationPreferences: RDSEffectiveRecommendationPreferences? + /// The engine of the RDS instance. + public let engine: String? + /// The database engine version. + public let engineVersion: String? + /// This indicates if the RDS instance is idle or not. + public let idle: Idle? + /// The finding classification of an Amazon RDS instance. Findings for Amazon RDS instance include: Underprovisioned — When Compute Optimizer detects that there’s not enough resource specifications, an Amazon RDS is considered under-provisioned. Overprovisioned — When Compute Optimizer detects that there’s excessive resource specifications, an Amazon RDS is considered over-provisioned. Optimized — When the specifications of your Amazon RDS instance meet the performance requirements of your workload, the service is considered optimized. + public let instanceFinding: RDSInstanceFinding? + /// The reason for the finding classification of an Amazon RDS instance. + public let instanceFindingReasonCodes: [RDSInstanceFindingReasonCode]? + /// An array of objects that describe the recommendation options for the Amazon RDS instance. + public let instanceRecommendationOptions: [RDSDBInstanceRecommendationOption]? + /// The timestamp of when the Amazon RDS recommendation was last generated. + public let lastRefreshTimestamp: Date? + /// The number of days the Amazon RDS utilization metrics were analyzed. + public let lookbackPeriodInDays: Double? + /// The ARN of the current Amazon RDS. The following is the format of the ARN: arn:aws:rds:{region}:{accountId}:db:{resourceName} + public let resourceArn: String? + /// The finding classification of Amazon RDS storage. Findings for Amazon RDS instance include: Underprovisioned — When Compute Optimizer detects that there’s not enough storage, an Amazon RDS is considered under-provisioned. Overprovisioned — When Compute Optimizer detects that there’s excessive storage, an Amazon RDS is considered over-provisioned. Optimized — When the storage of your Amazon RDS meet the performance requirements of your workload, the service is considered optimized. + public let storageFinding: RDSStorageFinding? + /// The reason for the finding classification of Amazon RDS storage. + public let storageFindingReasonCodes: [RDSStorageFindingReasonCode]? + /// An array of objects that describe the recommendation options for Amazon RDS storage. 
+ public let storageRecommendationOptions: [RDSDBStorageRecommendationOption]? + /// A list of tags assigned to your Amazon RDS recommendations. + public let tags: [Tag]? + /// An array of objects that describe the utilization metrics of the Amazon RDS. + public let utilizationMetrics: [RDSDBUtilizationMetric]? + + public init(accountId: String? = nil, currentDBInstanceClass: String? = nil, currentStorageConfiguration: DBStorageConfiguration? = nil, effectiveRecommendationPreferences: RDSEffectiveRecommendationPreferences? = nil, engine: String? = nil, engineVersion: String? = nil, idle: Idle? = nil, instanceFinding: RDSInstanceFinding? = nil, instanceFindingReasonCodes: [RDSInstanceFindingReasonCode]? = nil, instanceRecommendationOptions: [RDSDBInstanceRecommendationOption]? = nil, lastRefreshTimestamp: Date? = nil, lookbackPeriodInDays: Double? = nil, resourceArn: String? = nil, storageFinding: RDSStorageFinding? = nil, storageFindingReasonCodes: [RDSStorageFindingReasonCode]? = nil, storageRecommendationOptions: [RDSDBStorageRecommendationOption]? = nil, tags: [Tag]? = nil, utilizationMetrics: [RDSDBUtilizationMetric]? = nil) { + self.accountId = accountId + self.currentDBInstanceClass = currentDBInstanceClass + self.currentStorageConfiguration = currentStorageConfiguration + self.effectiveRecommendationPreferences = effectiveRecommendationPreferences + self.engine = engine + self.engineVersion = engineVersion + self.idle = idle + self.instanceFinding = instanceFinding + self.instanceFindingReasonCodes = instanceFindingReasonCodes + self.instanceRecommendationOptions = instanceRecommendationOptions + self.lastRefreshTimestamp = lastRefreshTimestamp + self.lookbackPeriodInDays = lookbackPeriodInDays + self.resourceArn = resourceArn + self.storageFinding = storageFinding + self.storageFindingReasonCodes = storageFindingReasonCodes + self.storageRecommendationOptions = storageRecommendationOptions + self.tags = tags + self.utilizationMetrics = utilizationMetrics + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case currentDBInstanceClass = "currentDBInstanceClass" + case currentStorageConfiguration = "currentStorageConfiguration" + case effectiveRecommendationPreferences = "effectiveRecommendationPreferences" + case engine = "engine" + case engineVersion = "engineVersion" + case idle = "idle" + case instanceFinding = "instanceFinding" + case instanceFindingReasonCodes = "instanceFindingReasonCodes" + case instanceRecommendationOptions = "instanceRecommendationOptions" + case lastRefreshTimestamp = "lastRefreshTimestamp" + case lookbackPeriodInDays = "lookbackPeriodInDays" + case resourceArn = "resourceArn" + case storageFinding = "storageFinding" + case storageFindingReasonCodes = "storageFindingReasonCodes" + case storageRecommendationOptions = "storageRecommendationOptions" + case tags = "tags" + case utilizationMetrics = "utilizationMetrics" + } + } + + public struct RDSDBRecommendationFilter: AWSEncodableShape { + /// The name of the filter. Specify Finding to return recommendations with a specific finding classification. You can filter your Amazon RDS recommendations by tag:key and tag-key tags. A tag:key is a key and value combination of a tag assigned to your Amazon RDS recommendations. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all Amazon RDS service recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value. A tag-key is the key of a tag assigned to your Amazon RDS recommendations. Use this filter to find all of your Amazon RDS recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your Amazon RDS service recommendations with a tag key value of Owner or without any tag keys assigned. + public let name: RDSDBRecommendationFilterName? + /// The value of the filter. + public let values: [String]? + + public init(name: RDSDBRecommendationFilterName? = nil, values: [String]? = nil) { + self.name = name + self.values = values + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case values = "values" + } + } + + public struct RDSDBStorageRecommendationOption: AWSDecodableShape { + /// The rank identifier of the RDS storage recommendation option. + public let rank: Int? + public let savingsOpportunity: SavingsOpportunity? + /// Describes the savings opportunity for Amazon RDS storage recommendations or for the recommendation option. Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve this by implementing a given Compute Optimizer recommendation. + public let savingsOpportunityAfterDiscounts: RDSStorageSavingsOpportunityAfterDiscounts? + /// The recommended storage configuration. + public let storageConfiguration: DBStorageConfiguration? + + public init(rank: Int? = nil, savingsOpportunity: SavingsOpportunity? = nil, savingsOpportunityAfterDiscounts: RDSStorageSavingsOpportunityAfterDiscounts? = nil, storageConfiguration: DBStorageConfiguration? = nil) { + self.rank = rank + self.savingsOpportunity = savingsOpportunity + self.savingsOpportunityAfterDiscounts = savingsOpportunityAfterDiscounts + self.storageConfiguration = storageConfiguration + } + + private enum CodingKeys: String, CodingKey { + case rank = "rank" + case savingsOpportunity = "savingsOpportunity" + case savingsOpportunityAfterDiscounts = "savingsOpportunityAfterDiscounts" + case storageConfiguration = "storageConfiguration" + } + } + + public struct RDSDBUtilizationMetric: AWSDecodableShape { + /// The name of the utilization metric. + public let name: RDSDBMetricName? + /// The statistic of the utilization metric. The Compute Optimizer API, Command Line Interface (CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period. The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide. + public let statistic: RDSDBMetricStatistic? + /// The value of the utilization metric. + public let value: Double? + + public init(name: RDSDBMetricName? = nil, statistic: RDSDBMetricStatistic? = nil, value: Double? 
= nil) { + self.name = name + self.statistic = statistic + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case statistic = "statistic" + case value = "value" + } + } + + public struct RDSDatabaseProjectedMetric: AWSDecodableShape { + /// The name of the projected metric. + public let name: RDSDBMetricName? + /// The timestamps of the projected metric. + public let timestamps: [Date]? + /// The values for the projected metric. + public let values: [Double]? + + public init(name: RDSDBMetricName? = nil, timestamps: [Date]? = nil, values: [Double]? = nil) { + self.name = name + self.timestamps = timestamps + self.values = values + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case timestamps = "timestamps" + case values = "values" + } + } + + public struct RDSDatabaseRecommendedOptionProjectedMetric: AWSDecodableShape { + /// An array of objects that describe the projected metric. + public let projectedMetrics: [RDSDatabaseProjectedMetric]? + /// The rank identifier of the RDS instance recommendation option. + public let rank: Int? + /// The recommended DB instance class for the Amazon RDS. + public let recommendedDBInstanceClass: String? + + public init(projectedMetrics: [RDSDatabaseProjectedMetric]? = nil, rank: Int? = nil, recommendedDBInstanceClass: String? = nil) { + self.projectedMetrics = projectedMetrics + self.rank = rank + self.recommendedDBInstanceClass = recommendedDBInstanceClass + } + + private enum CodingKeys: String, CodingKey { + case projectedMetrics = "projectedMetrics" + case rank = "rank" + case recommendedDBInstanceClass = "recommendedDBInstanceClass" + } + } + + public struct RDSEffectiveRecommendationPreferences: AWSDecodableShape { + /// Describes the CPU vendor and architecture for Amazon RDS recommendations. + public let cpuVendorArchitectures: [CpuVendorArchitecture]? + /// Describes the activation status of the enhanced infrastructure metrics preference. A status of Active confirms that the preference is applied in the latest recommendation refresh, and a status of Inactive confirms that it's not yet applied to recommendations. For more information, see Enhanced infrastructure metrics in the Compute Optimizer User Guide. + public let enhancedInfrastructureMetrics: EnhancedInfrastructureMetrics? + /// The number of days the utilization metrics of the Amazon RDS are analyzed. + public let lookBackPeriod: LookBackPeriodPreference? + /// Describes the savings estimation mode preference applied for calculating savings opportunity for Amazon RDS. + public let savingsEstimationMode: RDSSavingsEstimationMode? + + public init(cpuVendorArchitectures: [CpuVendorArchitecture]? = nil, enhancedInfrastructureMetrics: EnhancedInfrastructureMetrics? = nil, lookBackPeriod: LookBackPeriodPreference? = nil, savingsEstimationMode: RDSSavingsEstimationMode? = nil) { + self.cpuVendorArchitectures = cpuVendorArchitectures + self.enhancedInfrastructureMetrics = enhancedInfrastructureMetrics + self.lookBackPeriod = lookBackPeriod + self.savingsEstimationMode = savingsEstimationMode + } + + private enum CodingKeys: String, CodingKey { + case cpuVendorArchitectures = "cpuVendorArchitectures" + case enhancedInfrastructureMetrics = "enhancedInfrastructureMetrics" + case lookBackPeriod = "lookBackPeriod" + case savingsEstimationMode = "savingsEstimationMode" + } + } + + public struct RDSInstanceEstimatedMonthlySavings: AWSDecodableShape { + /// The currency of the estimated monthly savings. 
+ public let currency: Currency? + /// The value of the estimated monthly savings for Amazon RDS instances. + public let value: Double? + + public init(currency: Currency? = nil, value: Double? = nil) { + self.currency = currency + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case currency = "currency" + case value = "value" + } + } + + public struct RDSInstanceSavingsOpportunityAfterDiscounts: AWSDecodableShape { + /// The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts. + public let estimatedMonthlySavings: RDSInstanceEstimatedMonthlySavings? + /// The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts. + public let savingsOpportunityPercentage: Double? + + public init(estimatedMonthlySavings: RDSInstanceEstimatedMonthlySavings? = nil, savingsOpportunityPercentage: Double? = nil) { + self.estimatedMonthlySavings = estimatedMonthlySavings + self.savingsOpportunityPercentage = savingsOpportunityPercentage + } + + private enum CodingKeys: String, CodingKey { + case estimatedMonthlySavings = "estimatedMonthlySavings" + case savingsOpportunityPercentage = "savingsOpportunityPercentage" + } + } + + public struct RDSSavingsEstimationMode: AWSDecodableShape { + /// Describes the source for calculating the savings opportunity for Amazon RDS. + public let source: RDSSavingsEstimationModeSource? + + public init(source: RDSSavingsEstimationModeSource? = nil) { + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case source = "source" + } + } + + public struct RDSStorageEstimatedMonthlySavings: AWSDecodableShape { + /// The currency of the estimated monthly savings. + public let currency: Currency? + /// The value of the estimated monthly savings for Amazon RDS storage. + public let value: Double? + + public init(currency: Currency? = nil, value: Double? = nil) { + self.currency = currency + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case currency = "currency" + case value = "value" + } + } + + public struct RDSStorageSavingsOpportunityAfterDiscounts: AWSDecodableShape { + /// The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts. + public let estimatedMonthlySavings: RDSStorageEstimatedMonthlySavings? + /// The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts. + public let savingsOpportunityPercentage: Double? + + public init(estimatedMonthlySavings: RDSStorageEstimatedMonthlySavings? = nil, savingsOpportunityPercentage: Double? = nil) { + self.estimatedMonthlySavings = estimatedMonthlySavings + self.savingsOpportunityPercentage = savingsOpportunityPercentage + } + + private enum CodingKeys: String, CodingKey { + case estimatedMonthlySavings = "estimatedMonthlySavings" + case savingsOpportunityPercentage = "savingsOpportunityPercentage" + } + } + public struct ReasonCodeSummary: AWSDecodableShape { /// The name of the finding reason code. public let name: FindingReasonCode? 
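Reviewer orientation (not part of the generated diff): the hunk above introduces the request, response, and recommendation shapes for the new Compute Optimizer Amazon RDS recommendations operation. The sketch below is a minimal, hypothetical usage example only; it assumes the SotoComputeOptimizer library product, an already-configured AWSClient, and that the generated service type exposes getRDSDatabaseRecommendations(_:logger:) following the usual Soto pattern. The helper name, region, and page size are illustrative.

import SotoComputeOptimizer  // AWSClient and Region come from SotoCore, which Soto service modules re-export

// Hypothetical helper: pages through RDS recommendations for the given DB instance ARNs
// using the nextToken fields defined on the request/response shapes above.
func printRDSDatabaseRecommendations(client: AWSClient, resourceArns: [String]) async throws {
    let computeOptimizer = ComputeOptimizer(client: client, region: .useast1)
    var nextToken: String? = nil
    repeat {
        let response = try await computeOptimizer.getRDSDatabaseRecommendations(
            .init(maxResults: 100, nextToken: nextToken, resourceArns: resourceArns)
        )
        for recommendation in response.rdsDBRecommendations ?? [] {
            // Show the current DB instance class next to the first recommendation option returned.
            let firstOption = recommendation.instanceRecommendationOptions?.first
            print(
                recommendation.resourceArn ?? "unknown ARN",
                "current:", recommendation.currentDBInstanceClass ?? "-",
                "recommended:", firstOption?.dbInstanceClass ?? "-"
            )
        }
        nextToken = response.nextToken
    } while nextToken != nil
}

Where the generated API also ships a paginator for this operation, that sequence can replace the manual nextToken loop above.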
@@ -3347,7 +3998,7 @@ extension ComputeOptimizer { } public struct RecommendationPreferences: AWSEncodableShape { - /// Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations. For example, when you specify AWS_ARM64 with: A GetEC2InstanceRecommendations or GetAutoScalingGroupRecommendations request, Compute Optimizer returns recommendations that consist of Graviton2 instance types only. A GetEC2RecommendationProjectedMetrics request, Compute Optimizer returns projected utilization metrics for Graviton2 instance type recommendations only. A ExportEC2InstanceRecommendations or ExportAutoScalingGroupRecommendations request, Compute Optimizer exports recommendations that consist of Graviton2 instance types only. + /// Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations. For example, when you specify AWS_ARM64 with: A GetEC2InstanceRecommendations or GetAutoScalingGroupRecommendations request, Compute Optimizer returns recommendations that consist of Graviton instance types only. A GetEC2RecommendationProjectedMetrics request, Compute Optimizer returns projected utilization metrics for Graviton instance type recommendations only. A ExportEC2InstanceRecommendations or ExportAutoScalingGroupRecommendations request, Compute Optimizer exports recommendations that consist of Graviton instance types only. public let cpuVendorArchitectures: [CpuVendorArchitecture]? public init(cpuVendorArchitectures: [CpuVendorArchitecture]? = nil) { diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index 6690f7182e..1b222b5813 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -342,7 +342,7 @@ public struct Connect: AWSService { ) } - /// Claims an available phone number to your Amazon Connect instance or traffic distribution group. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance or traffic distribution group was created. For more information about how to use this operation, see Claim a phone number in your country and Claim phone numbers to traffic distribution groups in the Amazon Connect Administrator Guide. You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation. If you plan to claim and release numbers frequently during a 30 day period, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until 30 days past the oldest number released has expired. By default you can claim and release up to 200% of your maximum number of active phone numbers during any 30 day period. If you claim and release phone numbers using the UI or API during a rolling 30 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 30 days past the oldest number released has expired. For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 30 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket. 
+ /// Claims an available phone number to your Amazon Connect instance or traffic distribution group. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance or traffic distribution group was created. For more information about how to use this operation, see Claim a phone number in your country and Claim phone numbers to traffic distribution groups in the Amazon Connect Administrator Guide. You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call the DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber operation. If you plan to claim and release numbers frequently, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired. By default you can claim and release up to 200% of your maximum number of active phone numbers. If you claim and release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 180 days past the oldest number released has expired. For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket. @Sendable public func claimPhoneNumber(_ input: ClaimPhoneNumberRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ClaimPhoneNumberResponse { return try await self.client.execute( @@ -500,7 +500,7 @@ public struct Connect: AWSService { ) } - /// Creates a prompt. For more information about prompts, such as supported file types and maximum length, see Create prompts in the Amazon Connect Administrator's Guide. + /// Creates a prompt. For more information about prompts, such as supported file types and maximum length, see Create prompts in the Amazon Connect Administrator Guide. @Sendable public func createPrompt(_ input: CreatePromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePromptResponse { return try await self.client.execute( @@ -1037,7 +1037,7 @@ public struct Connect: AWSService { ) } - /// Describes the specified flow. You can also create and update flows using the Amazon Connect Flow language. + /// Describes the specified flow. You can also create and update flows using the Amazon Connect Flow language. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is published, $SAVED needs to be supplied to view saved content that has not been published. In the response, Status indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. SAVED does not initiate validation of the content. SAVED | PUBLISHED @Sendable public func describeContactFlow(_ input: DescribeContactFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeContactFlowResponse { return try await self.client.execute( @@ -1050,7 +1050,7 @@ public struct Connect: AWSService { ) } - /// Describes the specified flow module. + /// Describes the specified flow module. Use the $SAVED alias in the request to describe the SAVED content of a Flow. 
For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is published, $SAVED needs to be supplied to view saved content that has not been published. @Sendable public func describeContactFlowModule(_ input: DescribeContactFlowModuleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeContactFlowModuleResponse { return try await self.client.execute( @@ -1583,7 +1583,7 @@ public struct Connect: AWSService { ) } - /// Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide. + /// Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide. @Sendable public func getMetricDataV2(_ input: GetMetricDataV2Request, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMetricDataV2Response { return try await self.client.execute( @@ -2259,7 +2259,7 @@ public struct Connect: AWSService { ) } - /// Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. You can call this API only in the Amazon Web Services Region where the number was claimed. To release phone numbers from a traffic distribution group, use the ReleasePhoneNumber API, not the Amazon Connect admin website. After releasing a phone number, the phone number enters into a cooldown period of 30 days. It cannot be searched for or claimed again until the period has ended. If you accidentally release a phone number, contact Amazon Web Services Support. If you plan to claim and release numbers frequently during a 30 day period, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until 30 days past the oldest number released has expired. By default you can claim and release up to 200% of your maximum number of active phone numbers during any 30 day period. If you claim and release phone numbers using the UI or API during a rolling 30 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 30 days past the oldest number released has expired. For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 30 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket. + /// Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. 
You can call this API only in the Amazon Web Services Region where the number was claimed. To release phone numbers from a traffic distribution group, use the ReleasePhoneNumber API, not the Amazon Connect admin website. After releasing a phone number, the phone number enters into a cooldown period for up to 180 days. It cannot be searched for or claimed again until the period has ended. If you accidentally release a phone number, contact Amazon Web Services Support. If you plan to claim and release numbers frequently, contact us for a service quota exception. Otherwise, it is possible you will be blocked from claiming and releasing any more numbers until up to 180 days past the oldest number released has expired. By default you can claim and release up to 200% of your maximum number of active phone numbers. If you claim and release phone numbers using the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number service level quota, you will be blocked from claiming any more numbers until 180 days past the oldest number released has expired. For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release 99, you will have exceeded the 200% limit. At that point you are blocked from claiming any more numbers until you open an Amazon Web Services support ticket. @Sendable public func releasePhoneNumber(_ input: ReleasePhoneNumberRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -2324,6 +2324,32 @@ public struct Connect: AWSService { ) } + /// Searches the flow modules in an Amazon Connect instance, with optional filtering. + @Sendable + public func searchContactFlowModules(_ input: SearchContactFlowModulesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchContactFlowModulesResponse { + return try await self.client.execute( + operation: "SearchContactFlowModules", + path: "/search-contact-flow-modules", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Searches the contact flows in an Amazon Connect instance, with optional filtering. + @Sendable + public func searchContactFlows(_ input: SearchContactFlowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchContactFlowsResponse { + return try await self.client.execute( + operation: "SearchContactFlows", + path: "/search-contact-flows", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches contacts in an Amazon Connect instance. @Sendable public func searchContacts(_ input: SearchContactsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchContactsResponse { @@ -2584,7 +2610,7 @@ public struct Connect: AWSService { ) } - /// Ends the specified contact. Use this API to stop queued callbacks. It does not work for voice contacts that use the following initiation methods: DISCONNECT TRANSFER QUEUE_TRANSFER Chat and task contacts can be terminated in any state, regardless of initiation method. + /// Ends the specified contact. Use this API to stop queued callbacks. It does not work for voice contacts that use the following initiation methods: DISCONNECT TRANSFER QUEUE_TRANSFER EXTERNAL_OUTBOUND MONITOR Chat and task contacts can be terminated in any state, regardless of initiation method. 
@Sendable public func stopContact(_ input: StopContactRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopContactResponse { return try await self.client.execute( @@ -2766,7 +2792,7 @@ public struct Connect: AWSService { ) } - /// Updates the specified flow. You can also create and update flows using the Amazon Connect Flow language. + /// Updates the specified flow. You can also create and update flows using the Amazon Connect Flow language. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is published, $SAVED needs to be supplied to view saved content that has not been published. @Sendable public func updateContactFlowContent(_ input: UpdateContactFlowContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContactFlowContentResponse { return try await self.client.execute( @@ -2792,7 +2818,7 @@ public struct Connect: AWSService { ) } - /// Updates specified flow module for the specified Amazon Connect instance. + /// Updates specified flow module for the specified Amazon Connect instance. Use the $SAVED alias in the request to describe the SAVED content of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is published, $SAVED needs to be supplied to view saved content that has not been published. @Sendable public func updateContactFlowModuleContent(_ input: UpdateContactFlowModuleContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContactFlowModuleContentResponse { return try await self.client.execute( @@ -2831,7 +2857,7 @@ public struct Connect: AWSService { ) } - /// This API is in preview release for Amazon Connect and is subject to change. Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds). These properties can be used to change a customer's position in the queue. For example, you can move a contact to the back of the queue by setting a lower routing priority relative to other contacts in queue; or you can move a contact to the front of the queue by increasing the routing age which will make the contact look artificially older and therefore higher up in the first-in-first-out routing order. Note that adjusting the routing age of a contact affects only its position in queue, and not its actual queue wait time as reported through metrics. These properties can also be updated by using the Set routing priority / age flow block. + /// Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds). These properties can be used to change a customer's position in the queue. For example, you can move a contact to the back of the queue by setting a lower routing priority relative to other contacts in queue; or you can move a contact to the front of the queue by increasing the routing age which will make the contact look artificially older and therefore higher up in the first-in-first-out routing order. Note that adjusting the routing age of a contact affects only its position in queue, and not its actual queue wait time as reported through metrics. These properties can also be updated by using the Set routing priority / age flow block. Either QueuePriority or QueueTimeAdjustmentInSeconds should be provided within the request body, but not both. 
@Sendable public func updateContactRoutingData(_ input: UpdateContactRoutingDataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateContactRoutingDataResponse { return try await self.client.execute( @@ -3385,7 +3411,7 @@ extension Connect { ) } - /// Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator's Guide. + /// Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -4221,6 +4247,44 @@ extension Connect { ) } + /// Searches the flow modules in an Amazon Connect instance, with optional filtering. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func searchContactFlowModulesPaginator( + _ input: SearchContactFlowModulesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<SearchContactFlowModulesRequest, SearchContactFlowModulesResponse> { + return .init( + input: input, + command: self.searchContactFlowModules, + inputKey: \SearchContactFlowModulesRequest.nextToken, + outputKey: \SearchContactFlowModulesResponse.nextToken, + logger: logger + ) + } + + /// Searches the contact flows in an Amazon Connect instance, with optional filtering. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func searchContactFlowsPaginator( + _ input: SearchContactFlowsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<SearchContactFlowsRequest, SearchContactFlowsResponse> { + return .init( + input: input, + command: self.searchContactFlows, + inputKey: \SearchContactFlowsRequest.nextToken, + outputKey: \SearchContactFlowsResponse.nextToken, + logger: logger + ) + } + /// Searches contacts in an Amazon Connect instance. /// Return PaginatorSequence for operation.
/// @@ -4954,6 +5018,30 @@ extension Connect.SearchAvailablePhoneNumbersRequest: AWSPaginateToken { } } +extension Connect.SearchContactFlowModulesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Connect.SearchContactFlowModulesRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token, + searchCriteria: self.searchCriteria, + searchFilter: self.searchFilter + ) + } +} + +extension Connect.SearchContactFlowsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Connect.SearchContactFlowsRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token, + searchCriteria: self.searchCriteria, + searchFilter: self.searchFilter + ) + } +} + extension Connect.SearchContactsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Connect.SearchContactsRequest { return .init( diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 5032ad0f23..d1d62b1c9b 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -57,6 +57,24 @@ extension Connect { public var description: String { return self.rawValue } } + public enum AnsweringMachineDetectionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case amdError = "AMD_ERROR" + case amdNotApplicable = "AMD_NOT_APPLICABLE" + case amdUnanswered = "AMD_UNANSWERED" + case amdUnresolved = "AMD_UNRESOLVED" + case answered = "ANSWERED" + case error = "ERROR" + case faxMachineDetected = "FAX_MACHINE_DETECTED" + case humanAnswered = "HUMAN_ANSWERED" + case sitToneBusy = "SIT_TONE_BUSY" + case sitToneDetected = "SIT_TONE_DETECTED" + case sitToneInvalidNumber = "SIT_TONE_INVALID_NUMBER" + case undetected = "UNDETECTED" + case voicemailBeep = "VOICEMAIL_BEEP" + case voicemailNoBeep = "VOICEMAIL_NO_BEEP" + public var description: String { return self.rawValue } + } + public enum ArtifactStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case approved = "APPROVED" case inProgress = "IN_PROGRESS" @@ -107,6 +125,12 @@ extension Connect { public var description: String { return self.rawValue } } + public enum ContactFlowStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case published = "PUBLISHED" + case saved = "SAVED" + public var description: String { return self.rawValue } + } + public enum ContactFlowType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case agentHold = "AGENT_HOLD" case agentTransfer = "AGENT_TRANSFER" @@ -787,6 +811,14 @@ extension Connect { public var description: String { return self.rawValue } } + public enum RoutingCriteriaStepStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case expired = "EXPIRED" + case inactive = "INACTIVE" + case joined = "JOINED" + public var description: String { return self.rawValue } + } + public enum RulePublishStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case draft = "DRAFT" case published = "PUBLISHED" @@ -1436,6 +1468,19 @@ extension Connect { } } + public struct AgentHierarchyGroup: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the group. + public let arn: String? + + public init(arn: String? 
= nil) { + self.arn = arn + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + } + } + public struct AgentHierarchyGroups: AWSEncodableShape { /// The identifiers for level 1 hierarchy groups. public let l1Ids: [String]? @@ -1476,24 +1521,48 @@ extension Connect { public struct AgentInfo: AWSDecodableShape { /// Agent pause duration for a contact in seconds. public let agentPauseDurationInSeconds: Int? + public let capabilities: ParticipantCapabilities? /// The timestamp when the contact was connected to the agent. public let connectedToAgentTimestamp: Date? + /// Information regarding Agent’s device. + public let deviceInfo: DeviceInfo? + /// The agent hierarchy groups for the agent. + public let hierarchyGroups: HierarchyGroups? /// The identifier of the agent who accepted the contact. public let id: String? - public init(agentPauseDurationInSeconds: Int? = nil, connectedToAgentTimestamp: Date? = nil, id: String? = nil) { + public init(agentPauseDurationInSeconds: Int? = nil, capabilities: ParticipantCapabilities? = nil, connectedToAgentTimestamp: Date? = nil, deviceInfo: DeviceInfo? = nil, hierarchyGroups: HierarchyGroups? = nil, id: String? = nil) { self.agentPauseDurationInSeconds = agentPauseDurationInSeconds + self.capabilities = capabilities self.connectedToAgentTimestamp = connectedToAgentTimestamp + self.deviceInfo = deviceInfo + self.hierarchyGroups = hierarchyGroups self.id = id } private enum CodingKeys: String, CodingKey { case agentPauseDurationInSeconds = "AgentPauseDurationInSeconds" + case capabilities = "Capabilities" case connectedToAgentTimestamp = "ConnectedToAgentTimestamp" + case deviceInfo = "DeviceInfo" + case hierarchyGroups = "HierarchyGroups" case id = "Id" } } + public struct AgentQualityMetrics: AWSDecodableShape { + /// Information about the audio quality of the Agent + public let audio: AudioQualityMetricsInfo? + + public init(audio: AudioQualityMetricsInfo? = nil) { + self.audio = audio + } + + private enum CodingKeys: String, CodingKey { + case audio = "Audio" + } + } + public struct AgentStatus: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the agent status. public let agentStatusARN: String? @@ -2362,6 +2431,31 @@ extension Connect { } } + public struct AttributeCondition: AWSDecodableShape { + /// The operator of the condition. + public let comparisonOperator: String? + /// The name of predefined attribute. + public let name: String? + /// The proficiency level of the condition. + public let proficiencyLevel: Float? + /// The value of predefined attribute. + public let value: String? + + public init(comparisonOperator: String? = nil, name: String? = nil, proficiencyLevel: Float? = nil, value: String? = nil) { + self.comparisonOperator = comparisonOperator + self.name = name + self.proficiencyLevel = proficiencyLevel + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case comparisonOperator = "ComparisonOperator" + case name = "Name" + case proficiencyLevel = "ProficiencyLevel" + case value = "Value" + } + } + public struct AudioFeatures: AWSDecodableShape { /// Makes echo reduction available to clients who connect to the meeting. public let echoReduction: MeetingFeatureStatus? @@ -2375,6 +2469,23 @@ extension Connect { } } + public struct AudioQualityMetricsInfo: AWSDecodableShape { + /// List of potential issues causing degradation of quality on a media connection. If the service did not detect any potential quality issues the list is empty. 
Valid values: HighPacketLoss | HighRoundTripTime | HighJitterBuffer + public let potentialQualityIssues: [String]? + /// Number measuring the estimated quality of the media connection. + public let qualityScore: Float? + + public init(potentialQualityIssues: [String]? = nil, qualityScore: Float? = nil) { + self.potentialQualityIssues = potentialQualityIssues + self.qualityScore = qualityScore + } + + private enum CodingKeys: String, CodingKey { + case potentialQualityIssues = "PotentialQualityIssues" + case qualityScore = "QualityScore" + } + } + public struct AvailableNumberSummary: AWSDecodableShape { /// The phone number. Phone numbers are formatted [+] [country code] [subscriber number including area code]. public let phoneNumber: String? @@ -2662,7 +2773,7 @@ extension Connect { } } - public struct Campaign: AWSEncodableShape { + public struct Campaign: AWSEncodableShape & AWSDecodableShape { /// A unique identifier for a campaign. public let campaignId: String? @@ -2947,12 +3058,23 @@ extension Connect { public struct Contact: AWSDecodableShape { /// Information about the agent who accepted the contact. public let agentInfo: AgentInfo? + /// Indicates how an outbound campaign call is actually disposed if the contact is connected to Amazon Connect. + public let answeringMachineDetectionStatus: AnsweringMachineDetectionStatus? /// The Amazon Resource Name (ARN) for the contact. public let arn: String? + public let campaign: Campaign? /// How the contact reached your contact center. public let channel: Channel? + /// The timestamp when customer endpoint connected to Amazon Connect. + public let connectedToSystemTimestamp: Date? + /// Information about the Customer on the contact. + public let customer: Customer? + /// Information about customer’s voice activity. + public let customerVoiceActivity: CustomerVoiceActivity? /// The description of the contact. public let description: String? + /// Information about the call disconnect experience. + public let disconnectDetails: DisconnectDetails? /// The timestamp when the customer endpoint disconnected from Amazon Connect. public let disconnectTimestamp: Date? /// The identifier for the contact. @@ -2973,6 +3095,8 @@ extension Connect { public let name: String? /// If this contact is not the first contact, this is the ID of the previous contact. public let previousContactId: String? + /// Information about the quality of the participant's media connection. + public let qualityMetrics: QualityMetrics? /// If this contact was queued, this contains information about the queue. public let queueInfo: QueueInfo? /// An integer that represents the queue priority to be applied to the contact (lower priorities are routed preferentially). Cannot be specified if the QueueTimeAdjustmentSeconds is specified. Must be statically defined, must be larger than zero, and a valid integer value. Default Value is 5. @@ -2981,8 +3105,12 @@ extension Connect { public let queueTimeAdjustmentSeconds: Int? /// The contactId that is related to this contact. public let relatedContactId: String? + /// Latest routing criteria on the contact. + public let routingCriteria: RoutingCriteria? /// The timestamp, in Unix epoch time format, at which to start running the inbound flow. public let scheduledTimestamp: Date? + /// A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes and can be accessed in flows. Attribute keys can include only alphanumeric, -, and _ characters. 
This field can be used to show channel subtype. For example, connect:Guide or connect:SMS. + public let segmentAttributes: [String: SegmentAttributeValue]? /// Tags associated with the contact. This contains both Amazon Web Services generated and user-defined tags. public let tags: [String: String]? /// Total pause count for a contact. @@ -2992,11 +3120,17 @@ extension Connect { /// Information about Amazon Connect Wisdom. public let wisdomInfo: WisdomInfo? - public init(agentInfo: AgentInfo? = nil, arn: String? = nil, channel: Channel? = nil, description: String? = nil, disconnectTimestamp: Date? = nil, id: String? = nil, initialContactId: String? = nil, initiationMethod: ContactInitiationMethod? = nil, initiationTimestamp: Date? = nil, lastPausedTimestamp: Date? = nil, lastResumedTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, name: String? = nil, previousContactId: String? = nil, queueInfo: QueueInfo? = nil, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, relatedContactId: String? = nil, scheduledTimestamp: Date? = nil, tags: [String: String]? = nil, totalPauseCount: Int? = nil, totalPauseDurationInSeconds: Int? = nil, wisdomInfo: WisdomInfo? = nil) { + public init(agentInfo: AgentInfo? = nil, answeringMachineDetectionStatus: AnsweringMachineDetectionStatus? = nil, arn: String? = nil, campaign: Campaign? = nil, channel: Channel? = nil, connectedToSystemTimestamp: Date? = nil, customer: Customer? = nil, customerVoiceActivity: CustomerVoiceActivity? = nil, description: String? = nil, disconnectDetails: DisconnectDetails? = nil, disconnectTimestamp: Date? = nil, id: String? = nil, initialContactId: String? = nil, initiationMethod: ContactInitiationMethod? = nil, initiationTimestamp: Date? = nil, lastPausedTimestamp: Date? = nil, lastResumedTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, name: String? = nil, previousContactId: String? = nil, qualityMetrics: QualityMetrics? = nil, queueInfo: QueueInfo? = nil, queuePriority: Int64? = nil, queueTimeAdjustmentSeconds: Int? = nil, relatedContactId: String? = nil, routingCriteria: RoutingCriteria? = nil, scheduledTimestamp: Date? = nil, segmentAttributes: [String: SegmentAttributeValue]? = nil, tags: [String: String]? = nil, totalPauseCount: Int? = nil, totalPauseDurationInSeconds: Int? = nil, wisdomInfo: WisdomInfo? 
= nil) { self.agentInfo = agentInfo + self.answeringMachineDetectionStatus = answeringMachineDetectionStatus self.arn = arn + self.campaign = campaign self.channel = channel + self.connectedToSystemTimestamp = connectedToSystemTimestamp + self.customer = customer + self.customerVoiceActivity = customerVoiceActivity self.description = description + self.disconnectDetails = disconnectDetails self.disconnectTimestamp = disconnectTimestamp self.id = id self.initialContactId = initialContactId @@ -3007,11 +3141,14 @@ extension Connect { self.lastUpdateTimestamp = lastUpdateTimestamp self.name = name self.previousContactId = previousContactId + self.qualityMetrics = qualityMetrics self.queueInfo = queueInfo self.queuePriority = queuePriority self.queueTimeAdjustmentSeconds = queueTimeAdjustmentSeconds self.relatedContactId = relatedContactId + self.routingCriteria = routingCriteria self.scheduledTimestamp = scheduledTimestamp + self.segmentAttributes = segmentAttributes self.tags = tags self.totalPauseCount = totalPauseCount self.totalPauseDurationInSeconds = totalPauseDurationInSeconds @@ -3020,9 +3157,15 @@ extension Connect { private enum CodingKeys: String, CodingKey { case agentInfo = "AgentInfo" + case answeringMachineDetectionStatus = "AnsweringMachineDetectionStatus" case arn = "Arn" + case campaign = "Campaign" case channel = "Channel" + case connectedToSystemTimestamp = "ConnectedToSystemTimestamp" + case customer = "Customer" + case customerVoiceActivity = "CustomerVoiceActivity" case description = "Description" + case disconnectDetails = "DisconnectDetails" case disconnectTimestamp = "DisconnectTimestamp" case id = "Id" case initialContactId = "InitialContactId" @@ -3033,11 +3176,14 @@ extension Connect { case lastUpdateTimestamp = "LastUpdateTimestamp" case name = "Name" case previousContactId = "PreviousContactId" + case qualityMetrics = "QualityMetrics" case queueInfo = "QueueInfo" case queuePriority = "QueuePriority" case queueTimeAdjustmentSeconds = "QueueTimeAdjustmentSeconds" case relatedContactId = "RelatedContactId" + case routingCriteria = "RoutingCriteria" case scheduledTimestamp = "ScheduledTimestamp" + case segmentAttributes = "SegmentAttributes" case tags = "Tags" case totalPauseCount = "TotalPauseCount" case totalPauseDurationInSeconds = "TotalPauseDurationInSeconds" @@ -3137,18 +3283,21 @@ extension Connect { public let name: String? /// The type of flow. public let state: ContactFlowState? + /// The status of the contact flow. + public let status: ContactFlowStatus? /// The tags used to organize, track, or control access for this resource. For example, { "Tags": {"key1":"value1", "key2":"value2"} }. public let tags: [String: String]? /// The type of the flow. For descriptions of the available types, see Choose a flow type in the Amazon Connect Administrator Guide. public let type: ContactFlowType? - public init(arn: String? = nil, content: String? = nil, description: String? = nil, id: String? = nil, name: String? = nil, state: ContactFlowState? = nil, tags: [String: String]? = nil, type: ContactFlowType? = nil) { + public init(arn: String? = nil, content: String? = nil, description: String? = nil, id: String? = nil, name: String? = nil, state: ContactFlowState? = nil, status: ContactFlowStatus? = nil, tags: [String: String]? = nil, type: ContactFlowType? 
= nil) { self.arn = arn self.content = content self.description = description self.id = id self.name = name self.state = state + self.status = status self.tags = tags self.type = type } @@ -3160,6 +3309,7 @@ extension Connect { case id = "Id" case name = "Name" case state = "State" + case status = "Status" case tags = "Tags" case type = "Type" } @@ -3206,6 +3356,38 @@ extension Connect { } } + public struct ContactFlowModuleSearchCriteria: AWSEncodableShape { + /// A list of conditions which would be applied together with an AND condition. + public let andConditions: [ContactFlowModuleSearchCriteria]? + /// A list of conditions which would be applied together with an OR condition. + public let orConditions: [ContactFlowModuleSearchCriteria]? + public let stringCondition: StringCondition? + + public init(andConditions: [ContactFlowModuleSearchCriteria]? = nil, orConditions: [ContactFlowModuleSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { + self.andConditions = andConditions + self.orConditions = orConditions + self.stringCondition = stringCondition + } + + private enum CodingKeys: String, CodingKey { + case andConditions = "AndConditions" + case orConditions = "OrConditions" + case stringCondition = "StringCondition" + } + } + + public struct ContactFlowModuleSearchFilter: AWSEncodableShape { + public let tagFilter: ControlPlaneTagFilter? + + public init(tagFilter: ControlPlaneTagFilter? = nil) { + self.tagFilter = tagFilter + } + + private enum CodingKeys: String, CodingKey { + case tagFilter = "TagFilter" + } + } + public struct ContactFlowModuleSummary: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the flow module. public let arn: String? @@ -3231,11 +3413,57 @@ extension Connect { } } + public struct ContactFlowSearchCriteria: AWSEncodableShape { + /// A list of conditions which would be applied together with an AND condition. + public let andConditions: [ContactFlowSearchCriteria]? + /// A list of conditions which would be applied together with an OR condition. + public let orConditions: [ContactFlowSearchCriteria]? + /// The state of the flow. + public let stateCondition: ContactFlowState? + /// The status of the flow. + public let statusCondition: ContactFlowStatus? + public let stringCondition: StringCondition? + /// The type of flow. + public let typeCondition: ContactFlowType? + + public init(andConditions: [ContactFlowSearchCriteria]? = nil, orConditions: [ContactFlowSearchCriteria]? = nil, stateCondition: ContactFlowState? = nil, statusCondition: ContactFlowStatus? = nil, stringCondition: StringCondition? = nil, typeCondition: ContactFlowType? = nil) { + self.andConditions = andConditions + self.orConditions = orConditions + self.stateCondition = stateCondition + self.statusCondition = statusCondition + self.stringCondition = stringCondition + self.typeCondition = typeCondition + } + + private enum CodingKeys: String, CodingKey { + case andConditions = "AndConditions" + case orConditions = "OrConditions" + case stateCondition = "StateCondition" + case statusCondition = "StatusCondition" + case stringCondition = "StringCondition" + case typeCondition = "TypeCondition" + } + } + + public struct ContactFlowSearchFilter: AWSEncodableShape { + public let tagFilter: ControlPlaneTagFilter? + + public init(tagFilter: ControlPlaneTagFilter? 
= nil) { + self.tagFilter = tagFilter + } + + private enum CodingKeys: String, CodingKey { + case tagFilter = "TagFilter" + } + } + public struct ContactFlowSummary: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the flow. public let arn: String? /// The type of flow. public let contactFlowState: ContactFlowState? + /// The status of the contact flow. + public let contactFlowStatus: ContactFlowStatus? /// The type of flow. public let contactFlowType: ContactFlowType? /// The identifier of the flow. @@ -3243,9 +3471,10 @@ extension Connect { /// The name of the flow. public let name: String? - public init(arn: String? = nil, contactFlowState: ContactFlowState? = nil, contactFlowType: ContactFlowType? = nil, id: String? = nil, name: String? = nil) { + public init(arn: String? = nil, contactFlowState: ContactFlowState? = nil, contactFlowStatus: ContactFlowStatus? = nil, contactFlowType: ContactFlowType? = nil, id: String? = nil, name: String? = nil) { self.arn = arn self.contactFlowState = contactFlowState + self.contactFlowStatus = contactFlowStatus self.contactFlowType = contactFlowType self.id = id self.name = name @@ -3254,6 +3483,7 @@ extension Connect { private enum CodingKeys: String, CodingKey { case arn = "Arn" case contactFlowState = "ContactFlowState" + case contactFlowStatus = "ContactFlowStatus" case contactFlowType = "ContactFlowType" case id = "Id" case name = "Name" @@ -3585,16 +3815,19 @@ extension Connect { public let instanceId: String /// The name of the flow. public let name: String + /// Indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content. the SAVED status does not initiate validation of the content. SAVED | PUBLISHED. + public let status: ContactFlowStatus? /// The tags used to organize, track, or control access for this resource. For example, { "Tags": {"key1":"value1", "key2":"value2"} }. public let tags: [String: String]? /// The type of the flow. For descriptions of the available types, see Choose a flow type in the Amazon Connect Administrator Guide. public let type: ContactFlowType - public init(content: String, description: String? = nil, instanceId: String, name: String, tags: [String: String]? = nil, type: ContactFlowType) { + public init(content: String, description: String? = nil, instanceId: String, name: String, status: ContactFlowStatus? = nil, tags: [String: String]? = nil, type: ContactFlowType) { self.content = content self.description = description self.instanceId = instanceId self.name = name + self.status = status self.tags = tags self.type = type } @@ -3606,6 +3839,7 @@ extension Connect { try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.instanceId, key: "InstanceId") try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.status, forKey: .status) try container.encodeIfPresent(self.tags, forKey: .tags) try container.encode(self.type, forKey: .type) } @@ -3628,6 +3862,7 @@ extension Connect { case content = "Content" case description = "Description" case name = "Name" + case status = "Status" case tags = "Tags" case type = "Type" } @@ -5363,6 +5598,52 @@ extension Connect { } } + public struct Customer: AWSDecodableShape { + public let capabilities: ParticipantCapabilities? + /// Information regarding Customer’s device. + public let deviceInfo: DeviceInfo? + + public init(capabilities: ParticipantCapabilities? = nil, deviceInfo: DeviceInfo? 
= nil) { + self.capabilities = capabilities + self.deviceInfo = deviceInfo + } + + private enum CodingKeys: String, CodingKey { + case capabilities = "Capabilities" + case deviceInfo = "DeviceInfo" + } + } + + public struct CustomerQualityMetrics: AWSDecodableShape { + /// Information about the audio quality of the Customer + public let audio: AudioQualityMetricsInfo? + + public init(audio: AudioQualityMetricsInfo? = nil) { + self.audio = audio + } + + private enum CodingKeys: String, CodingKey { + case audio = "Audio" + } + } + + public struct CustomerVoiceActivity: AWSDecodableShape { + /// Timestamp that measures the end of the customer greeting from an outbound voice call. + public let greetingEndTimestamp: Date? + /// Timestamp that measures the beginning of the customer greeting from an outbound voice call. + public let greetingStartTimestamp: Date? + + public init(greetingEndTimestamp: Date? = nil, greetingStartTimestamp: Date? = nil) { + self.greetingEndTimestamp = greetingEndTimestamp + self.greetingStartTimestamp = greetingStartTimestamp + } + + private enum CodingKeys: String, CodingKey { + case greetingEndTimestamp = "GreetingEndTimestamp" + case greetingStartTimestamp = "GreetingStartTimestamp" + } + } + public struct DateReference: AWSDecodableShape { /// Identifier of the date reference. public let name: String? @@ -7104,6 +7385,27 @@ extension Connect { } } + public struct DeviceInfo: AWSDecodableShape { + /// Operating system that the participant used for the call. + public let operatingSystem: String? + /// Name of the platform that the participant used for the call. + public let platformName: String? + /// Version of the platform that the participant used for the call. + public let platformVersion: String? + + public init(operatingSystem: String? = nil, platformName: String? = nil, platformVersion: String? = nil) { + self.operatingSystem = operatingSystem + self.platformName = platformName + self.platformVersion = platformVersion + } + + private enum CodingKeys: String, CodingKey { + case operatingSystem = "OperatingSystem" + case platformName = "PlatformName" + case platformVersion = "PlatformVersion" + } + } + public struct Dimensions: AWSDecodableShape { /// The channel used for grouping and filters. public let channel: Channel? @@ -7542,6 +7844,19 @@ extension Connect { } } + public struct DisconnectDetails: AWSDecodableShape { + /// Indicates the potential disconnection issues for a call. This field is not populated if the service does not detect potential issues. + public let potentialDisconnectIssue: String? + + public init(potentialDisconnectIssue: String? = nil) { + self.potentialDisconnectIssue = potentialDisconnectIssue + } + + private enum CodingKeys: String, CodingKey { + case potentialDisconnectIssue = "PotentialDisconnectIssue" + } + } + public struct DisconnectReason: AWSEncodableShape { /// A code that indicates how the contact was terminated. public let code: String? @@ -8389,6 +8704,44 @@ extension Connect { } } + public struct Expiry: AWSDecodableShape { + /// The number of seconds to wait before expiring the routing step. + public let durationInSeconds: Int? + /// The timestamp indicating when the routing step expires. + public let expiryTimestamp: Date? + + public init(durationInSeconds: Int? = nil, expiryTimestamp: Date? 
= nil) { + self.durationInSeconds = durationInSeconds + self.expiryTimestamp = expiryTimestamp + } + + private enum CodingKeys: String, CodingKey { + case durationInSeconds = "DurationInSeconds" + case expiryTimestamp = "ExpiryTimestamp" + } + } + + public struct Expression: AWSDecodableShape { + /// List of routing expressions which will be AND-ed together. + public let andExpression: [Expression]? + /// An object to specify the predefined attribute condition. + public let attributeCondition: AttributeCondition? + /// List of routing expressions which will be OR-ed together. + public let orExpression: [Expression]? + + public init(andExpression: [Expression]? = nil, attributeCondition: AttributeCondition? = nil, orExpression: [Expression]? = nil) { + self.andExpression = andExpression + self.attributeCondition = attributeCondition + self.orExpression = orExpression + } + + private enum CodingKeys: String, CodingKey { + case andExpression = "AndExpression" + case attributeCondition = "AttributeCondition" + case orExpression = "OrExpression" + } + } + public struct FailedRequest: AWSDecodableShape { /// Reason code for the failure. public let failureReasonCode: FailureReasonCode? @@ -8556,7 +8909,7 @@ extension Connect { public let fileId: String /// The unique identifier of the Connect instance. public let instanceId: String - /// Optional override for the expiry of the pre-signed S3 URL in seconds. + /// Optional override for the expiry of the pre-signed S3 URL in seconds. The default value is 300. public let urlExpiryInSeconds: Int? public init(associatedResourceArn: String, fileId: String, instanceId: String, urlExpiryInSeconds: Int? = nil) { @@ -8588,7 +8941,7 @@ extension Connect { } public struct GetAttachedFileResponse: AWSDecodableShape { - /// The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. + /// The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. public let associatedResourceArn: String? /// Represents the identity that created the file. public let createdBy: CreatedByInfo? @@ -9009,15 +9362,15 @@ extension Connect { public struct GetMetricDataV2Request: AWSEncodableShape { /// The timestamp, in UNIX Epoch time format, at which to end the reporting interval for the retrieval of historical metrics data. The time must be later than the start time timestamp. It cannot be later than the current timestamp. public let endTime: Date - /// The filters to apply to returned metrics. You can filter on the following resources: Queues Routing profiles Agents Channels User hierarchy groups Feature Routing step expression At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups. To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | CASE_TEMPLATE_ARN | CASE_STATUS | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. 
They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is. + /// The filters to apply to returned metrics. You can filter on the following resources: Agents Channels Feature Queues Routing profiles Routing step expression User hierarchy groups At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups. To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow This filter is available only for contact record-driven metrics. public let filters: [FilterV2] - /// The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. 
Valid grouping keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION + /// The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION public let groupings: [String]? /// The interval period and timezone to apply to returned metrics. IntervalPeriod: An aggregated grouping applied to request metrics. Valid IntervalPeriod values are: FIFTEEN_MIN | THIRTY_MIN | HOUR | DAY | WEEK | TOTAL. For example, if IntervalPeriod is selected THIRTY_MIN, StartTime and EndTime differs by 1 day, then Amazon Connect returns 48 results in the response. Each result is aggregated by the THIRTY_MIN period. By default Amazon Connect aggregates results based on the TOTAL interval period. The following list describes restrictions on StartTime and EndTime based on which IntervalPeriod is requested. FIFTEEN_MIN: The difference between StartTime and EndTime must be less than 3 days. THIRTY_MIN: The difference between StartTime and EndTime must be less than 3 days. HOUR: The difference between StartTime and EndTime must be less than 3 days. DAY: The difference between StartTime and EndTime must be less than 35 days. WEEK: The difference between StartTime and EndTime must be less than 35 days. TOTAL: The difference between StartTime and EndTime must be less than 35 days. TimeZone: The timezone applied to requested metrics. public let interval: IntervalDetails? /// The maximum number of results to return per page. public let maxResults: Int? - /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Average contact duration Feature is a valid filter but not a valid grouping. 
AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Average conversation duration AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Average customer talk time CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_ABANDONED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Contact abandoned CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype Threshold: For 
ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Maximum queued time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts answered in X seconds SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype UI name: Callback attempts + /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. 
ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts public let metrics: [MetricV2] /// The token for the next set of results. Use the value returned in the previous /// response in the next request to retrieve the next set of results. @@ -9387,6 +9740,35 @@ extension Connect { } } + public struct HierarchyGroups: AWSDecodableShape { + /// The group at level one of the agent hierarchy. + public let level1: AgentHierarchyGroup? + /// The group at level two of the agent hierarchy. + public let level2: AgentHierarchyGroup? + /// The group at level three of the agent hierarchy. + public let level3: AgentHierarchyGroup? + /// The group at level four of the agent hierarchy. + public let level4: AgentHierarchyGroup? + /// The group at level five of the agent hierarchy. + public let level5: AgentHierarchyGroup? + + public init(level1: AgentHierarchyGroup? = nil, level2: AgentHierarchyGroup? = nil, level3: AgentHierarchyGroup? = nil, level4: AgentHierarchyGroup? = nil, level5: AgentHierarchyGroup? = nil) { + self.level1 = level1 + self.level2 = level2 + self.level3 = level3 + self.level4 = level4 + self.level5 = level5 + } + + private enum CodingKeys: String, CodingKey { + case level1 = "Level1" + case level2 = "Level2" + case level3 = "Level3" + case level4 = "Level4" + case level5 = "Level5" + } + } + public struct HierarchyLevel: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the hierarchy level. public let arn: String? @@ -12730,7 +13112,7 @@ extension Connect { } public struct MetricFilterV2: AWSEncodableShape & AWSDecodableShape { - /// The key to use for filtering data. 
Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON. These are the same values as the InitiationMethod and DisconnectReason in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator's Guide. + /// The key to use for filtering data. Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON. These are the same values as the InitiationMethod and DisconnectReason in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator Guide. public let metricFilterKey: String? /// The values to use for filtering data. Valid metric filter values for INITIATION_METHOD: INBOUND | OUTBOUND | TRANSFER | QUEUE_TRANSFER | CALLBACK | API Valid metric filter values for DISCONNECT_REASON: CUSTOMER_DISCONNECT | AGENT_DISCONNECT | THIRD_PARTY_DISCONNECT | TELECOM_PROBLEM | BARGED | CONTACT_FLOW_DISCONNECT | OTHER | EXPIRED | API public let metricFilterValues: [String]? @@ -12997,7 +13379,7 @@ extension Connect { } } - public struct ParticipantCapabilities: AWSEncodableShape { + public struct ParticipantCapabilities: AWSEncodableShape & AWSDecodableShape { /// The configuration having the video sharing capabilities for participants over the call. public let video: VideoCapability? @@ -13411,6 +13793,23 @@ extension Connect { public init() {} } + public struct QualityMetrics: AWSDecodableShape { + /// Information about the quality of Agent media connection. + public let agent: AgentQualityMetrics? + /// Information about the quality of Customer media connection. + public let customer: CustomerQualityMetrics? + + public init(agent: AgentQualityMetrics? = nil, customer: CustomerQualityMetrics? = nil) { + self.agent = agent + self.customer = customer + } + + private enum CodingKeys: String, CodingKey { + case agent = "Agent" + case customer = "Customer" + } + } + public struct Queue: AWSDecodableShape { /// The description of the queue. public let description: String? @@ -14222,9 +14621,32 @@ extension Connect { public init() {} } + public struct RoutingCriteria: AWSDecodableShape { + /// The timestamp indicating when the routing criteria is set to active. A routing criteria is activated when contact is transferred to a queue. ActivationTimestamp will be set on routing criteria for contacts in agent queue even though Routing criteria is never activated for contacts in agent queue. + public let activationTimestamp: Date? + /// Information about the index of the routing criteria. + public let index: Int? + /// List of routing steps. When Amazon Connect does not find an available agent meeting the requirements in a step for a given step duration, the routing criteria will move on to the next step sequentially until a join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent in the queue. + public let steps: [Step]? + + public init(activationTimestamp: Date? = nil, index: Int? = nil, steps: [Step]? = nil) { + self.activationTimestamp = activationTimestamp + self.index = index + self.steps = steps + } + + private enum CodingKeys: String, CodingKey { + case activationTimestamp = "ActivationTimestamp" + case index = "Index" + case steps = "Steps" + } + } + public struct RoutingProfile: AWSDecodableShape { /// Whether agents with this routing profile will have their routing order calculated based on time since their last inbound contact or longest idle time. public let agentAvailabilityTimer: AgentAvailabilityTimer? + /// The IDs of the associated queue. 
+ public let associatedQueueIds: [String]? /// The identifier of the default outbound queue for this routing profile. public let defaultOutboundQueueId: String? /// The description of the routing profile. @@ -14252,8 +14674,9 @@ extension Connect { /// The tags used to organize, track, or control access for this resource. For example, { "Tags": {"key1":"value1", "key2":"value2"} }. public let tags: [String: String]? - public init(agentAvailabilityTimer: AgentAvailabilityTimer? = nil, defaultOutboundQueueId: String? = nil, description: String? = nil, instanceId: String? = nil, isDefault: Bool? = nil, lastModifiedRegion: String? = nil, lastModifiedTime: Date? = nil, mediaConcurrencies: [MediaConcurrency]? = nil, name: String? = nil, numberOfAssociatedQueues: Int64? = nil, numberOfAssociatedUsers: Int64? = nil, routingProfileArn: String? = nil, routingProfileId: String? = nil, tags: [String: String]? = nil) { + public init(agentAvailabilityTimer: AgentAvailabilityTimer? = nil, associatedQueueIds: [String]? = nil, defaultOutboundQueueId: String? = nil, description: String? = nil, instanceId: String? = nil, isDefault: Bool? = nil, lastModifiedRegion: String? = nil, lastModifiedTime: Date? = nil, mediaConcurrencies: [MediaConcurrency]? = nil, name: String? = nil, numberOfAssociatedQueues: Int64? = nil, numberOfAssociatedUsers: Int64? = nil, routingProfileArn: String? = nil, routingProfileId: String? = nil, tags: [String: String]? = nil) { self.agentAvailabilityTimer = agentAvailabilityTimer + self.associatedQueueIds = associatedQueueIds self.defaultOutboundQueueId = defaultOutboundQueueId self.description = description self.instanceId = instanceId @@ -14271,6 +14694,7 @@ extension Connect { private enum CodingKeys: String, CodingKey { case agentAvailabilityTimer = "AgentAvailabilityTimer" + case associatedQueueIds = "AssociatedQueueIds" case defaultOutboundQueueId = "DefaultOutboundQueueId" case description = "Description" case instanceId = "InstanceId" @@ -14387,7 +14811,7 @@ extension Connect { public let andConditions: [RoutingProfileSearchCriteria]? /// A list of conditions which would be applied together with an OR condition. public let orConditions: [RoutingProfileSearchCriteria]? - /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, and resourceID. + /// A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are associatedQueueIds, name, description, and resourceID. public let stringCondition: StringCondition? public init(andConditions: [RoutingProfileSearchCriteria]? = nil, orConditions: [RoutingProfileSearchCriteria]? = nil, stringCondition: StringCondition? = nil) { @@ -14708,6 +15132,124 @@ extension Connect { } } + public struct SearchContactFlowModulesRequest: AWSEncodableShape { + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The search criteria to be used to return contact flow modules. The name and description fields support "contains" queries with a minimum of 2 characters and a maximum of 25 characters. 
Any queries with character lengths outside of this range will result in invalid results. + public let searchCriteria: ContactFlowModuleSearchCriteria? + /// Filters to be applied to search results. + public let searchFilter: ContactFlowModuleSearchFilter? + + public init(instanceId: String, maxResults: Int? = nil, nextToken: String? = nil, searchCriteria: ContactFlowModuleSearchCriteria? = nil, searchFilter: ContactFlowModuleSearchFilter? = nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + self.searchCriteria = searchCriteria + self.searchFilter = searchFilter + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2500) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "InstanceId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case searchCriteria = "SearchCriteria" + case searchFilter = "SearchFilter" + } + } + + public struct SearchContactFlowModulesResponse: AWSDecodableShape { + /// The total number of contact flows which matched your search query. + public let approximateTotalCount: Int64? + /// The search criteria to be used to return contact flow modules. + public let contactFlowModules: [ContactFlowModule]? + /// If there are additional results, this is the token for the next set of results. + public let nextToken: String? + + public init(approximateTotalCount: Int64? = nil, contactFlowModules: [ContactFlowModule]? = nil, nextToken: String? = nil) { + self.approximateTotalCount = approximateTotalCount + self.contactFlowModules = contactFlowModules + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case approximateTotalCount = "ApproximateTotalCount" + case contactFlowModules = "ContactFlowModules" + case nextToken = "NextToken" + } + } + + public struct SearchContactFlowsRequest: AWSEncodableShape { + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The search criteria to be used to return flows. The name and description fields support "contains" queries with a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths outside of this range will result in invalid results. + public let searchCriteria: ContactFlowSearchCriteria? + /// Filters to be applied to search results. + public let searchFilter: ContactFlowSearchFilter? + + public init(instanceId: String, maxResults: Int? = nil, nextToken: String? = nil, searchCriteria: ContactFlowSearchCriteria? = nil, searchFilter: ContactFlowSearchFilter? 
= nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + self.searchCriteria = searchCriteria + self.searchFilter = searchFilter + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2500) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "InstanceId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case searchCriteria = "SearchCriteria" + case searchFilter = "SearchFilter" + } + } + + public struct SearchContactFlowsResponse: AWSDecodableShape { + /// The total number of contact flows which matched your search query. + public let approximateTotalCount: Int64? + /// Information about the contact flows. + public let contactFlows: [ContactFlow]? + /// If there are additional results, this is the token for the next set of results. + public let nextToken: String? + + public init(approximateTotalCount: Int64? = nil, contactFlows: [ContactFlow]? = nil, nextToken: String? = nil) { + self.approximateTotalCount = approximateTotalCount + self.contactFlows = contactFlows + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case approximateTotalCount = "ApproximateTotalCount" + case contactFlows = "ContactFlows" + case nextToken = "NextToken" + } + } + public struct SearchContactsRequest: AWSEncodableShape { /// The identifier of Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. public let instanceId: String @@ -15669,7 +16211,7 @@ extension Connect { } } - public struct SegmentAttributeValue: AWSEncodableShape { + public struct SegmentAttributeValue: AWSEncodableShape & AWSDecodableShape { /// The value of a segment attribute. public let valueString: String? @@ -15888,7 +16430,7 @@ extension Connect { public let instanceId: String /// The tags used to organize, track, or control access for this resource. For example, { "Tags": {"key1":"value1", "key2":"value2"} }. public let tags: [String: String]? - /// Optional override for the expiry of the pre-signed S3 URL in seconds. + /// Optional override for the expiry of the pre-signed S3 URL in seconds. The default value is 300. public let urlExpiryInSeconds: Int? public init(associatedResourceArn: String, clientToken: String? = StartAttachedFileUploadRequest.idempotencyToken(), createdBy: CreatedByInfo? = nil, fileName: String, fileSizeInBytes: Int64, fileUseCaseType: FileUseCaseType, instanceId: String, tags: [String: String]? = nil, urlExpiryInSeconds: Int? = nil) { @@ -16520,6 +17062,27 @@ extension Connect { } } + public struct Step: AWSDecodableShape { + /// An object to specify the expiration of a routing step. + public let expiry: Expiry? + /// A tagged union to specify expression for a routing step. + public let expression: Expression? + /// Represents status of the Routing step. + public let status: RoutingCriteriaStepStatus? + + public init(expiry: Expiry? = nil, expression: Expression? = nil, status: RoutingCriteriaStepStatus? 
= nil) { + self.expiry = expiry + self.expression = expression + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case expiry = "Expiry" + case expression = "Expression" + case status = "Status" + } + } + public struct StopContactRecordingRequest: AWSEncodableShape { /// The identifier of the contact. public let contactId: String diff --git a/Sources/Soto/Services/ControlTower/ControlTower_api.swift b/Sources/Soto/Services/ControlTower/ControlTower_api.swift index fd852bd997..d9e8ae28a8 100644 --- a/Sources/Soto/Services/ControlTower/ControlTower_api.swift +++ b/Sources/Soto/Services/ControlTower/ControlTower_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS ControlTower service. /// -/// These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms "control" and "guardrail" are synonyms. To call these APIs, you'll need to know: the controlIdentifier for the control--or guardrail--you are targeting. the ARN associated with the target organizational unit (OU), which we call the targetIdentifier. the ARN associated with a resource that you wish to tag or untag. To get the controlIdentifier for your Amazon Web Services Control Tower control: The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation. The controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata in the Amazon Web Services Control Tower User Guide. A quick-reference list of control identifers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Controls reference guide section of the Amazon Web Services Control Tower User Guide. Remember that Mandatory controls cannot be added or removed. ARN format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED To get the targetIdentifier: The targetIdentifier is the ARN for an OU. In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} Details and examples Control API input and output examples with CLI Enable controls with CloudFormation Control metadata tables List of identifiers for legacy controls Controls reference guide Controls library groupings Creating Amazon Web Services Control Tower resources with Amazon Web Services CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. 
For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide. +/// Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources: Controls DisableControl EnableControl GetEnabledControl ListControlOperations ListEnabledControls UpdateEnabledControl Landing zones CreateLandingZone DeleteLandingZone GetLandingZone GetLandingZoneOperation ListLandingZones ResetLandingZone UpdateLandingZone Baselines DisableBaseline EnableBaseline GetBaseline GetBaselineOperation GetEnabledBaseline ListBaselines ListEnabledBaselines ResetEnabledBaseline UpdateEnabledBaseline Tagging ListTagsForResource TagResource UntagResource For more information about these types of resources, see the Amazon Web Services Control Tower User Guide . About control APIs These interfaces allow you to apply the Amazon Web Services library of pre-defined controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms "control" and "guardrail" are synonyms. To call these APIs, you'll need to know: the controlIdentifier for the control--or guardrail--you are targeting. the ARN associated with the target organizational unit (OU), which we call the targetIdentifier. the ARN associated with a resource that you wish to tag or untag. To get the controlIdentifier for your Amazon Web Services Control Tower control: The controlIdentifier is an ARN that is specified for each control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation. The controlIdentifier is unique in each Amazon Web Services Region for each control. You can find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide. A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and Elective controls is given in Resource identifiers for APIs and controls in the Amazon Web Services Control Tower Controls Reference Guide . Remember that Mandatory controls cannot be added or removed. ARN format: arn:aws:controltower:{REGION}::control/{CONTROL_NAME} Example: arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED To get the targetIdentifier: The targetIdentifier is the ARN for an OU. In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU. OU ARN format: arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId} About landing zone APIs You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs. For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the "Actions" section.
About baseline APIs You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines. You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines. The individual API operations for baselines are detailed in this document, the API reference manual, in the "Actions" section. For usage examples, see Baseline API input and output examples with CLI. Details and examples Control API input and output examples with CLI Baseline API input and output examples with CLI Enable controls with CloudFormation Launch a landing zone with CloudFormation Control metadata tables (large page) Control availability by Region tables (large page) List of identifiers for legacy controls Controls reference guide Controls library groupings Creating Amazon Web Services Control Tower resources with Amazon Web Services CloudFormation To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower Recording API Requests Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made the request and when, and so on. For more about Amazon Web Services Control Tower and its support for CloudTrail, see Logging Amazon Web Services Control Tower Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User Guide. public struct ControlTower: AWSService { // MARK: Member variables @@ -75,6 +75,7 @@ public struct ControlTower: AWSService { static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ "ca-central-1": "controltower-fips.ca-central-1.amazonaws.com", + "ca-west-1": "controltower-fips.ca-west-1.amazonaws.com", "us-east-1": "controltower-fips.us-east-1.amazonaws.com", "us-east-2": "controltower-fips.us-east-2.amazonaws.com", "us-gov-east-1": "controltower-fips.us-gov-east-1.amazonaws.com", @@ -112,7 +113,7 @@ public struct ControlTower: AWSService { ) } - /// Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline. + /// Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline. For usage examples, see the Amazon Web Services Control Tower User Guide . 
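+    ///
+    /// A minimal usage sketch (illustrative only, not generated from the model): it assumes a configured
+    /// `ControlTower` service object named `controlTower` is already in scope and that `DisableBaselineInput`
+    /// accepts the enabled baseline ARN through an `enabledBaselineIdentifier` parameter; the ARN below is a placeholder.
+    ///
+    ///     let output = try await controlTower.disableBaseline(
+    ///         DisableBaselineInput(enabledBaselineIdentifier: "arn:aws:controltower:us-west-2:111122223333:enabledbaseline/EXAMPLE")
+    ///     )
+    ///     // The returned DisableBaselineOutput describes the asynchronous operation that was started.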
@Sendable public func disableBaseline(_ input: DisableBaselineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DisableBaselineOutput { return try await self.client.execute( @@ -125,7 +126,7 @@ public struct ControlTower: AWSService { ) } - /// This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// This API call turns off a control. It starts an asynchronous operation that deletes Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Controls Reference Guide . @Sendable public func disableControl(_ input: DisableControlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DisableControlOutput { return try await self.client.execute( @@ -138,7 +139,7 @@ public struct ControlTower: AWSService { ) } - /// Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target. + /// Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func enableBaseline(_ input: EnableBaselineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> EnableBaselineOutput { return try await self.client.execute( @@ -151,7 +152,7 @@ public struct ControlTower: AWSService { ) } - /// This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Controls Reference Guide . @Sendable public func enableControl(_ input: EnableControlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> EnableControlOutput { return try await self.client.execute( @@ -164,7 +165,7 @@ public struct ControlTower: AWSService { ) } - /// Retrieve details about an existing Baseline resource by specifying its identifier. + /// Retrieve details about an existing Baseline resource by specifying its identifier. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func getBaseline(_ input: GetBaselineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBaselineOutput { return try await self.client.execute( @@ -177,7 +178,7 @@ public struct ControlTower: AWSService { ) } - /// Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure. 
+ /// Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func getBaselineOperation(_ input: GetBaselineOperationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBaselineOperationOutput { return try await self.client.execute( @@ -190,7 +191,7 @@ public struct ControlTower: AWSService { ) } - /// Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Controls Reference Guide . @Sendable public func getControlOperation(_ input: GetControlOperationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetControlOperationOutput { return try await self.client.execute( @@ -216,7 +217,7 @@ public struct ControlTower: AWSService { ) } - /// Retrieves details about an enabled control. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Retrieves details about an enabled control. For usage examples, see the Controls Reference Guide . @Sendable public func getEnabledControl(_ input: GetEnabledControlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEnabledControlOutput { return try await self.client.execute( @@ -242,7 +243,7 @@ public struct ControlTower: AWSService { ) } - /// Returns the status of the specified landing zone operation. Details for an operation are available for 60 days. + /// Returns the status of the specified landing zone operation. Details for an operation are available for 90 days. @Sendable public func getLandingZoneOperation(_ input: GetLandingZoneOperationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetLandingZoneOperationOutput { return try await self.client.execute( @@ -255,7 +256,7 @@ public struct ControlTower: AWSService { ) } - /// Returns a summary list of all available baselines. + /// Returns a summary list of all available baselines. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func listBaselines(_ input: ListBaselinesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListBaselinesOutput { return try await self.client.execute( @@ -268,7 +269,20 @@ public struct ControlTower: AWSService { ) } - /// Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources. + /// Provides a list of operations in progress or queued. For usage examples, see ListControlOperation examples. + @Sendable + public func listControlOperations(_ input: ListControlOperationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListControlOperationsOutput { + return try await self.client.execute( + operation: "ListControlOperations", + path: "/list-control-operations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of summaries describing EnabledBaseline resources. 
You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func listEnabledBaselines(_ input: ListEnabledBaselinesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEnabledBaselinesOutput { return try await self.client.execute( @@ -281,7 +295,7 @@ public struct ControlTower: AWSService { ) } - /// Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Controls Reference Guide . @Sendable public func listEnabledControls(_ input: ListEnabledControlsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEnabledControlsOutput { return try await self.client.execute( @@ -294,6 +308,19 @@ public struct ControlTower: AWSService { ) } + /// Lists all landing zone operations from the past 90 days. Results are sorted by time, with the most recent operation first. + @Sendable + public func listLandingZoneOperations(_ input: ListLandingZoneOperationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListLandingZoneOperationsOutput { + return try await self.client.execute( + operation: "ListLandingZoneOperations", + path: "/list-landingzone-operations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the landing zone ARN for the landing zone deployed in your managed account. This API also creates an ARN for existing accounts that do not yet have a landing zone ARN. Returns one landing zone ARN. @Sendable public func listLandingZones(_ input: ListLandingZonesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListLandingZonesOutput { @@ -307,7 +334,7 @@ public struct ControlTower: AWSService { ) } - /// Returns a list of tags associated with the resource. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Returns a list of tags associated with the resource. For usage examples, see the Controls Reference Guide . @Sendable public func listTagsForResource(_ input: ListTagsForResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceOutput { return try await self.client.execute( @@ -320,7 +347,7 @@ public struct ControlTower: AWSService { ) } - /// Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU. + /// Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func resetEnabledBaseline(_ input: ResetEnabledBaselineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ResetEnabledBaselineOutput { return try await self.client.execute( @@ -333,7 +360,7 @@ public struct ControlTower: AWSService { ) } - /// This API call resets a landing zone. It starts an asynchronous operation that resets the landing zone to the parameters specified in its original configuration. + /// This API call resets a landing zone. 
It starts an asynchronous operation that resets the landing zone to the parameters specified in the original configuration, which you specified in the manifest file. Nothing in the manifest file's original landing zone configuration is changed during the reset process, by default. This API is not the same as a rollback of a landing zone version, which is not a supported operation. @Sendable public func resetLandingZone(_ input: ResetLandingZoneInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ResetLandingZoneOutput { return try await self.client.execute( @@ -346,7 +373,7 @@ public struct ControlTower: AWSService { ) } - /// Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Applies tags to a resource. For usage examples, see the Controls Reference Guide . @Sendable public func tagResource(_ input: TagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceOutput { return try await self.client.execute( @@ -359,7 +386,7 @@ public struct ControlTower: AWSService { ) } - /// Removes tags from a resource. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Removes tags from a resource. For usage examples, see the Controls Reference Guide . @Sendable public func untagResource(_ input: UntagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceOutput { return try await self.client.execute( @@ -372,7 +399,7 @@ public struct ControlTower: AWSService { ) } - /// Updates an EnabledBaseline resource's applied parameters or version. + /// Updates an EnabledBaseline resource's applied parameters or version. For usage examples, see the Amazon Web Services Control Tower User Guide . @Sendable public func updateEnabledBaseline(_ input: UpdateEnabledBaselineInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnabledBaselineOutput { return try await self.client.execute( @@ -385,7 +412,7 @@ public struct ControlTower: AWSService { ) } - /// Updates the configuration of an already enabled control. If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower will update the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see the Amazon Web Services Control Tower User Guide + /// Updates the configuration of an already enabled control. If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see the Controls Reference Guide . 
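+    ///
+    /// A minimal usage sketch (illustrative only, not generated from the model): it assumes a configured
+    /// `ControlTower` service object named `controlTower` is in scope, that `UpdateEnabledControlInput` exposes
+    /// `enabledControlIdentifier` and `parameters` members, and that `updatedParameters` holds parameter values
+    /// differing from the currently configured ones; the ARN below is a placeholder.
+    ///
+    ///     let output = try await controlTower.updateEnabledControl(
+    ///         UpdateEnabledControlInput(
+    ///             enabledControlIdentifier: "arn:aws:controltower:us-west-2:111122223333:enabledcontrol/EXAMPLE",
+    ///             parameters: updatedParameters
+    ///         )
+    ///     )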
@Sendable public func updateEnabledControl(_ input: UpdateEnabledControlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnabledControlOutput { return try await self.client.execute( @@ -425,7 +452,7 @@ extension ControlTower { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension ControlTower { - /// Returns a summary list of all available baselines. + /// Returns a summary list of all available baselines. For usage examples, see the Amazon Web Services Control Tower User Guide . /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -444,7 +471,26 @@ extension ControlTower { ) } - /// Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources. + /// Provides a list of operations in progress or queued. For usage examples, see ListControlOperation examples. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listControlOperationsPaginator( + _ input: ListControlOperationsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListControlOperationsInput, ListControlOperationsOutput> { + return .init( + input: input, + command: self.listControlOperations, + inputKey: \ListControlOperationsInput.nextToken, + outputKey: \ListControlOperationsOutput.nextToken, + logger: logger + ) + } + + /// Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources. For usage examples, see the Amazon Web Services Control Tower User Guide . /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -463,7 +509,7 @@ extension ControlTower { ) } - /// Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Amazon Web Services Control Tower User Guide . + /// Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and the accounts it contains. For usage examples, see the Controls Reference Guide . /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -482,6 +528,25 @@ extension ControlTower { ) } + /// Lists all landing zone operations from the past 90 days. Results are sorted by time, with the most recent operation first. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listLandingZoneOperationsPaginator( + _ input: ListLandingZoneOperationsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListLandingZoneOperationsInput, ListLandingZoneOperationsOutput> { + return .init( + input: input, + command: self.listLandingZoneOperations, + inputKey: \ListLandingZoneOperationsInput.nextToken, + outputKey: \ListLandingZoneOperationsOutput.nextToken, + logger: logger + ) + } + /// Returns the landing zone ARN for the landing zone deployed in your managed account. This API also creates an ARN for existing accounts that do not yet have a landing zone ARN. Returns one landing zone ARN. /// Return PaginatorSequence for operation.
/// @@ -511,6 +576,16 @@ extension ControlTower.ListBaselinesInput: AWSPaginateToken { } } +extension ControlTower.ListControlOperationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ControlTower.ListControlOperationsInput { + return .init( + filter: self.filter, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension ControlTower.ListEnabledBaselinesInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> ControlTower.ListEnabledBaselinesInput { return .init( @@ -524,6 +599,7 @@ extension ControlTower.ListEnabledBaselinesInput: AWSPaginateToken { extension ControlTower.ListEnabledControlsInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> ControlTower.ListEnabledControlsInput { return .init( + filter: self.filter, maxResults: self.maxResults, nextToken: token, targetIdentifier: self.targetIdentifier @@ -531,6 +607,16 @@ extension ControlTower.ListEnabledControlsInput: AWSPaginateToken { } } +extension ControlTower.ListLandingZoneOperationsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> ControlTower.ListLandingZoneOperationsInput { + return .init( + filter: self.filter, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension ControlTower.ListLandingZonesInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> ControlTower.ListLandingZonesInput { return .init( diff --git a/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift b/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift index 84f1b4fec0..43e1081223 100644 --- a/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift +++ b/Sources/Soto/Services/ControlTower/ControlTower_shapes.swift @@ -157,9 +157,15 @@ extension ControlTower { } public struct ControlOperation: AWSDecodableShape { + /// The controlIdentifier of the control for the operation. + public let controlIdentifier: String? + /// The controlIdentifier of the enabled control. + public let enabledControlIdentifier: String? /// The time that the operation finished. @OptionalCustomCoding public var endTime: Date? + /// The identifier of the specified operation. + public let operationIdentifier: String? /// One of ENABLE_CONTROL or DISABLE_CONTROL. public let operationType: ControlOperationType? /// The time that the operation began. @@ -169,26 +175,140 @@ extension ControlTower { public let status: ControlOperationStatus? /// If the operation result is FAILED, this string contains a message explaining why the operation failed. public let statusMessage: String? + /// The target upon which the control operation is working. + public let targetIdentifier: String? + + public init(controlIdentifier: String? = nil, enabledControlIdentifier: String? = nil, endTime: Date? = nil, operationIdentifier: String? = nil, operationType: ControlOperationType? = nil, startTime: Date? = nil, status: ControlOperationStatus? = nil, statusMessage: String? = nil, targetIdentifier: String? 
= nil) { + self.controlIdentifier = controlIdentifier + self.enabledControlIdentifier = enabledControlIdentifier + self.endTime = endTime + self.operationIdentifier = operationIdentifier + self.operationType = operationType + self.startTime = startTime + self.status = status + self.statusMessage = statusMessage + self.targetIdentifier = targetIdentifier + } + + private enum CodingKeys: String, CodingKey { + case controlIdentifier = "controlIdentifier" + case enabledControlIdentifier = "enabledControlIdentifier" + case endTime = "endTime" + case operationIdentifier = "operationIdentifier" + case operationType = "operationType" + case startTime = "startTime" + case status = "status" + case statusMessage = "statusMessage" + case targetIdentifier = "targetIdentifier" + } + } + + public struct ControlOperationFilter: AWSEncodableShape { + /// The set of controlIdentifier returned by the filter. + public let controlIdentifiers: [String]? + /// The set of ControlOperation objects returned by the filter. + public let controlOperationTypes: [ControlOperationType]? + /// The set controlIdentifier of enabled controls selected by the filter. + public let enabledControlIdentifiers: [String]? + /// Lists the status of control operations. + public let statuses: [ControlOperationStatus]? + /// The set of targetIdentifier objects returned by the filter. + public let targetIdentifiers: [String]? + + public init(controlIdentifiers: [String]? = nil, controlOperationTypes: [ControlOperationType]? = nil, enabledControlIdentifiers: [String]? = nil, statuses: [ControlOperationStatus]? = nil, targetIdentifiers: [String]? = nil) { + self.controlIdentifiers = controlIdentifiers + self.controlOperationTypes = controlOperationTypes + self.enabledControlIdentifiers = enabledControlIdentifiers + self.statuses = statuses + self.targetIdentifiers = targetIdentifiers + } + + public func validate(name: String) throws { + try self.controlIdentifiers?.forEach { + try validate($0, name: "controlIdentifiers[]", parent: name, max: 2048) + try validate($0, name: "controlIdentifiers[]", parent: name, min: 20) + try validate($0, name: "controlIdentifiers[]", parent: name, pattern: "^arn:aws[0-9a-zA-Z_\\-:\\/]+$") + } + try self.validate(self.controlIdentifiers, name: "controlIdentifiers", parent: name, max: 1) + try self.validate(self.controlIdentifiers, name: "controlIdentifiers", parent: name, min: 1) + try self.validate(self.controlOperationTypes, name: "controlOperationTypes", parent: name, max: 1) + try self.validate(self.controlOperationTypes, name: "controlOperationTypes", parent: name, min: 1) + try self.enabledControlIdentifiers?.forEach { + try validate($0, name: "enabledControlIdentifiers[]", parent: name, max: 2048) + try validate($0, name: "enabledControlIdentifiers[]", parent: name, min: 20) + try validate($0, name: "enabledControlIdentifiers[]", parent: name, pattern: "^arn:aws[0-9a-zA-Z_\\-:\\/]+$") + } + try self.validate(self.enabledControlIdentifiers, name: "enabledControlIdentifiers", parent: name, max: 1) + try self.validate(self.enabledControlIdentifiers, name: "enabledControlIdentifiers", parent: name, min: 1) + try self.validate(self.statuses, name: "statuses", parent: name, max: 1) + try self.validate(self.statuses, name: "statuses", parent: name, min: 1) + try self.targetIdentifiers?.forEach { + try validate($0, name: "targetIdentifiers[]", parent: name, max: 2048) + try validate($0, name: "targetIdentifiers[]", parent: name, min: 20) + try validate($0, name: "targetIdentifiers[]", parent: name, pattern: 
"^arn:aws[0-9a-zA-Z_\\-:\\/]+$") + } + try self.validate(self.targetIdentifiers, name: "targetIdentifiers", parent: name, max: 1) + try self.validate(self.targetIdentifiers, name: "targetIdentifiers", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case controlIdentifiers = "controlIdentifiers" + case controlOperationTypes = "controlOperationTypes" + case enabledControlIdentifiers = "enabledControlIdentifiers" + case statuses = "statuses" + case targetIdentifiers = "targetIdentifiers" + } + } + + public struct ControlOperationSummary: AWSDecodableShape { + /// The controlIdentifier of a control. + public let controlIdentifier: String? + /// The controlIdentifier of an enabled control. + public let enabledControlIdentifier: String? + /// The time at which the control operation was completed. + @OptionalCustomCoding + public var endTime: Date? + /// The unique identifier of a control operation. + public let operationIdentifier: String? + /// The type of operation. + public let operationType: ControlOperationType? + /// The time at which a control operation began. + @OptionalCustomCoding + public var startTime: Date? + /// The status of the specified control operation. + public let status: ControlOperationStatus? + /// A speficic message displayed as part of the control status. + public let statusMessage: String? + /// The unique identifier of the target of a control operation. + public let targetIdentifier: String? - public init(endTime: Date? = nil, operationType: ControlOperationType? = nil, startTime: Date? = nil, status: ControlOperationStatus? = nil, statusMessage: String? = nil) { + public init(controlIdentifier: String? = nil, enabledControlIdentifier: String? = nil, endTime: Date? = nil, operationIdentifier: String? = nil, operationType: ControlOperationType? = nil, startTime: Date? = nil, status: ControlOperationStatus? = nil, statusMessage: String? = nil, targetIdentifier: String? = nil) { + self.controlIdentifier = controlIdentifier + self.enabledControlIdentifier = enabledControlIdentifier self.endTime = endTime + self.operationIdentifier = operationIdentifier self.operationType = operationType self.startTime = startTime self.status = status self.statusMessage = statusMessage + self.targetIdentifier = targetIdentifier } private enum CodingKeys: String, CodingKey { + case controlIdentifier = "controlIdentifier" + case enabledControlIdentifier = "enabledControlIdentifier" case endTime = "endTime" + case operationIdentifier = "operationIdentifier" case operationType = "operationType" case startTime = "startTime" case status = "status" case statusMessage = "statusMessage" + case targetIdentifier = "targetIdentifier" } } public struct CreateLandingZoneInput: AWSEncodableShape { - /// The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file. + /// The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review Launch your landing zone. public let manifest: String /// Tags to be applied to the landing zone. public let tags: [String: String]? @@ -633,6 +753,41 @@ extension ControlTower { } } + public struct EnabledControlFilter: AWSEncodableShape { + /// The set of controlIdentifier returned by the filter. + public let controlIdentifiers: [String]? + /// A list of DriftStatus items. + public let driftStatuses: [DriftStatus]? + /// A list of EnablementStatus items. + public let statuses: [EnablementStatus]? 
+ + public init(controlIdentifiers: [String]? = nil, driftStatuses: [DriftStatus]? = nil, statuses: [EnablementStatus]? = nil) { + self.controlIdentifiers = controlIdentifiers + self.driftStatuses = driftStatuses + self.statuses = statuses + } + + public func validate(name: String) throws { + try self.controlIdentifiers?.forEach { + try validate($0, name: "controlIdentifiers[]", parent: name, max: 2048) + try validate($0, name: "controlIdentifiers[]", parent: name, min: 20) + try validate($0, name: "controlIdentifiers[]", parent: name, pattern: "^arn:aws[0-9a-zA-Z_\\-:\\/]+$") + } + try self.validate(self.controlIdentifiers, name: "controlIdentifiers", parent: name, max: 1) + try self.validate(self.controlIdentifiers, name: "controlIdentifiers", parent: name, min: 1) + try self.validate(self.driftStatuses, name: "driftStatuses", parent: name, max: 1) + try self.validate(self.driftStatuses, name: "driftStatuses", parent: name, min: 1) + try self.validate(self.statuses, name: "statuses", parent: name, max: 1) + try self.validate(self.statuses, name: "statuses", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case controlIdentifiers = "controlIdentifiers" + case driftStatuses = "driftStatuses" + case statuses = "statuses" + } + } + public struct EnabledControlParameter: AWSEncodableShape { /// The key of a key/value pair. public let key: String @@ -697,9 +852,9 @@ extension ControlTower { } public struct EnablementStatusSummary: AWSDecodableShape { - /// The last operation identifier for the enabled control. + /// The last operation identifier for the enabled resource. public let lastOperationIdentifier: String? - /// The deployment status of the enabled control. Valid values: SUCCEEDED: The enabledControl configuration was deployed successfully. UNDER_CHANGE: The enabledControl configuration is changing. FAILED: The enabledControl configuration failed to deploy. + /// The deployment status of the enabled resource. Valid values: SUCCEEDED: The EnabledControl or EnabledBaseline configuration was deployed successfully. UNDER_CHANGE: The EnabledControl or EnabledBaseline configuration is changing. FAILED: The EnabledControl or EnabledBaseline configuration failed to deploy. public let status: EnablementStatus? public init(lastOperationIdentifier: String? = nil, status: EnablementStatus? = nil) { @@ -944,7 +1099,7 @@ extension ControlTower { public let driftStatus: LandingZoneDriftStatusSummary? /// The latest available version of the landing zone. public let latestAvailableVersion: String? - /// The landing zone manifest.yaml text file that specifies the landing zone configurations. + /// The landing zone manifest JSON text file that specifies the landing zone configurations. public let manifest: String /// The landing zone deployment status. One of ACTIVE, PROCESSING, FAILED. public let status: LandingZoneStatus? @@ -987,6 +1142,8 @@ extension ControlTower { /// The landing zone operation end time. @OptionalCustomCoding public var endTime: Date? + /// The operationIdentifier of the landing zone operation. + public let operationIdentifier: String? /// The landing zone operation type. Valid values: DELETE: The DeleteLandingZone operation. CREATE: The CreateLandingZone operation. UPDATE: The UpdateLandingZone operation. RESET: The ResetLandingZone operation. public let operationType: LandingZoneOperationType? /// The landing zone operation start time. 
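For reviewers, a minimal usage sketch of the new ControlTower filter shapes above, assuming an importable SotoControlTower module; the enum case names (.failed, .drifted) and the example ARN are illustrative assumptions rather than values taken from this patch, and each list carries a single element because the generated validate() rules cap every array at one entry.

import SotoControlTower

// Build the two new filter shapes; each array property accepts exactly one
// element (see the min/max 1 rules in the generated validate() methods).
func makeControlTowerFilters() throws -> (ControlTower.ControlOperationFilter, ControlTower.EnabledControlFilter) {
    // Select failed operations against a single target OU (placeholder ARN).
    let operationFilter = ControlTower.ControlOperationFilter(
        statuses: [.failed], // assumed ControlOperationStatus case name
        targetIdentifiers: ["arn:aws:organizations::111122223333:ou/o-example/ou-example"]
    )
    // Select enabled controls that have drifted (assumed DriftStatus case name).
    let enabledControlFilter = ControlTower.EnabledControlFilter(driftStatuses: [.drifted])

    // The generated validate() methods enforce the one-element limits client-side.
    try operationFilter.validate(name: "filter")
    try enabledControlFilter.validate(name: "filter")
    return (operationFilter, enabledControlFilter)
}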
@@ -997,8 +1154,9 @@ extension ControlTower { /// If the operation result is FAILED, this string contains a message explaining why the operation failed. public let statusMessage: String? - public init(endTime: Date? = nil, operationType: LandingZoneOperationType? = nil, startTime: Date? = nil, status: LandingZoneOperationStatus? = nil, statusMessage: String? = nil) { + public init(endTime: Date? = nil, operationIdentifier: String? = nil, operationType: LandingZoneOperationType? = nil, startTime: Date? = nil, status: LandingZoneOperationStatus? = nil, statusMessage: String? = nil) { self.endTime = endTime + self.operationIdentifier = operationIdentifier self.operationType = operationType self.startTime = startTime self.status = status @@ -1007,6 +1165,7 @@ extension ControlTower { private enum CodingKeys: String, CodingKey { case endTime = "endTime" + case operationIdentifier = "operationIdentifier" case operationType = "operationType" case startTime = "startTime" case status = "status" @@ -1014,6 +1173,51 @@ extension ControlTower { } } + public struct LandingZoneOperationFilter: AWSEncodableShape { + /// The statuses of the set of landing zone operations selected by the filter. + public let statuses: [LandingZoneOperationStatus]? + /// The set of landing zone operation types selected by the filter. + public let types: [LandingZoneOperationType]? + + public init(statuses: [LandingZoneOperationStatus]? = nil, types: [LandingZoneOperationType]? = nil) { + self.statuses = statuses + self.types = types + } + + public func validate(name: String) throws { + try self.validate(self.statuses, name: "statuses", parent: name, max: 1) + try self.validate(self.statuses, name: "statuses", parent: name, min: 1) + try self.validate(self.types, name: "types", parent: name, max: 1) + try self.validate(self.types, name: "types", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case statuses = "statuses" + case types = "types" + } + } + + public struct LandingZoneOperationSummary: AWSDecodableShape { + /// The operationIdentifier of the landing zone operation. + public let operationIdentifier: String? + /// The type of the landing zone operation. + public let operationType: LandingZoneOperationType? + /// The status of the landing zone operation. + public let status: LandingZoneOperationStatus? + + public init(operationIdentifier: String? = nil, operationType: LandingZoneOperationType? = nil, status: LandingZoneOperationStatus? = nil) { + self.operationIdentifier = operationIdentifier + self.operationType = operationType + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case operationIdentifier = "operationIdentifier" + case operationType = "operationType" + case status = "status" + } + } + public struct LandingZoneSummary: AWSDecodableShape { /// The ARN of the landing zone. public let arn: String? @@ -1066,6 +1270,51 @@ extension ControlTower { } } + public struct ListControlOperationsInput: AWSEncodableShape { + /// An input filter for the ListControlOperations API that lets you select the types of control operations to view. + public let filter: ControlOperationFilter? + /// The maximum number of results to be shown. + public let maxResults: Int? + /// A pagination token. + public let nextToken: String? + + public init(filter: ControlOperationFilter? = nil, maxResults: Int? = nil, nextToken: String? 
= nil) { + self.filter = filter + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.filter?.validate(name: "\(name).filter") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "\\S+") + } + + private enum CodingKeys: String, CodingKey { + case filter = "filter" + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListControlOperationsOutput: AWSDecodableShape { + /// Returns a list of output from control operations. + public let controlOperations: [ControlOperationSummary] + /// A pagination token. + public let nextToken: String? + + public init(controlOperations: [ControlOperationSummary], nextToken: String? = nil) { + self.controlOperations = controlOperations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case controlOperations = "controlOperations" + case nextToken = "nextToken" + } + } + public struct ListEnabledBaselinesInput: AWSEncodableShape { /// A filter applied on the ListEnabledBaseline operation. Allowed filters are baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both. public let filter: EnabledBaselineFilter? @@ -1112,20 +1361,24 @@ extension ControlTower { } public struct ListEnabledControlsInput: AWSEncodableShape { + /// An input filter for the ListEnabledControls API that lets you select the types of control operations to view. + public let filter: EnabledControlFilter? /// How many results to return per API call. public let maxResults: Int? /// The token to continue the list from a previous API call with the same parameters. public let nextToken: String? /// The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page. - public let targetIdentifier: String + public let targetIdentifier: String? - public init(maxResults: Int? = nil, nextToken: String? = nil, targetIdentifier: String) { + public init(filter: EnabledControlFilter? = nil, maxResults: Int? = nil, nextToken: String? = nil, targetIdentifier: String? = nil) { + self.filter = filter self.maxResults = maxResults self.nextToken = nextToken self.targetIdentifier = targetIdentifier } public func validate(name: String) throws { + try self.filter?.validate(name: "\(name).filter") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 200) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.targetIdentifier, name: "targetIdentifier", parent: name, max: 2048) @@ -1134,6 +1387,7 @@ extension ControlTower { } private enum CodingKeys: String, CodingKey { + case filter = "filter" case maxResults = "maxResults" case nextToken = "nextToken" case targetIdentifier = "targetIdentifier" @@ -1157,6 +1411,50 @@ extension ControlTower { } } + public struct ListLandingZoneOperationsInput: AWSEncodableShape { + /// An input filter for the ListLandingZoneOperations API that lets you select the types of landing zone operations to view. + public let filter: LandingZoneOperationFilter? + /// How many results to return per API call. + public let maxResults: Int? + /// The token to continue the list from a previous API call with the same parameters. + public let nextToken: String? + + public init(filter: LandingZoneOperationFilter? = nil, maxResults: Int? 
= nil, nextToken: String? = nil) { + self.filter = filter + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.filter?.validate(name: "\(name).filter") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case filter = "filter" + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListLandingZoneOperationsOutput: AWSDecodableShape { + /// Lists landing zone operations. + public let landingZoneOperations: [LandingZoneOperationSummary] + /// Retrieves the next page of results. If the string is empty, the response is the end of the results. + public let nextToken: String? + + public init(landingZoneOperations: [LandingZoneOperationSummary], nextToken: String? = nil) { + self.landingZoneOperations = landingZoneOperations + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case landingZoneOperations = "landingZoneOperations" + case nextToken = "nextToken" + } + } + public struct ListLandingZonesInput: AWSEncodableShape { /// The maximum number of returned landing zone ARNs, which is one. public let maxResults: Int? @@ -1460,7 +1758,7 @@ extension ControlTower { public struct UpdateLandingZoneInput: AWSEncodableShape { /// The unique identifier of the landing zone. public let landingZoneIdentifier: String - /// The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file. + /// The manifest file (JSON) is a text file that describes your Amazon Web Services resources. For an example, review Launch your landing zone. The example manifest file contains each of the available parameters. The schema for the landing zone's JSON manifest file is not published, by design. public let manifest: String /// The landing zone version, for example, 3.2. public let version: String diff --git a/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift b/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift index 5491302c64..1c5b84cb22 100644 --- a/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift +++ b/Sources/Soto/Services/CostOptimizationHub/CostOptimizationHub_shapes.swift @@ -74,6 +74,8 @@ extension CostOptimizationHub { case elastiCacheReservedInstances = "ElastiCacheReservedInstances" case lambdaFunction = "LambdaFunction" case openSearchReservedInstances = "OpenSearchReservedInstances" + case rdsDbInstance = "RdsDbInstance" + case rdsDbInstanceStorage = "RdsDbInstanceStorage" case rdsReservedInstances = "RdsReservedInstances" case redshiftReservedInstances = "RedshiftReservedInstances" case sageMakerSavingsPlans = "SageMakerSavingsPlans" @@ -113,6 +115,10 @@ extension CostOptimizationHub { case lambdaFunction(LambdaFunction) /// The OpenSearch reserved instances recommendation details. case openSearchReservedInstances(OpenSearchReservedInstances) + /// The DB instance recommendation details. + case rdsDbInstance(RdsDbInstance) + /// The DB instance storage recommendation details. + case rdsDbInstanceStorage(RdsDbInstanceStorage) /// The RDS reserved instances recommendation details. case rdsReservedInstances(RdsReservedInstances) /// The Redshift reserved instances recommendation details. 
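A short sketch of driving the new ControlTower list operations whose input and output shapes appear above; the listControlOperations and listLandingZoneOperations service methods are assumed to be the ones added to ControlTower_api.swift elsewhere in this patch, and the service object is taken as a parameter rather than constructed here.

import SotoControlTower

// List recent control operations and landing zone operations using the new
// shapes. `controlTower` is an already-configured service object.
func printRecentOperations(using controlTower: ControlTower) async throws {
    let controlOps = try await controlTower.listControlOperations(.init(maxResults: 20))
    for operation in controlOps.controlOperations {
        print(operation.operationIdentifier ?? "-",
              String(describing: operation.status),
              operation.statusMessage ?? "")
    }

    let landingZoneOps = try await controlTower.listLandingZoneOperations(.init(maxResults: 20))
    for operation in landingZoneOps.landingZoneOperations {
        print(operation.operationIdentifier ?? "-",
              String(describing: operation.operationType),
              String(describing: operation.status))
    }
}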
@@ -160,6 +166,12 @@ extension CostOptimizationHub { case .openSearchReservedInstances: let value = try container.decode(OpenSearchReservedInstances.self, forKey: .openSearchReservedInstances) self = .openSearchReservedInstances(value) + case .rdsDbInstance: + let value = try container.decode(RdsDbInstance.self, forKey: .rdsDbInstance) + self = .rdsDbInstance(value) + case .rdsDbInstanceStorage: + let value = try container.decode(RdsDbInstanceStorage.self, forKey: .rdsDbInstanceStorage) + self = .rdsDbInstanceStorage(value) case .rdsReservedInstances: let value = try container.decode(RdsReservedInstances.self, forKey: .rdsReservedInstances) self = .rdsReservedInstances(value) @@ -183,6 +195,8 @@ extension CostOptimizationHub { case elastiCacheReservedInstances = "elastiCacheReservedInstances" case lambdaFunction = "lambdaFunction" case openSearchReservedInstances = "openSearchReservedInstances" + case rdsDbInstance = "rdsDbInstance" + case rdsDbInstanceStorage = "rdsDbInstanceStorage" case rdsReservedInstances = "rdsReservedInstances" case redshiftReservedInstances = "redshiftReservedInstances" case sageMakerSavingsPlans = "sageMakerSavingsPlans" @@ -300,6 +314,19 @@ extension CostOptimizationHub { } } + public struct DbInstanceConfiguration: AWSDecodableShape { + /// The DB instance class of the DB instance. + public let dbInstanceClass: String? + + public init(dbInstanceClass: String? = nil) { + self.dbInstanceClass = dbInstanceClass + } + + private enum CodingKeys: String, CodingKey { + case dbInstanceClass = "dbInstanceClass" + } + } + public struct EbsVolume: AWSDecodableShape { /// The Amazon Elastic Block Store volume configuration used for recommendations. public let configuration: EbsVolumeConfiguration? @@ -791,7 +818,7 @@ extension CostOptimizationHub { public let currentResourceDetails: ResourceDetails? /// The type of resource. public let currentResourceType: ResourceType? - /// The estimated monthly cost of the recommendation. + /// The estimated monthly cost of the current resource. For Reserved Instances and Savings Plans, it refers to the cost for eligible usage. public let estimatedMonthlyCost: Double? /// The estimated monthly savings amount for the recommendation. public let estimatedMonthlySavings: Double? @@ -1176,6 +1203,76 @@ extension CostOptimizationHub { } } + public struct RdsDbInstance: AWSDecodableShape { + /// The Amazon RDS DB instance configuration used for recommendations. + public let configuration: RdsDbInstanceConfiguration? + public let costCalculation: ResourceCostCalculation? + + public init(configuration: RdsDbInstanceConfiguration? = nil, costCalculation: ResourceCostCalculation? = nil) { + self.configuration = configuration + self.costCalculation = costCalculation + } + + private enum CodingKeys: String, CodingKey { + case configuration = "configuration" + case costCalculation = "costCalculation" + } + } + + public struct RdsDbInstanceConfiguration: AWSDecodableShape { + /// Details about the instance configuration. + public let instance: DbInstanceConfiguration? + + public init(instance: DbInstanceConfiguration? = nil) { + self.instance = instance + } + + private enum CodingKeys: String, CodingKey { + case instance = "instance" + } + } + + public struct RdsDbInstanceStorage: AWSDecodableShape { + /// The Amazon RDS DB instance storage configuration used for recommendations. + public let configuration: RdsDbInstanceStorageConfiguration? + public let costCalculation: ResourceCostCalculation? 
+ + public init(configuration: RdsDbInstanceStorageConfiguration? = nil, costCalculation: ResourceCostCalculation? = nil) { + self.configuration = configuration + self.costCalculation = costCalculation + } + + private enum CodingKeys: String, CodingKey { + case configuration = "configuration" + case costCalculation = "costCalculation" + } + } + + public struct RdsDbInstanceStorageConfiguration: AWSDecodableShape { + /// The new amount of storage in GB to allocate for the DB instance. + public let allocatedStorageInGb: Double? + /// The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. + public let iops: Double? + /// The storage throughput for the DB instance. + public let storageThroughput: Double? + /// The storage type to associate with the DB instance. + public let storageType: String? + + public init(allocatedStorageInGb: Double? = nil, iops: Double? = nil, storageThroughput: Double? = nil, storageType: String? = nil) { + self.allocatedStorageInGb = allocatedStorageInGb + self.iops = iops + self.storageThroughput = storageThroughput + self.storageType = storageType + } + + private enum CodingKeys: String, CodingKey { + case allocatedStorageInGb = "allocatedStorageInGb" + case iops = "iops" + case storageThroughput = "storageThroughput" + case storageType = "storageType" + } + } + public struct RdsReservedInstances: AWSDecodableShape { /// The RDS reserved instances configuration used for recommendations. public let configuration: RdsReservedInstancesConfiguration? @@ -1281,7 +1378,7 @@ extension CostOptimizationHub { public let currentResourceSummary: String? /// The current resource type. public let currentResourceType: String? - /// The estimated monthly cost for the recommendation. + /// The estimated monthly cost of the current resource. For Reserved Instances and Savings Plans, it refers to the cost for eligible usage. public let estimatedMonthlyCost: Double? /// The estimated monthly savings amount for the recommendation. public let estimatedMonthlySavings: Double? diff --git a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift index 2642113ccc..7a36286541 100644 --- a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift +++ b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS CustomerProfiles service. /// -/// Amazon Connect Customer Profiles Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. If you're new to Amazon Connect, you might find it helpful to review the Amazon Connect Administrator Guide. +/// Amazon Connect Customer Profiles Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. 
For more information about the Amazon Connect Customer Profiles feature, see Use Customer Profiles in the Amazon Connect Administrator's Guide. public struct CustomerProfiles: AWSService { // MARK: Member variables @@ -95,7 +95,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Creates a new calculated attribute definition. After creation, new object data ingested into Customer Profiles will be included in the calculated attribute, which can be retrieved for a profile using the GetCalculatedAttributeForProfile API. Defining a calculated attribute makes it available for all profiles within a domain. Each calculated attribute can only reference one ObjectType and at most, two fields from that ObjectType. + /// Creates a new calculated attribute definition. After creation, new object data ingested into Customer Profiles will be included in the calculated attribute, which can be retrieved for a profile using the GetCalculatedAttributeForProfile API. Defining a calculated attribute makes it available for all profiles within a domain. Each calculated attribute can only reference one ObjectType and at most, two fields from that ObjectType. @Sendable public func createCalculatedAttributeDefinition(_ input: CreateCalculatedAttributeDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCalculatedAttributeDefinitionResponse { return try await self.client.execute( @@ -108,7 +108,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations. Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain. Use this API or UpdateDomain to enable identity resolution: set Matching to true. To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply. + /// Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations. Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain. Use this API or UpdateDomain to enable identity resolution: set Matching to true. To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply. It is not possible to associate a Customer Profiles domain with an Amazon Connect Instance directly from the API. If you would like to create a domain and associate a Customer Profiles domain, use the Amazon Connect admin website. For more information, see Enable Customer Profiles. Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain. 
@Sendable public func createDomain(_ input: CreateDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDomainResponse { return try await self.client.execute( @@ -121,7 +121,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles. Each event stream can be associated with only one Kinesis Data Stream destination in the same region and Amazon Web Services account as the customer profiles domain + /// Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles. Each event stream can be associated with only one Kinesis Data Stream destination in the same region and Amazon Web Services account as the customer profiles domain @Sendable public func createEventStream(_ input: CreateEventStreamRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEventStreamResponse { return try await self.client.execute( @@ -134,8 +134,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Creates an integration workflow. An integration workflow is an async process which ingests historic data and sets up an integration for ongoing updates. The supported Amazon AppFlow sources are Salesforce, ServiceNow, and Marketo. - /// + /// Creates an integration workflow. An integration workflow is an async process which ingests historic data and sets up an integration for ongoing updates. The supported Amazon AppFlow sources are Salesforce, ServiceNow, and Marketo. @Sendable public func createIntegrationWorkflow(_ input: CreateIntegrationWorkflowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIntegrationWorkflowResponse { return try await self.client.execute( @@ -161,7 +160,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Deletes an existing calculated attribute definition. Note that deleting a default calculated attribute is possible, however once deleted, you will be unable to undo that action and will need to recreate it on your own using the CreateCalculatedAttributeDefinition API if you want it back. + /// Deletes an existing calculated attribute definition. Note that deleting a default calculated attribute is possible, however once deleted, you will be unable to undo that action and will need to recreate it on your own using the CreateCalculatedAttributeDefinition API if you want it back. @Sendable public func deleteCalculatedAttributeDefinition(_ input: DeleteCalculatedAttributeDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteCalculatedAttributeDefinitionResponse { return try await self.client.execute( @@ -426,7 +425,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Returns a set of profiles that belong to the same matching group using the matchId or profileId. You can also specify the type of matching that you want for finding similar profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING. + /// Returns a set of profiles that belong to the same matching group using the matchId or profileId. You can also specify the type of matching that you want for finding similar profiles using either RULE_BASED_MATCHING or ML_BASED_MATCHING. 
@Sendable public func getSimilarProfiles(_ input: GetSimilarProfilesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetSimilarProfilesResponse { return try await self.client.execute( @@ -725,7 +724,7 @@ public struct CustomerProfiles: AWSService { ) } - /// Updates an existing calculated attribute definition. When updating the Conditions, note that increasing the date range of a calculated attribute will not trigger inclusion of historical data greater than the current date range. + /// Updates an existing calculated attribute definition. When updating the Conditions, note that increasing the date range of a calculated attribute will not trigger inclusion of historical data greater than the current date range. @Sendable public func updateCalculatedAttributeDefinition(_ input: UpdateCalculatedAttributeDefinitionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateCalculatedAttributeDefinitionResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift index f814b9da83..4c15f18b7d 100644 --- a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift +++ b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift @@ -647,7 +647,7 @@ extension CustomerProfiles { public struct AttributeTypesSelector: AWSEncodableShape & AWSDecodableShape { /// The Address type. You can choose from Address, BusinessAddress, MaillingAddress, and ShippingAddress. You only can use the Address type in the MatchingRule. For example, if you want to match profile based on BusinessAddress.City or MaillingAddress.City, you need to choose the BusinessAddress and the MaillingAddress to represent the Address type and specify the Address.City on the matching rule. public let address: [String]? - /// Configures the AttributeMatchingModel, you can either choose ONE_TO_ONE or MANY_TO_MANY. + /// Configures the AttributeMatchingModel, you can either choose ONE_TO_ONE or MANY_TO_MANY. public let attributeMatchingModel: AttributeMatchingModel /// The Email type. You can choose from EmailAddress, BusinessEmailAddress and PersonalEmailAddress. You only can use the EmailAddress type in the MatchingRule. For example, if you want to match profile based on PersonalEmailAddress or BusinessEmailAddress, you need to choose the PersonalEmailAddress and the BusinessEmailAddress to represent the EmailAddress type and only specify the EmailAddress on the matching rule. public let emailAddress: [String]? @@ -977,7 +977,7 @@ extension CustomerProfiles { /// API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from /// S3. public let matching: MatchingRequest? - /// The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3. + /// The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. 
You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3. public let ruleBasedMatching: RuleBasedMatchingRequest? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? @@ -1054,7 +1054,7 @@ extension CustomerProfiles { /// API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from /// S3. public let matching: MatchingResponse? - /// The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3. + /// The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3. public let ruleBasedMatching: RuleBasedMatchingResponse? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? @@ -1091,7 +1091,7 @@ extension CustomerProfiles { public let eventStreamName: String /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? - /// The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name + /// The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name public let uri: String public init(domainName: String, eventStreamName: String, tags: [String: String]? = nil, uri: String) { @@ -1233,7 +1233,7 @@ extension CustomerProfiles { } public struct CreateProfileRequest: AWSEncodableShape { - /// A unique account number that you have given to the customer. + /// An account number that you have given to the customer. public let accountNumber: String? /// Any additional information relevant to the customer’s profile. public let additionalInformation: String? @@ -1816,7 +1816,7 @@ extension CustomerProfiles { public let status: EventStreamDestinationStatus /// The timestamp when the status last changed to UNHEALHY. public let unhealthySince: Date? - /// The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name. + /// The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name. public let uri: String public init(status: EventStreamDestinationStatus, unhealthySince: Date? = nil, uri: String) { @@ -1933,7 +1933,7 @@ extension CustomerProfiles { public let status: EventStreamDestinationStatus /// The timestamp when the status last changed to UNHEALHY. public let unhealthySince: Date? - /// The StreamARN of the destination to deliver profile events to. 
For example, arn:aws:kinesis:region:account-id:stream/stream-name. + /// The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name. public let uri: String public init(message: String? = nil, status: EventStreamDestinationStatus, unhealthySince: Date? = nil, uri: String) { @@ -2450,7 +2450,7 @@ extension CustomerProfiles { /// API to return and review the results. Or, if you have configured ExportingConfig in the MatchingRequest, you can download the results from /// S3. public let matching: MatchingResponse? - /// The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3. + /// The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can download the results from S3. public let ruleBasedMatching: RuleBasedMatchingResponse? /// Usage-specific statistics about the domain. public let stats: DomainStats? @@ -2668,7 +2668,7 @@ extension CustomerProfiles { public let createdAt: Date /// The unique name of the domain. public let domainName: String - /// Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition. + /// Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition. public let isUnstructured: Bool? /// The timestamp of when the domain was most recently edited. public let lastUpdatedAt: Date @@ -2818,6 +2818,10 @@ extension CustomerProfiles { public let keys: [String: [ObjectTypeKey]]? /// The timestamp of when the domain was most recently edited. public let lastUpdatedAt: Date? + /// The amount of provisioned profile object max count available. + public let maxAvailableProfileObjectCount: Int? + /// The amount of profile object max count assigned to the object type. + public let maxProfileObjectCount: Int? /// The name of the profile object type. public let objectTypeName: String /// The format of your sourceLastUpdatedTimestamp that was previously set up. @@ -2827,7 +2831,7 @@ extension CustomerProfiles { /// A unique identifier for the object template. public let templateId: String? - public init(allowProfileCreation: Bool? = nil, createdAt: Date? = nil, description: String, encryptionKey: String? = nil, expirationDays: Int? = nil, fields: [String: ObjectTypeField]? = nil, keys: [String: [ObjectTypeKey]]? = nil, lastUpdatedAt: Date? = nil, objectTypeName: String, sourceLastUpdatedTimestampFormat: String? = nil, tags: [String: String]? = nil, templateId: String? = nil) { + public init(allowProfileCreation: Bool? = nil, createdAt: Date? = nil, description: String, encryptionKey: String? = nil, expirationDays: Int? 
= nil, fields: [String: ObjectTypeField]? = nil, keys: [String: [ObjectTypeKey]]? = nil, lastUpdatedAt: Date? = nil, maxAvailableProfileObjectCount: Int? = nil, maxProfileObjectCount: Int? = nil, objectTypeName: String, sourceLastUpdatedTimestampFormat: String? = nil, tags: [String: String]? = nil, templateId: String? = nil) { self.allowProfileCreation = allowProfileCreation self.createdAt = createdAt self.description = description @@ -2836,6 +2840,8 @@ extension CustomerProfiles { self.fields = fields self.keys = keys self.lastUpdatedAt = lastUpdatedAt + self.maxAvailableProfileObjectCount = maxAvailableProfileObjectCount + self.maxProfileObjectCount = maxProfileObjectCount self.objectTypeName = objectTypeName self.sourceLastUpdatedTimestampFormat = sourceLastUpdatedTimestampFormat self.tags = tags @@ -2851,6 +2857,8 @@ extension CustomerProfiles { case fields = "Fields" case keys = "Keys" case lastUpdatedAt = "LastUpdatedAt" + case maxAvailableProfileObjectCount = "MaxAvailableProfileObjectCount" + case maxProfileObjectCount = "MaxProfileObjectCount" case objectTypeName = "ObjectTypeName" case sourceLastUpdatedTimestampFormat = "SourceLastUpdatedTimestampFormat" case tags = "Tags" @@ -3665,7 +3673,7 @@ extension CustomerProfiles { public let createdAt: Date /// The unique name of the domain. public let domainName: String - /// Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition. + /// Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition. public let isUnstructured: Bool? /// The timestamp of when the domain was most recently edited. public let lastUpdatedAt: Date @@ -3770,15 +3778,21 @@ extension CustomerProfiles { public let description: String /// The timestamp of when the domain was most recently edited. public let lastUpdatedAt: Date? + /// The amount of provisioned profile object max count available. + public let maxAvailableProfileObjectCount: Int? + /// The amount of profile object max count assigned to the object type. + public let maxProfileObjectCount: Int? /// The name of the profile object type. public let objectTypeName: String /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? - public init(createdAt: Date? = nil, description: String, lastUpdatedAt: Date? = nil, objectTypeName: String, tags: [String: String]? = nil) { + public init(createdAt: Date? = nil, description: String, lastUpdatedAt: Date? = nil, maxAvailableProfileObjectCount: Int? = nil, maxProfileObjectCount: Int? = nil, objectTypeName: String, tags: [String: String]? 
= nil) { self.createdAt = createdAt self.description = description self.lastUpdatedAt = lastUpdatedAt + self.maxAvailableProfileObjectCount = maxAvailableProfileObjectCount + self.maxProfileObjectCount = maxProfileObjectCount self.objectTypeName = objectTypeName self.tags = tags } @@ -3787,6 +3801,8 @@ extension CustomerProfiles { case createdAt = "CreatedAt" case description = "Description" case lastUpdatedAt = "LastUpdatedAt" + case maxAvailableProfileObjectCount = "MaxAvailableProfileObjectCount" + case maxProfileObjectCount = "MaxProfileObjectCount" case objectTypeName = "ObjectTypeName" case tags = "Tags" } @@ -4375,7 +4391,7 @@ extension CustomerProfiles { } public struct ObjectFilter: AWSEncodableShape { - /// A searchable identifier of a profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, and _serialNumber. The predefined keys you can use to search for _case include: _caseId. The predefined keys you can use to search for _order include: _orderId. + /// A searchable identifier of a profile object. The predefined keys you can use to search for _asset include: _assetId, _assetName, and _serialNumber. The predefined keys you can use to search for _case include: _caseId. The predefined keys you can use to search for _order include: _orderId. public let keyName: String /// A list of key values. public let values: [String] @@ -4432,7 +4448,7 @@ extension CustomerProfiles { public struct ObjectTypeKey: AWSEncodableShape & AWSDecodableShape { /// The reference for the key name of the fields map. public let fieldNames: [String]? - /// The types of keys that a ProfileObject can have. Each ProfileObject can have only 1 UNIQUE key but multiple PROFILE keys. PROFILE, ASSET, CASE, or ORDER means that this key can be used to tie an object to a PROFILE, ASSET, CASE, or ORDER respectively. UNIQUE means that it can be used to uniquely identify an object. If a key a is marked as SECONDARY, it will be used to search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the profile does not already exist before the object is ingested, otherwise it is only used for matching objects to profiles. + /// The types of keys that a ProfileObject can have. Each ProfileObject can have only 1 UNIQUE key but multiple PROFILE keys. PROFILE, ASSET, CASE, or ORDER means that this key can be used to tie an object to a PROFILE, ASSET, CASE, or ORDER respectively. UNIQUE means that it can be used to uniquely identify an object. If a key a is marked as SECONDARY, it will be used to search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is only used to match a profile but is not persisted to be used for searching of the profile. A NEW_ONLY key is only used if the profile does not already exist before the object is ingested, otherwise it is only used for matching objects to profiles. public let standardIdentifiers: [StandardIdentifier]? public init(fieldNames: [String]? = nil, standardIdentifiers: [StandardIdentifier]? = nil) { @@ -4455,7 +4471,7 @@ extension CustomerProfiles { } public struct Profile: AWSDecodableShape { - /// A unique account number that you have given to the customer. + /// An account number that you have given to the customer. public let accountNumber: String? /// Any additional information relevant to the customer’s profile. public let additionalInformation: String? 
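An illustrative sketch of reading the new MaxProfileObjectCount and MaxAvailableProfileObjectCount fields surfaced above on the GetProfileObjectType response; the request fields (domainName, objectTypeName) belong to the pre-existing API, and the identifiers used here are placeholders.

import SotoCustomerProfiles

// Report the object-count cap for a profile object type. `customerProfiles`
// is an already-configured service object; names are placeholders.
func reportObjectTypeCapacity(_ customerProfiles: CustomerProfiles) async throws {
    let response = try await customerProfiles.getProfileObjectType(
        .init(domainName: "example-domain", objectTypeName: "example_object_type")
    )
    // Both fields are optional and only populated when a cap has been assigned.
    if let assigned = response.maxProfileObjectCount,
       let available = response.maxAvailableProfileObjectCount {
        print("Cap: \(assigned) objects, provisioned capacity still available: \(available)")
    } else {
        print("No profile object cap assigned to this object type")
    }
}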
@@ -4640,7 +4656,7 @@ extension CustomerProfiles { public let createdAt: Date /// The unique name of the domain. public let domainName: String - /// Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition. + /// Boolean that shows if the Flow that's associated with the Integration is created in Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition. public let isUnstructured: Bool? /// The timestamp of when the domain was most recently edited. public let lastUpdatedAt: Date @@ -4749,16 +4765,18 @@ extension CustomerProfiles { public let fields: [String: ObjectTypeField]? /// A list of unique keys that can be used to map data to the profile. public let keys: [String: [ObjectTypeKey]]? + /// The amount of profile object max count assigned to the object type + public let maxProfileObjectCount: Int? /// The name of the profile object type. public let objectTypeName: String /// The format of your sourceLastUpdatedTimestamp that was previously set up. public let sourceLastUpdatedTimestampFormat: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? - /// A unique identifier for the object template. For some attributes in the request, the service will use the default value from the object template when TemplateId is present. If these attributes are present in the request, the service may return a BadRequestException. These attributes include: AllowProfileCreation, SourceLastUpdatedTimestampFormat, Fields, and Keys. For example, if AllowProfileCreation is set to true when TemplateId is set, the service may return a BadRequestException. + /// A unique identifier for the object template. For some attributes in the request, the service will use the default value from the object template when TemplateId is present. If these attributes are present in the request, the service may return a BadRequestException. These attributes include: AllowProfileCreation, SourceLastUpdatedTimestampFormat, Fields, and Keys. For example, if AllowProfileCreation is set to true when TemplateId is set, the service may return a BadRequestException. public let templateId: String? - public init(allowProfileCreation: Bool? = nil, description: String, domainName: String, encryptionKey: String? = nil, expirationDays: Int? = nil, fields: [String: ObjectTypeField]? = nil, keys: [String: [ObjectTypeKey]]? = nil, objectTypeName: String, sourceLastUpdatedTimestampFormat: String? = nil, tags: [String: String]? = nil, templateId: String? = nil) { + public init(allowProfileCreation: Bool? = nil, description: String, domainName: String, encryptionKey: String? = nil, expirationDays: Int? = nil, fields: [String: ObjectTypeField]? = nil, keys: [String: [ObjectTypeKey]]? = nil, maxProfileObjectCount: Int? = nil, objectTypeName: String, sourceLastUpdatedTimestampFormat: String? = nil, tags: [String: String]? = nil, templateId: String? 
= nil) { self.allowProfileCreation = allowProfileCreation self.description = description self.domainName = domainName @@ -4766,6 +4784,7 @@ extension CustomerProfiles { self.expirationDays = expirationDays self.fields = fields self.keys = keys + self.maxProfileObjectCount = maxProfileObjectCount self.objectTypeName = objectTypeName self.sourceLastUpdatedTimestampFormat = sourceLastUpdatedTimestampFormat self.tags = tags @@ -4782,6 +4801,7 @@ extension CustomerProfiles { try container.encodeIfPresent(self.expirationDays, forKey: .expirationDays) try container.encodeIfPresent(self.fields, forKey: .fields) try container.encodeIfPresent(self.keys, forKey: .keys) + try container.encodeIfPresent(self.maxProfileObjectCount, forKey: .maxProfileObjectCount) request.encodePath(self.objectTypeName, key: "ObjectTypeName") try container.encodeIfPresent(self.sourceLastUpdatedTimestampFormat, forKey: .sourceLastUpdatedTimestampFormat) try container.encodeIfPresent(self.tags, forKey: .tags) @@ -4808,6 +4828,7 @@ extension CustomerProfiles { try validate($0.key, name: "keys.key", parent: name, min: 1) try validate($0.key, name: "keys.key", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } + try self.validate(self.maxProfileObjectCount, name: "maxProfileObjectCount", parent: name, min: 1) try self.validate(self.objectTypeName, name: "objectTypeName", parent: name, max: 255) try self.validate(self.objectTypeName, name: "objectTypeName", parent: name, min: 1) try self.validate(self.objectTypeName, name: "objectTypeName", parent: name, pattern: "^[a-zA-Z_][a-zA-Z_0-9-]*$") @@ -4833,6 +4854,7 @@ extension CustomerProfiles { case expirationDays = "ExpirationDays" case fields = "Fields" case keys = "Keys" + case maxProfileObjectCount = "MaxProfileObjectCount" case sourceLastUpdatedTimestampFormat = "SourceLastUpdatedTimestampFormat" case tags = "Tags" case templateId = "TemplateId" @@ -4856,6 +4878,10 @@ extension CustomerProfiles { public let keys: [String: [ObjectTypeKey]]? /// The timestamp of when the domain was most recently edited. public let lastUpdatedAt: Date? + /// The amount of provisioned profile object max count available. + public let maxAvailableProfileObjectCount: Int? + /// The amount of profile object max count assigned to the object type. + public let maxProfileObjectCount: Int? /// The name of the profile object type. public let objectTypeName: String /// The format of your sourceLastUpdatedTimestamp that was previously set up in fields that were parsed using SimpleDateFormat. If you have sourceLastUpdatedTimestamp in your field, you must set up sourceLastUpdatedTimestampFormat. @@ -4865,7 +4891,7 @@ extension CustomerProfiles { /// A unique identifier for the object template. public let templateId: String? - public init(allowProfileCreation: Bool? = nil, createdAt: Date? = nil, description: String, encryptionKey: String? = nil, expirationDays: Int? = nil, fields: [String: ObjectTypeField]? = nil, keys: [String: [ObjectTypeKey]]? = nil, lastUpdatedAt: Date? = nil, objectTypeName: String, sourceLastUpdatedTimestampFormat: String? = nil, tags: [String: String]? = nil, templateId: String? = nil) { + public init(allowProfileCreation: Bool? = nil, createdAt: Date? = nil, description: String, encryptionKey: String? = nil, expirationDays: Int? = nil, fields: [String: ObjectTypeField]? = nil, keys: [String: [ObjectTypeKey]]? = nil, lastUpdatedAt: Date? = nil, maxAvailableProfileObjectCount: Int? = nil, maxProfileObjectCount: Int? = nil, objectTypeName: String, sourceLastUpdatedTimestampFormat: String? 
= nil, tags: [String: String]? = nil, templateId: String? = nil) { self.allowProfileCreation = allowProfileCreation self.createdAt = createdAt self.description = description @@ -4874,6 +4900,8 @@ extension CustomerProfiles { self.fields = fields self.keys = keys self.lastUpdatedAt = lastUpdatedAt + self.maxAvailableProfileObjectCount = maxAvailableProfileObjectCount + self.maxProfileObjectCount = maxProfileObjectCount self.objectTypeName = objectTypeName self.sourceLastUpdatedTimestampFormat = sourceLastUpdatedTimestampFormat self.tags = tags @@ -4889,6 +4917,8 @@ extension CustomerProfiles { case fields = "Fields" case keys = "Keys" case lastUpdatedAt = "LastUpdatedAt" + case maxAvailableProfileObjectCount = "MaxAvailableProfileObjectCount" + case maxProfileObjectCount = "MaxProfileObjectCount" case objectTypeName = "ObjectTypeName" case sourceLastUpdatedTimestampFormat = "SourceLastUpdatedTimestampFormat" case tags = "Tags" @@ -4925,7 +4955,7 @@ extension CustomerProfiles { /// The flag that enables the rule-based matching process of duplicate profiles. public let enabled: Bool public let exportingConfig: ExportingConfig? - /// Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules. + /// Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules. public let matchingRules: [MatchingRule]? /// Indicates the maximum allowed rule level. public let maxAllowedRuleLevelForMatching: Int? @@ -4975,7 +5005,7 @@ extension CustomerProfiles { /// The flag that enables the rule-based matching process of duplicate profiles. public let enabled: Bool? public let exportingConfig: ExportingConfig? - /// Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules. + /// Configures how the rule-based matching process should match profiles. You can have up to 15 MatchingRule in the MatchingRules. public let matchingRules: [MatchingRule]? /// Indicates the maximum allowed rule level. public let maxAllowedRuleLevelForMatching: Int? @@ -5781,7 +5811,7 @@ extension CustomerProfiles { } public struct UpdateProfileRequest: AWSEncodableShape { - /// A unique account number that you have given to the customer. + /// An account number that you have given to the customer. public let accountNumber: String? /// Any additional information relevant to the customer’s profile. public let additionalInformation: String? diff --git a/Sources/Soto/Services/DataSync/DataSync_shapes.swift b/Sources/Soto/Services/DataSync/DataSync_shapes.swift index ff25443da7..0d062c0c17 100644 --- a/Sources/Soto/Services/DataSync/DataSync_shapes.swift +++ b/Sources/Soto/Services/DataSync/DataSync_shapes.swift @@ -300,6 +300,7 @@ extension DataSync { } public enum TaskExecutionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cancelling = "CANCELLING" case error = "ERROR" case launching = "LAUNCHING" case preparing = "PREPARING" diff --git a/Sources/Soto/Services/DataZone/DataZone_api.swift b/Sources/Soto/Services/DataZone/DataZone_api.swift index 772376770c..686953d417 100644 --- a/Sources/Soto/Services/DataZone/DataZone_api.swift +++ b/Sources/Soto/Services/DataZone/DataZone_api.swift @@ -175,6 +175,19 @@ public struct DataZone: AWSService { ) } + /// Associates the environment role in Amazon DataZone. 
+ @Sendable + public func associateEnvironmentRole(_ input: AssociateEnvironmentRoleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateEnvironmentRoleOutput { + return try await self.client.execute( + operation: "AssociateEnvironmentRole", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/roles/{environmentRoleArn}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Cancels the metadata generation run. @Sendable public func cancelMetadataGenerationRun(_ input: CancelMetadataGenerationRunInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelMetadataGenerationRunOutput { @@ -279,6 +292,19 @@ public struct DataZone: AWSService { ) } + /// Creates an action for the environment, for example, creates a console link for an analytics tool that is available in this environment. + @Sendable + public func createEnvironmentAction(_ input: CreateEnvironmentActionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEnvironmentActionOutput { + return try await self.client.execute( + operation: "CreateEnvironmentAction", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an Amazon DataZone environment profile. @Sendable public func createEnvironmentProfile(_ input: CreateEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEnvironmentProfileOutput { @@ -500,6 +526,19 @@ public struct DataZone: AWSService { ) } + /// Deletes an action for the environment, for example, deletes a console link for an analytics tool that is available in this environment. + @Sendable + public func deleteEnvironmentAction(_ input: DeleteEnvironmentActionInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteEnvironmentAction", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the blueprint configuration in Amazon DataZone. @Sendable public func deleteEnvironmentBlueprintConfiguration(_ input: DeleteEnvironmentBlueprintConfigurationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteEnvironmentBlueprintConfigurationOutput { @@ -656,6 +695,19 @@ public struct DataZone: AWSService { ) } + /// Disassociates the environment role in Amazon DataZone. + @Sendable + public func disassociateEnvironmentRole(_ input: DisassociateEnvironmentRoleInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateEnvironmentRoleOutput { + return try await self.client.execute( + operation: "DisassociateEnvironmentRole", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/roles/{environmentRoleArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an Amazon DataZone asset. @Sendable public func getAsset(_ input: GetAssetInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAssetOutput { @@ -734,6 +786,19 @@ public struct DataZone: AWSService { ) } + /// Gets the specified environment action. 
+ @Sendable + public func getEnvironmentAction(_ input: GetEnvironmentActionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEnvironmentActionOutput { + return try await self.client.execute( + operation: "GetEnvironmentAction", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an Amazon DataZone blueprint. @Sendable public func getEnvironmentBlueprint(_ input: GetEnvironmentBlueprintInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEnvironmentBlueprintOutput { @@ -1020,6 +1085,19 @@ public struct DataZone: AWSService { ) } + /// Lists existing environment actions. + @Sendable + public func listEnvironmentActions(_ input: ListEnvironmentActionsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEnvironmentActionsOutput { + return try await self.client.execute( + operation: "ListEnvironmentActions", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists blueprint configurations for a Amazon DataZone environment. @Sendable public func listEnvironmentBlueprintConfigurations(_ input: ListEnvironmentBlueprintConfigurationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEnvironmentBlueprintConfigurationsOutput { @@ -1423,6 +1501,19 @@ public struct DataZone: AWSService { ) } + /// Updates an environment action. + @Sendable + public func updateEnvironmentAction(_ input: UpdateEnvironmentActionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnvironmentActionOutput { + return try await self.client.execute( + operation: "UpdateEnvironmentAction", + path: "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the specified environment profile in Amazon DataZone. @Sendable public func updateEnvironmentProfile(_ input: UpdateEnvironmentProfileInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEnvironmentProfileOutput { @@ -1630,6 +1721,25 @@ extension DataZone { ) } + /// Lists existing environment actions. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listEnvironmentActionsPaginator( + _ input: ListEnvironmentActionsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listEnvironmentActions, + inputKey: \ListEnvironmentActionsInput.nextToken, + outputKey: \ListEnvironmentActionsOutput.nextToken, + logger: logger + ) + } + /// Lists blueprint configurations for a Amazon DataZone environment. /// Return PaginatorSequence for operation.
/// @@ -2022,6 +2132,17 @@ extension DataZone.ListDomainsInput: AWSPaginateToken { } } +extension DataZone.ListEnvironmentActionsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> DataZone.ListEnvironmentActionsInput { + return .init( + domainIdentifier: self.domainIdentifier, + environmentIdentifier: self.environmentIdentifier, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension DataZone.ListEnvironmentBlueprintConfigurationsInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> DataZone.ListEnvironmentBlueprintConfigurationsInput { return .init( diff --git a/Sources/Soto/Services/DataZone/DataZone_shapes.swift b/Sources/Soto/Services/DataZone/DataZone_shapes.swift index a3a40dd58c..bd611fa2b6 100644 --- a/Sources/Soto/Services/DataZone/DataZone_shapes.swift +++ b/Sources/Soto/Services/DataZone/DataZone_shapes.swift @@ -261,6 +261,17 @@ extension DataZone { public var description: String { return self.rawValue } } + public enum SelfGrantStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case grantFailed = "GRANT_FAILED" + case grantInProgress = "GRANT_IN_PROGRESS" + case grantPending = "GRANT_PENDING" + case granted = "GRANTED" + case revokeFailed = "REVOKE_FAILED" + case revokeInProgress = "REVOKE_IN_PROGRESS" + case revokePending = "REVOKE_PENDING" + public var description: String { return self.rawValue } + } + public enum SortFieldProject: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case name = "NAME" public var description: String { return self.rawValue } @@ -709,6 +720,37 @@ extension DataZone { } } + public enum SelfGrantStatusOutput: AWSDecodableShape, Sendable { + /// The details for the self granting status for a Glue data source. + case glueSelfGrantStatus(GlueSelfGrantStatusOutput) + /// The details for the self granting status for an Amazon Redshift data source. + case redshiftSelfGrantStatus(RedshiftSelfGrantStatusOutput) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .glueSelfGrantStatus: + let value = try container.decode(GlueSelfGrantStatusOutput.self, forKey: .glueSelfGrantStatus) + self = .glueSelfGrantStatus(value) + case .redshiftSelfGrantStatus: + let value = try container.decode(RedshiftSelfGrantStatusOutput.self, forKey: .redshiftSelfGrantStatus) + self = .redshiftSelfGrantStatus(value) + } + } + + private enum CodingKeys: String, CodingKey { + case glueSelfGrantStatus = "glueSelfGrantStatus" + case redshiftSelfGrantStatus = "redshiftSelfGrantStatus" + } + } + public enum UserProfileDetails: AWSDecodableShape, Sendable { /// The IAM details included in the user profile details. case iam(IamUserProfileDetails) @@ -1280,6 +1322,53 @@ extension DataZone { } } + public struct AssociateEnvironmentRoleInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which the environment role is associated. + public let domainIdentifier: String + /// The ID of the Amazon DataZone environment. + public let environmentIdentifier: String + /// The ARN of the environment role. 
+ public let environmentRoleArn: String + + public init(domainIdentifier: String, environmentIdentifier: String, environmentRoleArn: String) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.environmentRoleArn = environmentRoleArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + request.encodePath(self.environmentRoleArn, key: "environmentRoleArn") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct AssociateEnvironmentRoleOutput: AWSDecodableShape { + public init() {} + } + + public struct AwsConsoleLinkParameters: AWSEncodableShape & AWSDecodableShape { + /// The URI of the console link specified as part of the environment action. + public let uri: String? + + public init(uri: String? = nil) { + self.uri = uri + } + + private enum CodingKeys: String, CodingKey { + case uri = "uri" + } + } + public struct BusinessNameGenerationConfiguration: AWSEncodableShape & AWSDecodableShape { /// Specifies whether the business name generation is enabled. public let enabled: Bool? @@ -2192,11 +2281,92 @@ extension DataZone { } } + public struct CreateEnvironmentActionInput: AWSEncodableShape { + /// The description of the environment action that is being created in the environment. + public let description: String? + /// The ID of the Amazon DataZone domain in which the environment action is created. + public let domainIdentifier: String + /// The ID of the environment in which the environment action is created. + public let environmentIdentifier: String + /// The name of the environment action. + public let name: String + /// The parameters of the environment action. + public let parameters: ActionParameters + + public init(description: String? = nil, domainIdentifier: String, environmentIdentifier: String, name: String, parameters: ActionParameters) { + self.description = description + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.name = name + self.parameters = parameters + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + try container.encode(self.name, forKey: .name) + try container.encode(self.parameters, forKey: .parameters) + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case name = "name" + case parameters = "parameters" + } + } + + public struct CreateEnvironmentActionOutput: AWSDecodableShape { + /// The description of the environment action. + public let description: String? + /// The ID of the domain in which the environment action is created. + public let domainId: String + /// The ID of the environment in which the environment action is created. + public let environmentId: String + /// The ID of the environment action. + public let id: String + /// The name of the environment action. + public let name: String + /// The parameters of the environment action. + public let parameters: ActionParameters + + public init(description: String? = nil, domainId: String, environmentId: String, id: String, name: String, parameters: ActionParameters) { + self.description = description + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.name = name + self.parameters = parameters + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case name = "name" + case parameters = "parameters" + } + } + public struct CreateEnvironmentInput: AWSEncodableShape { /// The description of the Amazon DataZone environment. public let description: String? /// The identifier of the Amazon DataZone domain in which the environment is created. public let domainIdentifier: String + /// The ID of the account in which the environment is being created. + public let environmentAccountIdentifier: String? + /// The region of the account in which the environment is being created. + public let environmentAccountRegion: String? + /// The ID of the blueprint with which the environment is being created. + public let environmentBlueprintIdentifier: String? /// The identifier of the environment profile that is used to create this Amazon DataZone environment. public let environmentProfileIdentifier: String /// The glossary terms that can be used in this Amazon DataZone environment. @@ -2208,9 +2378,12 @@ extension DataZone { /// The user parameters of this Amazon DataZone environment. public let userParameters: [EnvironmentParameter]? - public init(description: String? = nil, domainIdentifier: String, environmentProfileIdentifier: String, glossaryTerms: [String]? = nil, name: String, projectIdentifier: String, userParameters: [EnvironmentParameter]? = nil) { + public init(description: String? = nil, domainIdentifier: String, environmentAccountIdentifier: String? = nil, environmentAccountRegion: String? = nil, environmentBlueprintIdentifier: String? = nil, environmentProfileIdentifier: String, glossaryTerms: [String]?
= nil, name: String, projectIdentifier: String, userParameters: [EnvironmentParameter]? = nil) { self.description = description self.domainIdentifier = domainIdentifier + self.environmentAccountIdentifier = environmentAccountIdentifier + self.environmentAccountRegion = environmentAccountRegion + self.environmentBlueprintIdentifier = environmentBlueprintIdentifier self.environmentProfileIdentifier = environmentProfileIdentifier self.glossaryTerms = glossaryTerms self.name = name @@ -2223,6 +2396,9 @@ extension DataZone { var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.domainIdentifier, key: "domainIdentifier") + try container.encodeIfPresent(self.environmentAccountIdentifier, forKey: .environmentAccountIdentifier) + try container.encodeIfPresent(self.environmentAccountRegion, forKey: .environmentAccountRegion) + try container.encodeIfPresent(self.environmentBlueprintIdentifier, forKey: .environmentBlueprintIdentifier) try container.encode(self.environmentProfileIdentifier, forKey: .environmentProfileIdentifier) try container.encodeIfPresent(self.glossaryTerms, forKey: .glossaryTerms) try container.encode(self.name, forKey: .name) @@ -2232,7 +2408,7 @@ extension DataZone { public func validate(name: String) throws { try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") - try self.validate(self.environmentProfileIdentifier, name: "environmentProfileIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentProfileIdentifier, name: "environmentProfileIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{0,36}$") try self.glossaryTerms?.forEach { try validate($0, name: "glossaryTerms[]", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") } @@ -2243,6 +2419,9 @@ extension DataZone { private enum CodingKeys: String, CodingKey { case description = "description" + case environmentAccountIdentifier = "environmentAccountIdentifier" + case environmentAccountRegion = "environmentAccountRegion" + case environmentBlueprintIdentifier = "environmentBlueprintIdentifier" case environmentProfileIdentifier = "environmentProfileIdentifier" case glossaryTerms = "glossaryTerms" case name = "name" @@ -2271,7 +2450,7 @@ extension DataZone { /// The ID of the blueprint with which this Amazon DataZone environment was created. public let environmentBlueprintId: String? /// The ID of the environment profile with which this Amazon DataZone environment was created. - public let environmentProfileId: String + public let environmentProfileId: String? /// The glossary terms that can be used in this Amazon DataZone environment. public let glossaryTerms: [String]? /// The ID of this Amazon DataZone environment. @@ -2295,7 +2474,7 @@ extension DataZone { /// The user parameters of this Amazon DataZone environment. public let userParameters: [CustomParameter]? - public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? 
= nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String? = nil, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { self.awsAccountId = awsAccountId self.awsAccountRegion = awsAccountRegion self.createdAt = createdAt @@ -3799,11 +3978,14 @@ extension DataZone { public let domainIdentifier: String /// The identifier of the data source that is deleted. public let identifier: String + /// Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source. + public let retainPermissionsOnRevokeFailure: Bool? - public init(clientToken: String? = DeleteDataSourceInput.idempotencyToken(), domainIdentifier: String, identifier: String) { + public init(clientToken: String? = DeleteDataSourceInput.idempotencyToken(), domainIdentifier: String, identifier: String, retainPermissionsOnRevokeFailure: Bool? = nil) { self.clientToken = clientToken self.domainIdentifier = domainIdentifier self.identifier = identifier + self.retainPermissionsOnRevokeFailure = retainPermissionsOnRevokeFailure } public func encode(to encoder: Encoder) throws { @@ -3812,6 +3994,7 @@ extension DataZone { request.encodeQuery(self.clientToken, key: "clientToken") request.encodePath(self.domainIdentifier, key: "domainIdentifier") request.encodePath(self.identifier, key: "identifier") + request.encodeQuery(self.retainPermissionsOnRevokeFailure, key: "retainPermissionsOnRevokeFailure") } public func validate(name: String) throws { @@ -3855,8 +4038,12 @@ extension DataZone { public let projectId: String /// Specifies whether the assets that this data source creates in the inventory are to be also automatically published to the catalog. public let publishOnImport: Bool? + /// Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source. + public let retainPermissionsOnRevokeFailure: Bool? /// The schedule of runs for this data source. public let schedule: ScheduleConfiguration? + /// Specifies the status of the self-granting functionality. + public let selfGrantStatus: SelfGrantStatusOutput? /// The status of this data source. public let status: DataSourceStatus? /// The type of this data source. @@ -3865,7 +4052,7 @@ extension DataZone { @OptionalCustomCoding public var updatedAt: Date? - public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? 
= nil, updatedAt: Date? = nil) { + public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, retainPermissionsOnRevokeFailure: Bool? = nil, schedule: ScheduleConfiguration? = nil, selfGrantStatus: SelfGrantStatusOutput? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { self.assetFormsOutput = assetFormsOutput self.configuration = configuration self.createdAt = createdAt @@ -3881,7 +4068,9 @@ extension DataZone { self.name = name self.projectId = projectId self.publishOnImport = publishOnImport + self.retainPermissionsOnRevokeFailure = retainPermissionsOnRevokeFailure self.schedule = schedule + self.selfGrantStatus = selfGrantStatus self.status = status self.type = type self.updatedAt = updatedAt @@ -3903,7 +4092,9 @@ extension DataZone { case name = "name" case projectId = "projectId" case publishOnImport = "publishOnImport" + case retainPermissionsOnRevokeFailure = "retainPermissionsOnRevokeFailure" case schedule = "schedule" + case selfGrantStatus = "selfGrantStatus" case status = "status" case type = "type" case updatedAt = "updatedAt" @@ -3952,6 +4143,36 @@ extension DataZone { } } + public struct DeleteEnvironmentActionInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which an environment action is deleted. + public let domainIdentifier: String + /// The ID of the environment where an environment action is deleted. + public let environmentIdentifier: String + /// The ID of the environment action that is deleted. + public let identifier: String + + public init(domainIdentifier: String, environmentIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.identifier = identifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + request.encodePath(self.identifier, key: "identifier") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + public struct DeleteEnvironmentBlueprintConfigurationInput: AWSEncodableShape { /// The ID of the Amazon DataZone domain in which the blueprint configuration is deleted. 
public let domainIdentifier: String @@ -4028,7 +4249,7 @@ extension DataZone { public func validate(name: String) throws { try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") - try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{0,36}$") } private enum CodingKeys: CodingKey {} @@ -4476,6 +4697,40 @@ extension DataZone { } } + public struct DisassociateEnvironmentRoleInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which an environment role is disassociated. + public let domainIdentifier: String + /// The ID of the environment. + public let environmentIdentifier: String + /// The ARN of the environment role. + public let environmentRoleArn: String + + public init(domainIdentifier: String, environmentIdentifier: String, environmentRoleArn: String) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.environmentRoleArn = environmentRoleArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + request.encodePath(self.environmentRoleArn, key: "environmentRoleArn") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DisassociateEnvironmentRoleOutput: AWSDecodableShape { + public init() {} + } + public struct DomainSummary: AWSDecodableShape { /// The ARN of the Amazon DataZone domain. public let arn: String @@ -4521,6 +4776,39 @@ extension DataZone { } } + public struct EnvironmentActionSummary: AWSDecodableShape { + /// The environment action description. + public let description: String? + /// The Amazon DataZone domain ID of the environment action. + public let domainId: String + /// The environment ID of the environment action. + public let environmentId: String + /// The ID of the environment action. + public let id: String + /// The name of the environment action. + public let name: String + /// The parameters of the environment action. + public let parameters: ActionParameters + + public init(description: String? = nil, domainId: String, environmentId: String, id: String, name: String, parameters: ActionParameters) { + self.description = description + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.name = name + self.parameters = parameters + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case name = "name" + case parameters = "parameters" + } + } + public struct EnvironmentBlueprintConfigurationItem: AWSDecodableShape { /// The timestamp of when an environment blueprint was created. public let createdAt: Date? @@ -4700,7 +4988,7 @@ extension DataZone { /// The identifier of the Amazon DataZone domain in which the environment exists. 
public let domainId: String /// The identifier of the environment profile with which the environment was created. - public let environmentProfileId: String + public let environmentProfileId: String? /// The identifier of the environment. public let id: String? /// The name of the environment. @@ -4714,7 +5002,7 @@ extension DataZone { /// The timestamp of when the environment was updated. public let updatedAt: Date? - public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentProfileId: String, id: String? = nil, name: String, projectId: String, provider: String, status: EnvironmentStatus? = nil, updatedAt: Date? = nil) { + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, description: String? = nil, domainId: String, environmentProfileId: String? = nil, id: String? = nil, name: String, projectId: String, provider: String, status: EnvironmentStatus? = nil, updatedAt: Date? = nil) { self.awsAccountId = awsAccountId self.awsAccountRegion = awsAccountRegion self.createdAt = createdAt @@ -5236,6 +5524,8 @@ extension DataZone { public let recommendation: RecommendationConfiguration? /// The schedule of the data source runs. public let schedule: ScheduleConfiguration? + /// Specifies the status of the self-granting functionality. + public let selfGrantStatus: SelfGrantStatusOutput? /// The status of the data source. public let status: DataSourceStatus? /// The type of the data source. @@ -5244,7 +5534,7 @@ extension DataZone { @OptionalCustomCoding public var updatedAt: Date? - public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAssetCount: Int? = nil, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { + public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAssetCount: Int? = nil, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? = nil, selfGrantStatus: SelfGrantStatusOutput? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? 
= nil) { self.assetFormsOutput = assetFormsOutput self.configuration = configuration self.createdAt = createdAt @@ -5263,6 +5553,7 @@ extension DataZone { self.publishOnImport = publishOnImport self.recommendation = recommendation self.schedule = schedule + self.selfGrantStatus = selfGrantStatus self.status = status self.type = type self.updatedAt = updatedAt @@ -5287,6 +5578,7 @@ extension DataZone { case publishOnImport = "publishOnImport" case recommendation = "recommendation" case schedule = "schedule" + case selfGrantStatus = "selfGrantStatus" case status = "status" case type = "type" case updatedAt = "updatedAt" @@ -5462,6 +5754,69 @@ extension DataZone { } } + public struct GetEnvironmentActionInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which the GetEnvironmentAction API is invoked. + public let domainIdentifier: String + /// The environment ID of the environment action. + public let environmentIdentifier: String + /// The ID of the environment action + public let identifier: String + + public init(domainIdentifier: String, environmentIdentifier: String, identifier: String) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.identifier = identifier + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + request.encodePath(self.identifier, key: "identifier") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetEnvironmentActionOutput: AWSDecodableShape { + /// The description of the environment action. + public let description: String? + /// The ID of the Amazon DataZone domain in which the environment action lives. + public let domainId: String + /// The environment ID of the environment action. + public let environmentId: String + /// The ID of the environment action. + public let id: String + /// The name of the environment action. + public let name: String + /// The parameters of the environment action. + public let parameters: ActionParameters + + public init(description: String? = nil, domainId: String, environmentId: String, id: String, name: String, parameters: ActionParameters) { + self.description = description + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.name = name + self.parameters = parameters + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case name = "name" + case parameters = "parameters" + } + } + public struct GetEnvironmentBlueprintConfigurationInput: AWSEncodableShape { /// The ID of the Amazon DataZone domain where this blueprint exists. public let domainIdentifier: String @@ -5650,7 +6005,7 @@ extension DataZone { /// The blueprint with which the environment is created. public let environmentBlueprintId: String? /// The ID of the environment profile with which the environment is created. 
- public let environmentProfileId: String + public let environmentProfileId: String? /// The business glossary terms that can be used in this environment. public let glossaryTerms: [String]? /// The ID of the environment. @@ -5674,7 +6029,7 @@ extension DataZone { /// The user parameters of this Amazon DataZone environment. public let userParameters: [CustomParameter]? - public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String? = nil, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { self.awsAccountId = awsAccountId self.awsAccountRegion = awsAccountRegion self.createdAt = createdAt @@ -5743,7 +6098,7 @@ extension DataZone { public func validate(name: String) throws { try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") - try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{0,36}$") } private enum CodingKeys: CodingKey {} @@ -6996,6 +7351,19 @@ extension DataZone { } } + public struct GlueSelfGrantStatusOutput: AWSDecodableShape { + /// The details for the self granting status for a Glue data source. + public let selfGrantStatusDetails: [SelfGrantStatusDetail] + + public init(selfGrantStatusDetails: [SelfGrantStatusDetail]) { + self.selfGrantStatusDetails = selfGrantStatusDetails + } + + private enum CodingKeys: String, CodingKey { + case selfGrantStatusDetails = "selfGrantStatusDetails" + } + } + public struct GroupDetails: AWSDecodableShape { /// The identifier of the group in Amazon DataZone. public let groupId: String @@ -7360,6 +7728,61 @@ extension DataZone { } } + public struct ListEnvironmentActionsInput: AWSEncodableShape { + /// The ID of the Amazon DataZone domain in which the environment actions are listed. + public let domainIdentifier: String + /// The ID of the environment whose environment actions are listed. + public let environmentIdentifier: String + /// The maximum number of environment actions to return in a single call to ListEnvironmentActions.
When the number of environment actions to be listed is greater than the value of MaxResults, the response contains a NextToken value that you can use in a subsequent call to ListEnvironmentActions to list the next set of environment actions. + public let maxResults: Int? + /// When the number of environment actions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environment actions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentActions to list the next set of environment actions. + public let nextToken: String? + + public init(domainIdentifier: String, environmentIdentifier: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListEnvironmentActionsOutput: AWSDecodableShape { + /// The results of ListEnvironmentActions. + public let items: [EnvironmentActionSummary]? + /// When the number of environment actions is greater than the default value for the MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than the number of environment actions, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListEnvironmentActions to list the next set of environment actions. + public let nextToken: String? + + public init(items: [EnvironmentActionSummary]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + public struct ListEnvironmentBlueprintConfigurationsInput: AWSEncodableShape { /// The identifier of the Amazon DataZone domain. 
public let domainIdentifier: String @@ -7606,7 +8029,7 @@ extension DataZone { try self.validate(self.awsAccountRegion, name: "awsAccountRegion", parent: name, pattern: "^[a-z]{2}-[a-z]{4,10}-\\d$") try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") try self.validate(self.environmentBlueprintIdentifier, name: "environmentBlueprintIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") - try self.validate(self.environmentProfileIdentifier, name: "environmentProfileIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentProfileIdentifier, name: "environmentProfileIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{0,36}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) @@ -8831,6 +9254,19 @@ extension DataZone { } } + public struct RedshiftSelfGrantStatusOutput: AWSDecodableShape { + /// The details for the self granting status for an Amazon Redshift data source. + public let selfGrantStatusDetails: [SelfGrantStatusDetail] + + public init(selfGrantStatusDetails: [SelfGrantStatusDetail]) { + self.selfGrantStatusDetails = selfGrantStatusDetails + } + + private enum CodingKeys: String, CodingKey { + case selfGrantStatusDetails = "selfGrantStatusDetails" + } + } + public struct RedshiftServerlessStorage: AWSEncodableShape & AWSDecodableShape { /// The name of the Amazon Redshift Serverless workgroup. public let workgroupName: String @@ -9679,6 +10115,31 @@ extension DataZone { } } + public struct SelfGrantStatusDetail: AWSDecodableShape { + /// The name of the database used for the data source. + public let databaseName: String + /// The reason for why the operation failed. + public let failureCause: String? + /// The name of the schema used in the data source. + public let schemaName: String? + /// The self granting status of the data source. + public let status: SelfGrantStatus + + public init(databaseName: String, failureCause: String? = nil, schemaName: String? = nil, status: SelfGrantStatus) { + self.databaseName = databaseName + self.failureCause = failureCause + self.schemaName = schemaName + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case databaseName = "databaseName" + case failureCause = "failureCause" + case schemaName = "schemaName" + case status = "status" + } + } + public struct SingleSignOn: AWSEncodableShape & AWSDecodableShape { /// The type of single sign-on in Amazon DataZone. public let type: AuthType? @@ -10551,10 +11012,12 @@ extension DataZone { public let publishOnImport: Bool? /// The recommendation to be updated as part of the UpdateDataSource action. public let recommendation: RecommendationConfiguration? + /// Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source. + public let retainPermissionsOnRevokeFailure: Bool? /// The schedule to be updated as part of the UpdateDataSource action. public let schedule: ScheduleConfiguration? - public init(assetFormsInput: [FormInput]? = nil, configuration: DataSourceConfigurationInput? = nil, description: String? = nil, domainIdentifier: String, enableSetting: EnableSetting? = nil, identifier: String, name: String? = nil, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? 
= nil) { + public init(assetFormsInput: [FormInput]? = nil, configuration: DataSourceConfigurationInput? = nil, description: String? = nil, domainIdentifier: String, enableSetting: EnableSetting? = nil, identifier: String, name: String? = nil, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, retainPermissionsOnRevokeFailure: Bool? = nil, schedule: ScheduleConfiguration? = nil) { self.assetFormsInput = assetFormsInput self.configuration = configuration self.description = description @@ -10564,6 +11027,7 @@ extension DataZone { self.name = name self.publishOnImport = publishOnImport self.recommendation = recommendation + self.retainPermissionsOnRevokeFailure = retainPermissionsOnRevokeFailure self.schedule = schedule } @@ -10579,6 +11043,7 @@ extension DataZone { try container.encodeIfPresent(self.name, forKey: .name) try container.encodeIfPresent(self.publishOnImport, forKey: .publishOnImport) try container.encodeIfPresent(self.recommendation, forKey: .recommendation) + try container.encodeIfPresent(self.retainPermissionsOnRevokeFailure, forKey: .retainPermissionsOnRevokeFailure) try container.encodeIfPresent(self.schedule, forKey: .schedule) } @@ -10603,6 +11068,7 @@ extension DataZone { case name = "name" case publishOnImport = "publishOnImport" case recommendation = "recommendation" + case retainPermissionsOnRevokeFailure = "retainPermissionsOnRevokeFailure" case schedule = "schedule" } } @@ -10642,8 +11108,12 @@ extension DataZone { public let publishOnImport: Bool? /// The recommendation to be updated as part of the UpdateDataSource action. public let recommendation: RecommendationConfiguration? + /// Specifies that the granted permissions are retained in case of a self-subscribe functionality failure for a data source. + public let retainPermissionsOnRevokeFailure: Bool? /// The schedule to be updated as part of the UpdateDataSource action. public let schedule: ScheduleConfiguration? + /// Specifies the status of the self-granting functionality. + public let selfGrantStatus: SelfGrantStatusOutput? /// The status to be updated as part of the UpdateDataSource action. public let status: DataSourceStatus? /// The type to be updated as part of the UpdateDataSource action. @@ -10652,7 +11122,7 @@ extension DataZone { @OptionalCustomCoding public var updatedAt: Date? - public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, schedule: ScheduleConfiguration? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { + public init(assetFormsOutput: [FormOutput]? = nil, configuration: DataSourceConfigurationOutput? = nil, createdAt: Date? = nil, description: String? = nil, domainId: String, enableSetting: EnableSetting? = nil, environmentId: String, errorMessage: DataSourceErrorMessage? = nil, id: String, lastRunAt: Date? = nil, lastRunErrorMessage: DataSourceErrorMessage? = nil, lastRunStatus: DataSourceRunStatus? = nil, name: String, projectId: String, publishOnImport: Bool? = nil, recommendation: RecommendationConfiguration? = nil, retainPermissionsOnRevokeFailure: Bool? 
= nil, schedule: ScheduleConfiguration? = nil, selfGrantStatus: SelfGrantStatusOutput? = nil, status: DataSourceStatus? = nil, type: String? = nil, updatedAt: Date? = nil) { self.assetFormsOutput = assetFormsOutput self.configuration = configuration self.createdAt = createdAt @@ -10669,7 +11139,9 @@ extension DataZone { self.projectId = projectId self.publishOnImport = publishOnImport self.recommendation = recommendation + self.retainPermissionsOnRevokeFailure = retainPermissionsOnRevokeFailure self.schedule = schedule + self.selfGrantStatus = selfGrantStatus self.status = status self.type = type self.updatedAt = updatedAt @@ -10692,7 +11164,9 @@ extension DataZone { case projectId = "projectId" case publishOnImport = "publishOnImport" case recommendation = "recommendation" + case retainPermissionsOnRevokeFailure = "retainPermissionsOnRevokeFailure" case schedule = "schedule" + case selfGrantStatus = "selfGrantStatus" case status = "status" case type = "type" case updatedAt = "updatedAt" @@ -10779,6 +11253,85 @@ extension DataZone { } } + public struct UpdateEnvironmentActionInput: AWSEncodableShape { + /// The description of the environment action. + public let description: String? + /// The domain ID of the environment action. + public let domainIdentifier: String + /// The environment ID of the environment action. + public let environmentIdentifier: String + /// The ID of the environment action. + public let identifier: String + /// The name of the environment action. + public let name: String? + /// The parameters of the environment action. + public let parameters: ActionParameters? + + public init(description: String? = nil, domainIdentifier: String, environmentIdentifier: String, identifier: String, name: String? = nil, parameters: ActionParameters? = nil) { + self.description = description + self.domainIdentifier = domainIdentifier + self.environmentIdentifier = environmentIdentifier + self.identifier = identifier + self.name = name + self.parameters = parameters + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.domainIdentifier, key: "domainIdentifier") + request.encodePath(self.environmentIdentifier, key: "environmentIdentifier") + request.encodePath(self.identifier, key: "identifier") + try container.encodeIfPresent(self.name, forKey: .name) + try container.encodeIfPresent(self.parameters, forKey: .parameters) + } + + public func validate(name: String) throws { + try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") + try self.validate(self.environmentIdentifier, name: "environmentIdentifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case name = "name" + case parameters = "parameters" + } + } + + public struct UpdateEnvironmentActionOutput: AWSDecodableShape { + /// The description of the environment action. + public let description: String? + /// The domain ID of the environment action. + public let domainId: String + /// The environment ID of the environment action. + public let environmentId: String + /// The ID of the environment action. + public let id: String + /// The name of the environment action. + public let name: String + /// The parameters of the environment action. 
+ public let parameters: ActionParameters + + public init(description: String? = nil, domainId: String, environmentId: String, id: String, name: String, parameters: ActionParameters) { + self.description = description + self.domainId = domainId + self.environmentId = environmentId + self.id = id + self.name = name + self.parameters = parameters + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case domainId = "domainId" + case environmentId = "environmentId" + case id = "id" + case name = "name" + case parameters = "parameters" + } + } + public struct UpdateEnvironmentInput: AWSEncodableShape { /// The description to be updated as part of the UpdateEnvironment action. public let description: String? @@ -10846,7 +11399,7 @@ extension DataZone { /// The blueprint identifier of the environment. public let environmentBlueprintId: String? /// The profile identifier of the environment. - public let environmentProfileId: String + public let environmentProfileId: String? /// The glossary terms to be updated as part of the UpdateEnvironment action. public let glossaryTerms: [String]? /// The identifier of the environment that is to be updated. @@ -10870,7 +11423,7 @@ extension DataZone { /// The user parameters to be updated as part of the UpdateEnvironment action. public let userParameters: [CustomParameter]? - public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? = nil) { + public init(awsAccountId: String? = nil, awsAccountRegion: String? = nil, createdAt: Date? = nil, createdBy: String, deploymentProperties: DeploymentProperties? = nil, description: String? = nil, domainId: String, environmentActions: [ConfigurableEnvironmentAction]? = nil, environmentBlueprintId: String? = nil, environmentProfileId: String? = nil, glossaryTerms: [String]? = nil, id: String? = nil, lastDeployment: Deployment? = nil, name: String, projectId: String, provider: String, provisionedResources: [Resource]? = nil, provisioningProperties: ProvisioningProperties? = nil, status: EnvironmentStatus? = nil, updatedAt: Date? = nil, userParameters: [CustomParameter]? 
= nil) { self.awsAccountId = awsAccountId self.awsAccountRegion = awsAccountRegion self.createdAt = createdAt @@ -10961,7 +11514,7 @@ extension DataZone { try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^\\d{12}$") try self.validate(self.awsAccountRegion, name: "awsAccountRegion", parent: name, pattern: "^[a-z]{2}-[a-z]{4,10}-\\d$") try self.validate(self.domainIdentifier, name: "domainIdentifier", parent: name, pattern: "^dzd[-_][a-zA-Z0-9_-]{1,36}$") - try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{1,36}$") + try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9_-]{0,36}$") try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[\\w -]+$") @@ -11824,6 +12377,19 @@ extension DataZone { } } + public struct ActionParameters: AWSEncodableShape & AWSDecodableShape { + /// The console link specified as part of the environment action. + public let awsConsoleLink: AwsConsoleLinkParameters? + + public init(awsConsoleLink: AwsConsoleLinkParameters? = nil) { + self.awsConsoleLink = awsConsoleLink + } + + private enum CodingKeys: String, CodingKey { + case awsConsoleLink = "awsConsoleLink" + } + } + public struct GrantedEntity: AWSDecodableShape { /// The listing for which a subscription is granted. public let listing: ListingRevision? @@ -11876,7 +12442,7 @@ extension DataZone { } public func validate(name: String) throws { - try self.validate(self.smithy, name: "smithy", parent: name, max: 10000) + try self.validate(self.smithy, name: "smithy", parent: name, max: 100000) try self.validate(self.smithy, name: "smithy", parent: name, min: 1) } diff --git a/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift b/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift index a4cf1316de..b1f2cddf4d 100644 --- a/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift +++ b/Sources/Soto/Services/DirectConnect/DirectConnect_api.swift @@ -82,6 +82,8 @@ public struct DirectConnect: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ + "ca-central-1": "directconnect-fips.ca-central-1.amazonaws.com", + "ca-west-1": "directconnect-fips.ca-west-1.amazonaws.com", "us-east-1": "directconnect-fips.us-east-1.amazonaws.com", "us-east-2": "directconnect-fips.us-east-2.amazonaws.com", "us-west-1": "directconnect-fips.us-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/DocDB/DocDB_api.swift b/Sources/Soto/Services/DocDB/DocDB_api.swift index 1438ec2708..35befc9f29 100644 --- a/Sources/Soto/Services/DocDB/DocDB_api.swift +++ b/Sources/Soto/Services/DocDB/DocDB_api.swift @@ -81,9 +81,9 @@ public struct DocDB: AWSService { "us-east-2": "rds-fips.us-east-2.amazonaws.com", "us-gov-east-1": "rds.us-gov-east-1.amazonaws.com", "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com", - "us-iso-east-1": "rds-fips.us-iso-east-1.c2s.ic.gov", - "us-iso-west-1": "rds-fips.us-iso-west-1.c2s.ic.gov", - "us-isob-east-1": "rds-fips.us-isob-east-1.sc2s.sgov.gov", + "us-iso-east-1": "rds.us-iso-east-1.c2s.ic.gov", + "us-iso-west-1": "rds.us-iso-west-1.c2s.ic.gov", + "us-isob-east-1": "rds.us-isob-east-1.sc2s.sgov.gov", "us-west-1": "rds-fips.us-west-1.amazonaws.com", "us-west-2": "rds-fips.us-west-2.amazonaws.com" ]) diff --git 
a/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift index cee918f162..f34a6794bd 100644 --- a/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift +++ b/Sources/Soto/Services/DynamoDB/DynamoDB_api.swift @@ -125,7 +125,7 @@ public struct DynamoDB: AWSService { ) } - /// The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. 
Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. + /// The BatchWriteItem operation puts or deletes multiple items in one or more tables. A single call to BatchWriteItem can transmit up to 16MB of data over the network, consisting of up to 25 item put or delete operations. While individual items can be up to 400 KB once stored, it's important to note that an item's representation might be greater than 400KB while being sent in DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types. BatchWriteItem cannot update items. If you perform a BatchWriteItem operation on an existing item, that item's values will be overwritten by the operation and it will appear like it was updated. To update items, we recommend you use the UpdateItem action. The individual PutItem and DeleteItem operations specified in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any requested operations fail because the table's provisioned throughput is exceeded or an internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items until all items have been processed. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. With BatchWriteItem, you can efficiently write or delete large amounts of data, such as from Amazon EMR, or copy data from another database into DynamoDB. In order to improve performance with these large-scale operations, BatchWriteItem does not behave in the same way as individual PutItem and DeleteItem calls would. For example, you cannot specify conditions on individual put and delete requests, and BatchWriteItem does not return deleted items in the response. If you use a programming language that supports concurrency, you can use threads to write items in parallel. Your application must include the necessary logic to manage the threads. 
With languages that don't support threading, you must update or delete the specified items one at a time. In both situations, BatchWriteItem performs the specified put and delete operations in parallel, giving you the power of the thread pool approach without having to introduce complexity into your application. Parallel processing reduces latency, but each specified put and delete request consumes the same number of write capacity units whether it is processed in parallel or not. Delete operations on nonexistent items consume one write capacity unit. If one or more of the following is true, DynamoDB rejects the entire batch write operation: One or more tables specified in the BatchWriteItem request does not exist. Primary key attributes specified on an item in the request do not match those in the corresponding table's primary key schema. You try to perform multiple operations on the same item in the same BatchWriteItem request. For example, you cannot put and delete the same item in the same BatchWriteItem request. Your request contains at least two items with identical hash and range keys (which essentially is two put operations). There are more than 25 requests in the batch. Any individual item in a batch exceeds 400 KB. The total request size exceeds 16 MB. Any individual items with keys exceeding the key length limits. For a partition key, the limit is 2048 bytes and for a sort key, the limit is 1024 bytes. @Sendable public func batchWriteItem(_ input: BatchWriteItemInput, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchWriteItemOutput { return try await self.client.execute( @@ -155,7 +155,7 @@ public struct DynamoDB: AWSService { ) } - /// Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. If you want to add a new replica table to a global table, each of the following conditions must be true: The table must have the same primary key as all of the other replicas. The table must have the same name as all of the other replicas. The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item. None of the replica tables in the global table can contain any data. If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). If local secondary indexes are specified, then the following conditions must also be met: The local secondary indexes must have the same name. The local secondary indexes must have the same hash key and sort key (if present). Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes. 
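(Aside, not part of the generated patch: the BatchWriteItem documentation above describes looping on UnprocessedItems with exponential backoff. The following is a minimal sketch of that pattern against Soto's generated async API; it assumes Soto's memberwise initializers, assumes the request array holds at most 25 write requests, and uses a placeholder table name supplied by the caller.)

import SotoDynamoDB

/// Sketch: submit write requests and resubmit anything DynamoDB reports as unprocessed,
/// backing off between attempts as the BatchWriteItem docs recommend.
func writeAll(_ requests: [DynamoDB.WriteRequest], to table: String, using dynamoDB: DynamoDB) async throws {
    var pending: [String: [DynamoDB.WriteRequest]] = [table: requests] // assumes <= 25 requests
    var delay: UInt64 = 100_000_000 // 100 ms, doubled on each retry

    while !pending.isEmpty {
        let output = try await dynamoDB.batchWriteItem(.init(requestItems: pending))
        // Items the service could not process come back in unprocessedItems.
        guard let unprocessed = output.unprocessedItems, !unprocessed.isEmpty else { return }
        pending = unprocessed
        try await Task.sleep(nanoseconds: delay) // exponential backoff before resubmitting
        delay = min(delay * 2, 5_000_000_000)
    }
}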
If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table. + /// Creates a global table from an existing table. A global table creates a replication relationship between two or more DynamoDB tables with the same table name in the provided Regions. This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. If you want to add a new replica table to a global table, each of the following conditions must be true: The table must have the same primary key as all of the other replicas. The table must have the same name as all of the other replicas. The table must have DynamoDB Streams enabled, with the stream containing both the new and the old images of the item. None of the replica tables in the global table can contain any data. If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). If local secondary indexes are specified, then the following conditions must also be met: The local secondary indexes must have the same name. The local secondary indexes must have the same hash key and sort key (if present). Write capacity settings should be set consistently across your replica tables and secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the write capacity settings for all of your global tables replicas and indexes. If you prefer to manage write capacity settings manually, you should provision equal replicated write capacity units to your replica tables. You should also provision equal replicated write capacity units to matching secondary indexes across your global table. @Sendable public func createGlobalTable(_ input: CreateGlobalTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGlobalTableOutput { return try await self.client.execute( @@ -230,7 +230,7 @@ public struct DynamoDB: AWSService { ) } - /// The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. This operation only applies to Version 2019.11.21 (Current) of global tables. DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. When you delete a table, any indexes on that table are also deleted. 
If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table. + /// The DeleteTable operation deletes a table and all of its items. After a DeleteTable request, the specified table is in the DELETING state until DynamoDB completes the deletion. If the table is in the ACTIVE state, you can delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException. If the specified table does not exist, DynamoDB returns a ResourceNotFoundException. If table is already in the DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the DELETING state until the table deletion is complete. When you delete a table, any indexes on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the corresponding stream on that table goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use the DescribeTable action to check the status of the table. @Sendable public func deleteTable(_ input: DeleteTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTableOutput { return try await self.client.execute( @@ -314,7 +314,7 @@ public struct DynamoDB: AWSService { ) } - /// Returns information about the specified global table. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. + /// Returns information about the specified global table. This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. @Sendable public func describeGlobalTable(_ input: DescribeGlobalTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeGlobalTableOutput { return try await self.client.execute( @@ -329,7 +329,7 @@ public struct DynamoDB: AWSService { ) } - /// Describes Region-specific settings for a global table. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. + /// Describes Region-specific settings for a global table. 
This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. @Sendable public func describeGlobalTableSettings(_ input: DescribeGlobalTableSettingsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeGlobalTableSettingsOutput { return try await self.client.execute( @@ -387,7 +387,7 @@ public struct DynamoDB: AWSService { ) } - /// Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table. This operation only applies to Version 2019.11.21 (Current) of global tables. If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again. + /// Returns information about the table, including the current status of the table, when it was created, the primary key schema, and any indexes on the table. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you issue a DescribeTable request immediately after a CreateTable request, DynamoDB might return a ResourceNotFoundException. This is because DescribeTable uses an eventually consistent query, and the metadata for your table might not be available at that moment. Wait for a few seconds, and then try the DescribeTable request again. @Sendable public func describeTable(_ input: DescribeTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTableOutput { return try await self.client.execute( @@ -402,7 +402,7 @@ public struct DynamoDB: AWSService { ) } - /// Describes auto scaling settings across replicas of the global table at once. This operation only applies to Version 2019.11.21 (Current) of global tables. + /// Describes auto scaling settings across replicas of the global table at once. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). @Sendable public func describeTableReplicaAutoScaling(_ input: DescribeTableReplicaAutoScalingInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTableReplicaAutoScalingOutput { return try await self.client.execute( @@ -583,7 +583,7 @@ public struct DynamoDB: AWSService { ) } - /// Lists all global tables that have a replica in the specified Region. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. + /// Lists all global tables that have a replica in the specified Region. 
This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. @Sendable public func listGlobalTables(_ input: ListGlobalTablesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGlobalTablesOutput { return try await self.client.execute( @@ -819,7 +819,7 @@ public struct DynamoDB: AWSService { ) } - /// Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. This operation only applies to Version 2017.11.29 of global tables. If you are using global tables Version 2019.11.21 you can use UpdateTable instead. Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas. If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). The global secondary indexes must have the same provisioned and maximum write capacity units. + /// Adds or removes replicas in the specified global table. The global table must already exist to be able to use this operation. Any replica to be added must be empty, have the same name as the global table, have the same key schema, have DynamoDB Streams enabled, and have the same provisioned and maximum write capacity units. This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). If you are using global tables Version 2019.11.21 you can use UpdateTable instead. Although you can use UpdateGlobalTable to add replicas and remove replicas in a single request, for simplicity we recommend that you issue separate requests for adding or removing replicas. 
If global secondary indexes are specified, then the following conditions must also be met: The global secondary indexes must have the same name. The global secondary indexes must have the same hash key and sort key (if present). The global secondary indexes must have the same provisioned and maximum write capacity units. @Sendable public func updateGlobalTable(_ input: UpdateGlobalTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGlobalTableOutput { return try await self.client.execute( @@ -834,7 +834,7 @@ public struct DynamoDB: AWSService { ) } - /// Updates settings for a global table. This operation only applies to Version 2017.11.29 (Legacy) of global tables. We recommend using Version 2019.11.21 (Current) when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you are using, see Determining the version. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Updating global tables. + /// Updates settings for a global table. This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy). To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables. @Sendable public func updateGlobalTableSettings(_ input: UpdateGlobalTableSettingsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGlobalTableSettingsOutput { return try await self.client.execute( @@ -879,7 +879,7 @@ public struct DynamoDB: AWSService { ) } - /// Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table. This operation only applies to Version 2019.11.21 (Current) of global tables. You can only perform one of the following operations at once: Modify the provisioned throughput settings of the table. Remove a global secondary index from the table. Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations. UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete. + /// Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). You can only perform one of the following operations at once: Modify the provisioned throughput settings of the table. Remove a global secondary index from the table. Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations. UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete. 
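(Aside, not part of the generated patch: the UpdateTable documentation above notes the call is asynchronous and that the table moves from ACTIVE to UPDATING while the change applies. A hedged sketch of one such change follows, assuming a table that uses provisioned capacity; the table name, capacity values, and the `.active` enum case are assumptions drawn from the DynamoDB model rather than from this diff.)

import SotoDynamoDB

/// Sketch: raise provisioned throughput, then poll describeTable until the table is ACTIVE again,
/// since another UpdateTable request is rejected while the table is UPDATING.
func scaleUp(table: String, dynamoDB: DynamoDB) async throws {
    _ = try await dynamoDB.updateTable(.init(
        provisionedThroughput: .init(readCapacityUnits: 50, writeCapacityUnits: 50),
        tableName: table
    ))
    while try await dynamoDB.describeTable(.init(tableName: table)).table?.tableStatus != .active {
        try await Task.sleep(nanoseconds: 2_000_000_000) // wait for the update to settle
    }
}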
@Sendable public func updateTable(_ input: UpdateTableInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTableOutput { return try await self.client.execute( @@ -894,7 +894,7 @@ public struct DynamoDB: AWSService { ) } - /// Updates auto scaling settings on your global tables at once. This operation only applies to Version 2019.11.21 (Current) of global tables. + /// Updates auto scaling settings on your global tables at once. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). @Sendable public func updateTableReplicaAutoScaling(_ input: UpdateTableReplicaAutoScalingInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTableReplicaAutoScalingOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift index 1e8625036d..ce778aec8b 100644 --- a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift +++ b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift @@ -1396,7 +1396,7 @@ extension DynamoDB { public struct CreateTableInput: AWSEncodableShape { /// An array of attributes that describe the key schema for the table and indexes. public let attributeDefinitions: [AttributeDefinition] - /// Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. + /// Controls how you are charged for read and write throughput and how you manage capacity. This setting can be changed later. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode. public let billingMode: BillingMode? /// Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table. public let deletionProtectionEnabled: Bool? @@ -1410,7 +1410,7 @@ extension DynamoDB { public let onDemandThroughput: OnDemandThroughput? /// Represents the provisioned throughput settings for a specified table or index. The settings can be modified using the UpdateTable operation. If you set BillingMode as PROVISIONED, you must specify this property. If you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide. public let provisionedThroughput: ProvisionedThroughput? - /// An Amazon Web Services resource-based policy document in JSON format that will be attached to the table. When you attach a resource-based policy while creating a table, the policy application is strongly consistent. The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations. + /// An Amazon Web Services resource-based policy document in JSON format that will be attached to the table. 
When you attach a resource-based policy while creating a table, the policy application is strongly consistent. The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations. You need to specify the CreateTable and PutResourcePolicy IAM actions for authorizing a user to create a table with a resource-based policy. public let resourcePolicy: String? /// Represents the settings used to enable server-side encryption. public let sseSpecification: SSESpecification? @@ -1696,7 +1696,7 @@ extension DynamoDB { public struct DeleteItemOutput: AWSDecodableShape { /// A map of attribute names to AttributeValue objects, representing the item as it appeared before the DeleteItem operation. This map appears in the response only if ReturnValues was specified as ALL_OLD in the request. public let attributes: [String: AttributeValue]? - /// The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the DeleteItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned capacity mode in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// Information about item collections, if any, that were affected by the DeleteItem operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics parameter was specified. If the table does not have any local secondary indexes, this information is not returned in the response. Each ItemCollectionMetrics element consists of: ItemCollectionKey - The partition key value of the item collection. This is the same as the partition key value of the item itself. SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit. The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate. public let itemCollectionMetrics: ItemCollectionMetrics? @@ -2732,7 +2732,7 @@ extension DynamoDB { } public struct GetItemOutput: AWSDecodableShape { - /// The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the GetItem operation. 
The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// A map of attribute names to AttributeValue objects, as specified by ProjectionExpression. public let item: [String: AttributeValue]? @@ -4134,7 +4134,7 @@ extension DynamoDB { public struct PutItemOutput: AWSDecodableShape { /// The attribute values as they appeared before the PutItem operation, but only if ReturnValues is specified as ALL_OLD in the request. Each element consists of an attribute name and an attribute value. public let attributes: [String: AttributeValue]? - /// The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the PutItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unity consumption for write operations in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// Information about item collections, if any, that were affected by the PutItem operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics parameter was specified. If the table does not have any local secondary indexes, this information is not returned in the response. Each ItemCollectionMetrics element consists of: ItemCollectionKey - The partition key value of the item collection. This is the same as the partition key value of the item itself. SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit. The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate. public let itemCollectionMetrics: ItemCollectionMetrics? @@ -4333,7 +4333,7 @@ extension DynamoDB { } public struct QueryOutput: AWSDecodableShape { - /// The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the Query operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. 
ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// The number of items in the response. If you used a QueryFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied. If you did not use a filter in the request, then Count and ScannedCount are the same. public let count: Int? @@ -5113,7 +5113,7 @@ extension DynamoDB { } public struct ScanOutput: AWSDecodableShape { - /// The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the Scan operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// The number of items in the response. If you set ScanFilter in the request, then Count is the number of items returned after the filter was applied, and ScannedCount is the number of matching items before the filter was applied. If you did not use a filter in the request, then Count is the same as ScannedCount. public let count: Int? @@ -5859,7 +5859,7 @@ extension DynamoDB { } public struct UpdateGlobalTableSettingsInput: AWSEncodableShape { - /// The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. + /// The billing mode of the global table. If GlobalTableBillingMode is not specified, the global table defaults to PROVISIONED capacity billing mode. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode. public let globalTableBillingMode: BillingMode? /// Represents the settings of a global secondary index for a global table that will be modified. public let globalTableGlobalSecondaryIndexSettingsUpdate: [GlobalTableGlobalSecondaryIndexSettingsUpdate]? @@ -6012,7 +6012,7 @@ extension DynamoDB { public struct UpdateItemOutput: AWSDecodableShape { /// A map of attribute values as they appear before or after the UpdateItem operation, as determined by the ReturnValues parameter. The Attributes map is only present if the update was successful and ReturnValues was specified as something other than NONE in the request. Each element represents one attribute. public let attributes: [String: AttributeValue]? 
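(Aside, not part of the generated patch: as the doc comments above repeat for GetItem, PutItem, Query, and Scan, ConsumedCapacity is only populated when the request sets ReturnConsumedCapacity. A small sketch of that round trip follows; the table name, key names, and the AttributeValue enum case `.s` are assumptions, not taken from this diff.)

import SotoDynamoDB

/// Sketch: ask DynamoDB to report capacity usage for a Query and read it from the output.
func queryWithCapacity(dynamoDB: DynamoDB) async throws {
    let output = try await dynamoDB.query(.init(
        expressionAttributeValues: [":pk": .s("customer#1234")], // placeholder key value
        keyConditionExpression: "PK = :pk",
        returnConsumedCapacity: .total, // without this, consumedCapacity stays nil
        tableName: "Orders"             // placeholder table
    ))
    print("items:", output.count ?? 0,
          "capacity units:", output.consumedCapacity?.capacityUnits ?? 0)
}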
- /// The capacity units consumed by the UpdateItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer Guide. + /// The capacity units consumed by the UpdateItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Capacity unity consumption for write operations in the Amazon DynamoDB Developer Guide. public let consumedCapacity: ConsumedCapacity? /// Information about item collections, if any, that were affected by the UpdateItem operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics parameter was specified. If the table does not have any local secondary indexes, this information is not returned in the response. Each ItemCollectionMetrics element consists of: ItemCollectionKey - The partition key value of the item collection. This is the same as the partition key value of the item itself. SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit. The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate. public let itemCollectionMetrics: ItemCollectionMetrics? @@ -6140,7 +6140,7 @@ extension DynamoDB { public struct UpdateTableInput: AWSEncodableShape { /// An array of attributes that describe the key schema for the table and indexes. If you are adding a new global secondary index to the table, AttributeDefinitions must include the key element(s) of the new index. public let attributeDefinitions: [AttributeDefinition]? - /// Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode. + /// Controls how you are charged for read and write throughput and how you manage capacity. When switching from pay-per-request to provisioned capacity, initial provisioned capacity values must be set. The initial provisioned capacity values are estimated based on the consumed read and write capacity of your table and global secondary indexes over the past 30 minutes. PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode. PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. 
PAY_PER_REQUEST sets the billing mode to On-demand capacity mode. public let billingMode: BillingMode? /// Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table. public let deletionProtectionEnabled: Bool? @@ -6150,7 +6150,7 @@ extension DynamoDB { public let onDemandThroughput: OnDemandThroughput? /// The new provisioned throughput settings for the specified table or index. public let provisionedThroughput: ProvisionedThroughput? - /// A list of replica update actions (create, delete, or update) for the table. This property only applies to Version 2019.11.21 (Current) of global tables. + /// A list of replica update actions (create, delete, or update) for the table. For global tables, this property only applies to global tables using Version 2019.11.21 (Current version). public let replicaUpdates: [ReplicationGroupUpdate]? /// The new server-side encryption settings for the specified table. public let sseSpecification: SSESpecification? diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift index 59b461d01c..1e123098fa 100644 --- a/Sources/Soto/Services/EC2/EC2_api.swift +++ b/Sources/Soto/Services/EC2/EC2_api.swift @@ -101,7 +101,7 @@ public struct EC2: AWSService { // MARK: API Calls - /// Accepts an Elastic IP address transfer. For more information, see Accept a transferred Elastic IP address in the Amazon Virtual Private Cloud User Guide. + /// Accepts an Elastic IP address transfer. For more information, see Accept a transferred Elastic IP address in the Amazon VPC User Guide. @Sendable public func acceptAddressTransfer(_ input: AcceptAddressTransferRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AcceptAddressTransferResult { return try await self.client.execute( @@ -205,7 +205,7 @@ public struct EC2: AWSService { ) } - /// Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account. You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide. If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify it in this operation. For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide. You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance). + /// Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address pool and can be allocated to a different Amazon Web Services account. 
You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon EC2 User Guide. If you release an Elastic IP address, you might be able to recover it. You cannot recover an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify it in this operation. For more information, see Elastic IP Addresses in the Amazon EC2 User Guide. You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance). @Sendable public func allocateAddress(_ input: AllocateAddressRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AllocateAddressResult { return try await self.client.execute( @@ -258,7 +258,7 @@ public struct EC2: AWSService { ) } - /// Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type in the Amazon Elastic Compute Cloud User Guide. You must specify either the IPv6 addresses or the IPv6 address count in the request. You can optionally use Prefix Delegation on the network interface. You must specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide. + /// Assigns one or more IPv6 addresses to the specified network interface. You can specify one or more specific IPv6 addresses, or you can specify the number of IPv6 addresses to be automatically assigned from within the subnet's IPv6 CIDR block range. You can assign as many IPv6 addresses to a network interface as you can assign private IPv4 addresses, and the limit varies per instance type. You must specify either the IPv6 addresses or the IPv6 address count in the request. You can optionally use Prefix Delegation on the network interface. You must specify either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide. @Sendable public func assignIpv6Addresses(_ input: AssignIpv6AddressesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssignIpv6AddressesResult { return try await self.client.execute( @@ -271,7 +271,7 @@ public struct EC2: AWSService { ) } - /// Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. 
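(Aside, not part of the generated patch: the allocateAddress documentation above says an allocated Elastic IP can then be associated with an instance or network interface. A hedged sketch of that flow follows; the instance ID is a placeholder and the `.vpc` domain case is assumed from the EC2 model.)

import SotoEC2

/// Sketch: allocate an Elastic IP from Amazon's pool and associate it with an instance.
func attachElasticIP(to instanceId: String, ec2: EC2) async throws {
    let allocation = try await ec2.allocateAddress(.init(domain: .vpc))
    print("allocated", allocation.publicIp ?? "?", allocation.allocationId ?? "?")
    // Releasing the address later returns it to the pool; recovery after release is best effort.
    _ = try await ec2.associateAddress(.init(
        allocationId: allocation.allocationId,
        instanceId: instanceId
    ))
}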
For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide. When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved. Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete. You must specify either the IP addresses or the IP address count in the request. You can optionally use Prefix Delegation on the network interface. You must specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide. + /// Assigns one or more secondary private IP addresses to the specified network interface. You can specify one or more specific secondary IP addresses, or you can specify the number of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. The number of secondary IP addresses that you can assign to an instance varies by instance type. For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon EC2 User Guide. When you move a secondary private IP address to another network interface, any Elastic IP address that is associated with the IP address is also moved. Remapping an IP address is an asynchronous operation. When you move an IP address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance metadata to confirm that the remapping is complete. You must specify either the IP addresses or the IP address count in the request. You can optionally use Prefix Delegation on the network interface. You must specify either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide. @Sendable public func assignPrivateIpAddresses(_ input: AssignPrivateIpAddressesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssignPrivateIpAddressesResult { return try await self.client.execute( @@ -284,7 +284,7 @@ public struct EC2: AWSService { ) } - /// Assigns one or more private IPv4 addresses to a private NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. + /// Assigns private IPv4 addresses to a private NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. @Sendable public func assignPrivateNatGatewayAddress(_ input: AssignPrivateNatGatewayAddressRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssignPrivateNatGatewayAddressResult { return try await self.client.execute( @@ -323,7 +323,7 @@ public struct EC2: AWSService { ) } - /// Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC. After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. 
You can explicitly renew the lease using the operating system on the instance. For more information, see DHCP options sets in the Amazon VPC User Guide. + /// Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC. After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance. For more information, see DHCP option sets in the Amazon VPC User Guide. @Sendable public func associateDhcpOptions(_ input: AssociateDhcpOptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -411,7 +411,7 @@ public struct EC2: AWSService { ) } - /// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide. When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. + /// Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, see Work with NAT gateways in the Amazon VPC User Guide. By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide. When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. @Sendable public func associateNatGatewayAddress(_ input: AssociateNatGatewayAddressRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateNatGatewayAddressResult { return try await self.client.execute( @@ -489,7 +489,7 @@ public struct EC2: AWSService { ) } - /// Associates a branch network interface with a trunk network interface. Before you create the association, run the create-network-interface command and set --interface-type to trunk. 
You must also create a network interface for each branch network interface that you want to associate with the trunk network interface. + /// Associates a branch network interface with a trunk network interface. Before you create the association, use CreateNetworkInterface command and set the interface type to trunk. You must also create a network interface for each branch network interface that you want to associate with the trunk network interface. @Sendable public func associateTrunkInterface(_ input: AssociateTrunkInterfaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateTrunkInterfaceResult { return try await self.client.execute( @@ -502,7 +502,7 @@ public struct EC2: AWSService { ) } - /// Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP). You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block. For more information about associating CIDR blocks with your VPC and applicable restrictions, see IP addressing for your VPCs and subnets in the Amazon VPC User Guide. + /// Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that you provisioned through bring your own IP addresses (BYOIP). You must specify one of the following in the request: an IPv4 CIDR block, an IPv6 pool, or an Amazon-provided IPv6 CIDR block. For more information about associating CIDR blocks with your VPC and applicable restrictions, see IP addressing for your VPCs and subnets in the Amazon VPC User Guide. @Sendable public func associateVpcCidrBlock(_ input: AssociateVpcCidrBlockRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateVpcCidrBlockResult { return try await self.client.execute( @@ -751,7 +751,7 @@ public struct EC2: AWSService { ) } - /// Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace. For more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide. + /// Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace. For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide. @Sendable public func cancelReservedInstancesListing(_ input: CancelReservedInstancesListingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelReservedInstancesListingResult { return try await self.client.execute( @@ -829,7 +829,7 @@ public struct EC2: AWSService { ) } - /// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default Key Management Service (KMS) KMS key; however, you can specify a different KMS key. 
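(Aside, not part of the generated patch: the copySnapshot documentation above explains that copies of unencrypted snapshots stay unencrypted unless encryption is requested, and that a non-default KMS key can be supplied. A minimal sketch follows; the source Region, snapshot ID, and KMS key alias are placeholders.)

import SotoEC2

/// Sketch: copy a snapshot into the client's Region, encrypting the copy with a customer managed key.
func copyEncrypted(snapshotId: String, ec2: EC2) async throws -> String? {
    let result = try await ec2.copySnapshot(.init(
        description: "cross-Region copy",
        encrypted: true,              // otherwise an unencrypted source stays unencrypted
        kmsKeyId: "alias/backup-key", // placeholder; the default KMS key is used if omitted
        sourceRegion: "us-east-1",    // placeholder source Region
        sourceSnapshotId: snapshotId
    ))
    return result.snapshotId
}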
To copy an encrypted snapshot that has been shared from another account, you must have permissions for the KMS key used to encrypt the snapshot. Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide. Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose. For more information, see Copy an Amazon EBS snapshot in the Amazon EBS User Guide. + /// Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a snapshot within the same Region, from one Region to another, or from a Region to an Outpost. You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within the same Outpost. You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs). When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. Copies of unencrypted snapshots remain unencrypted, unless you enable encryption for the snapshot copy operation. By default, encrypted snapshot copies use the default KMS key; however, you can specify a different KMS key. To copy an encrypted snapshot that has been shared from another account, you must have permissions for the KMS key used to encrypt the snapshot. Snapshots copied to an Outpost are encrypted by default using the default encryption key for the Region, or a different key that you specify in the request using KmsKeyId. Outposts do not support unencrypted snapshots. For more information, see Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide. Snapshots created by copying another snapshot have an arbitrary volume ID that should not be used for any purpose. For more information, see Copy an Amazon EBS snapshot in the Amazon EBS User Guide. @Sendable public func copySnapshot(_ input: CopySnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CopySnapshotResult { return try await self.client.execute( @@ -866,8 +866,9 @@ public struct EC2: AWSService { ) } - /// Creates a Capacity Reservation Fleet. For more information, see Create a Capacity - /// Reservation Fleet in the Amazon EC2 User Guide. + /// Creates a Capacity Reservation Fleet. For more information, see Create a + /// Capacity Reservation Fleet in the + /// Amazon EC2 User Guide. @Sendable public func createCapacityReservationFleet(_ input: CreateCapacityReservationFleetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCapacityReservationFleetResult { return try await self.client.execute( @@ -994,7 +995,7 @@ public struct EC2: AWSService { /// Creates a custom set of DHCP options. After you create a DHCP option set, you associate /// it with a VPC. After you associate a DHCP option set with a VPC, all existing and newly - /// launched instances in the VPC use this set of DHCP options. The following are the individual DHCP options you can specify. For more information, see DHCP options sets in the Amazon VPC User Guide. domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify region.compute.internal. Otherwise, specify a custom domain name. This value is used to complete unqualified DNS hostnames.
Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP option set is associated with a VPC that has instances running operating systems that treat the value as a single domain, specify only one domain name. domain-name-servers - The IP addresses of up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a single parameter, separate the IP addresses using commas. To have your instances receive custom DNS hostnames as specified in domain-name, you must specify a custom DNS server. ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four IPv4 addresses and four IPv6 addresses). netbios-name-servers - The IP addresses of up to four NetBIOS name servers. netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2. Broadcast and multicast are not supported. For more information about NetBIOS node types, see RFC 2132. ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed. + /// launched instances in the VPC use this set of DHCP options. The following are the individual DHCP options you can specify. For more information, see DHCP option sets in the Amazon VPC User Guide. domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify region.compute.internal. Otherwise, specify a custom domain name. This value is used to complete unqualified DNS hostnames. Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP option set is associated with a VPC that has instances running operating systems that treat the value as a single domain, specify only one domain name. domain-name-servers - The IP addresses of up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a single parameter, separate the IP addresses using commas. To have your instances receive custom DNS hostnames as specified in domain-name, you must specify a custom DNS server. ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four IPv4 addresses and four IPv6 addresses). netbios-name-servers - The IP addresses of up to four NetBIOS name servers. netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2. Broadcast and multicast are not supported. For more information about NetBIOS node types, see RFC 2132. ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. 
If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed. @Sendable public func createDhcpOptions(_ input: CreateDhcpOptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDhcpOptionsResult { return try await self.client.execute( @@ -1036,7 +1037,7 @@ public struct EC2: AWSService { ) } - /// Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC. Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow log records in the Amazon Virtual Private Cloud User Guide. When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket. For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide. + /// Creates one or more flow logs to capture information about IP traffic for a specific network interface, subnet, or VPC. Flow log data for a monitored network interface is recorded as flow log records, which are log events consisting of fields that describe the traffic flow. For more information, see Flow log records in the Amazon VPC User Guide. When publishing to CloudWatch Logs, flow log records are published to a log group, and each network interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all of the monitored network interfaces are published to a single log file object that is stored in the specified bucket. For more information, see VPC Flow Logs in the Amazon VPC User Guide. @Sendable public func createFlowLogs(_ input: CreateFlowLogsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFlowLogsResult { return try await self.client.execute( @@ -1193,7 +1194,7 @@ public struct EC2: AWSService { ) } - /// Creates a launch template. A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request. For more information, see Launch an instance from a launch template in the Amazon Elastic Compute Cloud User Guide. To clone an existing launch template as the basis for a new launch template, use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. For more information, see Create a launch template from an existing launch template in the Amazon Elastic Compute Cloud User Guide. + /// Creates a launch template. A launch template contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify a launch template instead of providing the launch parameters in the request. For more information, see Launch an instance from a launch template in the Amazon EC2 User Guide. To clone an existing launch template as the basis for a new launch template, use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. For more information, see Create a launch template from an existing launch template in the Amazon EC2 User Guide. 
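// A minimal Soto usage sketch for the createLaunchTemplate operation documented above, assuming an
// already-configured `AWSClient` named `client`; the AMI ID, template name, and the request member
// names used here (`launchTemplateData`, `launchTemplateName`, `imageId`) are illustrative
// assumptions, not values taken from this patch.
let ec2 = EC2(client: client, region: .useast1)
let launchTemplateRequest = CreateLaunchTemplateRequest(
    launchTemplateData: RequestLaunchTemplateData(imageId: "ami-0123456789abcdef0"),
    launchTemplateName: "example-template"
)
let launchTemplateResult = try await ec2.createLaunchTemplate(launchTemplateRequest)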
@Sendable public func createLaunchTemplate(_ input: CreateLaunchTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateLaunchTemplateResult { return try await self.client.execute( @@ -1206,7 +1207,7 @@ public struct EC2: AWSService { ) } - /// Creates a new version of a launch template. You must specify an existing launch template, either by name or ID. You can determine whether the new version inherits parameters from a source version, and add or overwrite parameters as needed. Launch template versions are numbered in the order in which they are created. You can't specify, change, or replace the numbering of launch template versions. Launch templates are immutable; after you create a launch template, you can't modify it. Instead, you can create a new version of the launch template that includes the changes that you require. For more information, see Modify a launch template (manage launch template versions) in the Amazon Elastic Compute Cloud User Guide. + /// Creates a new version of a launch template. You must specify an existing launch template, either by name or ID. You can determine whether the new version inherits parameters from a source version, and add or overwrite parameters as needed. Launch template versions are numbered in the order in which they are created. You can't specify, change, or replace the numbering of launch template versions. Launch templates are immutable; after you create a launch template, you can't modify it. Instead, you can create a new version of the launch template that includes the changes that you require. For more information, see Modify a launch template (manage launch template versions) in the Amazon EC2 User Guide. @Sendable public func createLaunchTemplateVersion(_ input: CreateLaunchTemplateVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateLaunchTemplateVersionResult { return try await self.client.execute( @@ -1354,7 +1355,7 @@ public struct EC2: AWSService { ) } - /// Creates a network interface in the specified subnet. The number of IP addresses you can assign to a network interface varies by instance type. For more information, see IP Addresses Per ENI Per Instance Type in the Amazon Virtual Private Cloud User Guide. For more information about network interfaces, see Elastic network interfaces in the Amazon Elastic Compute Cloud User Guide. + /// Creates a network interface in the specified subnet. The number of IP addresses you can assign to a network interface varies by instance type. For more information about network interfaces, see Elastic network interfaces in the Amazon EC2 User Guide. @Sendable public func createNetworkInterface(_ input: CreateNetworkInterfaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateNetworkInterfaceResult { return try await self.client.execute( @@ -1406,7 +1407,7 @@ public struct EC2: AWSService { ) } - /// Replaces the EBS-backed root volume for a running instance with a new volume that is restored to the original root volume's launch state, that is restored to a specific snapshot taken from the original root volume, or that is restored from an AMI that has the same key characteristics as that of the instance. For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide. 
+ /// Replaces the EBS-backed root volume for a running instance with a new volume that is restored to the original root volume's launch state, that is restored to a specific snapshot taken from the original root volume, or that is restored from an AMI that has the same key characteristics as that of the instance. For more information, see Replace a root volume in the Amazon EC2 User Guide. @Sendable public func createReplaceRootVolumeTask(_ input: CreateReplaceRootVolumeTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateReplaceRootVolumeTaskResult { return try await self.client.execute( @@ -1421,8 +1422,7 @@ public struct EC2: AWSService { /// Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance /// Marketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your - /// Standard Reserved Instances, you can use the DescribeReservedInstances operation. Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. Convertible Reserved Instances cannot be sold. The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances. To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation. For more information, see Reserved Instance Marketplace in the - /// Amazon EC2 User Guide. + /// Standard Reserved Instances, you can use the DescribeReservedInstances operation. Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. Convertible Reserved Instances cannot be sold. The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances. To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance Marketplace. After completing the registration process, you can create a Reserved Instance Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price to receive for them. Your Standard Reserved Instance listings then become available for purchase. To view the details of your Standard Reserved Instance listing, you can use the DescribeReservedInstancesListings operation. For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide. @Sendable public func createReservedInstancesListing(_ input: CreateReservedInstancesListingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateReservedInstancesListingResult { return try await self.client.execute( @@ -1498,7 +1498,7 @@ public struct EC2: AWSService { ) } - /// Creates a snapshot of an EBS volume and stores it in Amazon S3. 
You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance. You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost. When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot. You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending. When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot. Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide. For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon EBS User Guide. + /// Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance. You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost. When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot. You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending. When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot. Snapshots that are taken from encrypted volumes are automatically encrypted. 
Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide. For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide. @Sendable public func createSnapshot(_ input: CreateSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> Snapshot { return try await self.client.execute( @@ -1524,7 +1524,7 @@ public struct EC2: AWSService { ) } - /// Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances. + /// Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide. @Sendable public func createSpotDatafeedSubscription(_ input: CreateSpotDatafeedSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSpotDatafeedSubscriptionResult { return try await self.client.execute( @@ -1563,7 +1563,7 @@ public struct EC2: AWSService { ) } - /// Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations in the Amazon Virtual Private Cloud User Guide and Assign prefixes to network interfaces in the Amazon Elastic Compute Cloud User Guide. + /// Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations in the Amazon VPC User Guide and Assign prefixes to network interfaces in the Amazon EC2 User Guide. @Sendable public func createSubnetCidrReservation(_ input: CreateSubnetCidrReservationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSubnetCidrReservationResult { return try await self.client.execute( @@ -1589,7 +1589,7 @@ public struct EC2: AWSService { ) } - /// Creates a Traffic Mirror filter. A Traffic Mirror filter is a set of rules that defines the traffic to mirror. By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule to add Traffic Mirror rules to the filter. The rules you add define what traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices to mirror supported network services. + /// Creates a Traffic Mirror filter. A Traffic Mirror filter is a set of rules that defines the traffic to mirror. By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule to add Traffic Mirror rules to the filter. The rules you add define what traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices to mirror supported network services. @Sendable public func createTrafficMirrorFilter(_ input: CreateTrafficMirrorFilterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTrafficMirrorFilterResult { return try await self.client.execute( @@ -1667,7 +1667,7 @@ public struct EC2: AWSService { ) } - /// Creates a Connect peer for a specified transit gateway Connect attachment between a transit gateway and an appliance. The peer address and transit gateway address must be the same IP address family (IPv4 or IPv6). For more information, see Connect peers in the Transit Gateways Guide. 
+ /// Creates a Connect peer for a specified transit gateway Connect attachment between a transit gateway and an appliance. The peer address and transit gateway address must be the same IP address family (IPv4 or IPv6). For more information, see Connect peers in the Amazon Web Services Transit Gateways Guide. @Sendable public func createTransitGatewayConnectPeer(_ input: CreateTransitGatewayConnectPeerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTransitGatewayConnectPeerResult { return try await self.client.execute( @@ -1836,7 +1836,7 @@ public struct EC2: AWSService { ) } - /// Creates an EBS volume that can be attached to an instance in the same Availability Zone. You can create a new empty volume or restore a volume from an EBS snapshot. Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume. You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon EBS User Guide. You can tag your volumes during creation. For more information, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide. For more information, see Create an Amazon EBS volume in the Amazon EBS User Guide. + /// Creates an EBS volume that can be attached to an instance in the same Availability Zone. You can create a new empty volume or restore a volume from an EBS snapshot. Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume. You can create encrypted volumes. Encrypted volumes must be attached to instances that support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically encrypted. For more information, see Amazon EBS encryption in the Amazon EBS User Guide. You can tag your volumes during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide. For more information, see Create an Amazon EBS volume in the Amazon EBS User Guide. @Sendable public func createVolume(_ input: CreateVolumeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> Volume { return try await self.client.execute( @@ -1877,7 +1877,7 @@ public struct EC2: AWSService { ) } - /// Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS topic to receive notifications. For more information, see Create a Topic in the Amazon Simple Notification Service Developer Guide. You can create a connection notification for interface endpoints only. + /// Creates a connection notification for a specified VPC endpoint or VPC endpoint service. A connection notification notifies you of specific endpoint events. You must create an SNS topic to receive notifications. For more information, see Creating an Amazon SNS topic in the Amazon SNS Developer Guide. You can create a connection notification for interface endpoints only. 
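// A hedged sketch of the createVpcEndpointConnectionNotification call documented above, assuming
// `ec2` is a configured EC2 service object and that the request exposes `connectionEvents`,
// `connectionNotificationArn`, and `vpcEndpointId` members (assumed names); the SNS topic ARN and
// endpoint ID are placeholders.
let notificationRequest = CreateVpcEndpointConnectionNotificationRequest(
    connectionEvents: ["Accept", "Delete"],
    connectionNotificationArn: "arn:aws:sns:us-east-1:111122223333:endpoint-events",
    vpcEndpointId: "vpce-0123456789abcdef0"
)
let notification = try await ec2.createVpcEndpointConnectionNotification(notificationRequest)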
@Sendable public func createVpcEndpointConnectionNotification(_ input: CreateVpcEndpointConnectionNotificationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateVpcEndpointConnectionNotificationResult { return try await self.client.execute( @@ -1906,7 +1906,7 @@ public struct EC2: AWSService { /// Requests a VPC peering connection between two VPCs: a requester VPC that you own and /// an accepter VPC with which to create the connection. The accepter VPC can belong to - /// another Amazon Web Services account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks. Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide. The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected. If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed. + /// another Amazon Web Services account and can be in a different Region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks. Limitations and rules apply to a VPC peering connection. For more information, see the VPC peering limitations in the VPC Peering Guide. The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected. If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed. @Sendable public func createVpcPeeringConnection(_ input: CreateVpcPeeringConnectionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateVpcPeeringConnectionResult { return try await self.client.execute( @@ -2223,7 +2223,7 @@ public struct EC2: AWSService { ) } - /// Deletes one or more versions of a launch template. You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate. You can delete up to 200 launch template versions in a single request. To delete more than 200 versions in a single request, use DeleteLaunchTemplate, which deletes the launch template and all of its versions. For more information, see Delete a launch template version in the EC2 User Guide. + /// Deletes one or more versions of a launch template. You can't delete the default version of a launch template; you must first assign a different version as the default. If the default version is the only version for the launch template, you must delete the entire launch template using DeleteLaunchTemplate. You can delete up to 200 launch template versions in a single request. To delete more than 200 versions in a single request, use DeleteLaunchTemplate, which deletes the launch template and all of its versions. For more information, see Delete a launch template version in the Amazon EC2 User Guide. 
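// A minimal sketch of the deleteLaunchTemplateVersions call documented above, assuming `ec2` is a
// configured EC2 service object and that the request exposes `launchTemplateId` and `versions`
// members (assumed names); the template ID and version numbers are placeholders.
let deleteVersionsRequest = DeleteLaunchTemplateVersionsRequest(
    launchTemplateId: "lt-0123456789abcdef0",
    versions: ["2", "3"]
)
let deleteVersionsResult = try await ec2.deleteLaunchTemplateVersions(deleteVersionsRequest)
// The result is expected to report which versions were deleted and which could not be deleted.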
@Sendable public func deleteLaunchTemplateVersions(_ input: DeleteLaunchTemplateVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteLaunchTemplateVersionsResult { return try await self.client.execute( @@ -3051,7 +3051,7 @@ public struct EC2: AWSService { ) } - /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted. + /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted. @Sendable public func describeAddressTransfers(_ input: DescribeAddressTransfersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAddressTransfersResult { return try await self.client.execute( @@ -3103,7 +3103,7 @@ public struct EC2: AWSService { ) } - /// Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to you. If there is an event impacting a zone, you can use this request to view the state and any provided messages for that zone. For more information about Availability Zones, Local Zones, and Wavelength Zones, see Regions and zones in the Amazon Elastic Compute Cloud User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. + /// Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to you. If there is an event impacting a zone, you can use this request to view the state and any provided messages for that zone. For more information about Availability Zones, Local Zones, and Wavelength Zones, see Regions and zones in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. @Sendable public func describeAvailabilityZones(_ input: DescribeAvailabilityZonesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAvailabilityZonesResult { return try await self.client.execute( @@ -3208,7 +3208,7 @@ public struct EC2: AWSService { ) } - /// This action is deprecated. Describes one or more of your linked EC2-Classic instances. This request only returns + /// This action is deprecated. Describes your linked EC2-Classic instances. 
This request only returns /// information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot /// use this request to return information about other instances. @Sendable @@ -3328,7 +3328,9 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your DHCP options sets. For more information, see DHCP options sets in the + /// Describes your DHCP option sets. The default is to describe all your DHCP option sets. + /// Alternatively, you can specify specific DHCP option set IDs or filter the results to + /// include only the DHCP option sets that match specific criteria. For more information, see DHCP option sets in the /// Amazon VPC User Guide. @Sendable public func describeDhcpOptions(_ input: DescribeDhcpOptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDhcpOptionsResult { @@ -3342,7 +3344,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your egress-only internet gateways. + /// Describes your egress-only internet gateways. The default is to describe all your egress-only internet gateways. Alternatively, you can specify specific egress-only internet gateway IDs or filter the results to include only the egress-only internet gateways that match specific criteria. @Sendable public func describeEgressOnlyInternetGateways(_ input: DescribeEgressOnlyInternetGatewaysRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEgressOnlyInternetGatewaysResult { return try await self.client.execute( @@ -3355,7 +3357,7 @@ public struct EC2: AWSService { ) } - /// Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances. Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics. + /// Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances. Describes the Elastic Graphics accelerator associated with your instances. @Sendable public func describeElasticGpus(_ input: DescribeElasticGpusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeElasticGpusResult { return try await self.client.execute( @@ -3760,7 +3762,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your internet gateways. + /// Describes your internet gateways. The default is to describe all your internet gateways. Alternatively, you can specify specific internet gateway IDs or filter the results to include only the internet gateways that match specific criteria. @Sendable public func describeInternetGateways(_ input: DescribeInternetGatewaysRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeInternetGatewaysResult { return try await self.client.execute( @@ -4034,7 +4036,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your NAT gateways. + /// Describes your NAT gateways. The default is to describe all your NAT gateways. Alternatively, you can specify specific NAT gateway IDs or filter the results to include only the NAT gateways that match specific criteria. 
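// A hedged sketch of listing NAT gateways as documented above, assuming `ec2` is a configured EC2
// service object; passing an empty request is assumed to describe all NAT gateways, and the
// `natGateways` / `natGatewayId` result member names are assumptions.
let natGatewaysResult = try await ec2.describeNatGateways(DescribeNatGatewaysRequest())
for gateway in natGatewaysResult.natGateways ?? [] {
    print(gateway.natGatewayId ?? "unknown")
}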
@Sendable public func describeNatGateways(_ input: DescribeNatGatewaysRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeNatGatewaysResult { return try await self.client.execute( @@ -4047,7 +4049,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your network ACLs. For more information, see Network ACLs in the + /// Describes your network ACLs. The default is to describe all your network ACLs. Alternatively, you can specify specific network ACL IDs or filter the results to include only the network ACLs that match specific criteria. For more information, see Network ACLs in the /// Amazon VPC User Guide. @Sendable public func describeNetworkAcls(_ input: DescribeNetworkAclsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeNetworkAclsResult { @@ -4204,7 +4206,7 @@ public struct EC2: AWSService { ) } - /// Describes the Regions that are enabled for your account, or all Regions. For a list of the Regions supported by Amazon EC2, see Amazon Elastic Compute Cloud endpoints and quotas. For information about enabling and disabling Regions for your account, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. + /// Describes the Regions that are enabled for your account, or all Regions. For a list of the Regions supported by Amazon EC2, see Amazon EC2 service endpoints. For information about enabling and disabling Regions for your account, see Specify which Amazon Web Services Regions your account can use in the Amazon Web Services Account Management Reference Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. @Sendable public func describeRegions(_ input: DescribeRegionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeRegionsResult { return try await self.client.execute( @@ -4217,7 +4219,7 @@ public struct EC2: AWSService { ) } - /// Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide. + /// Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon EC2 User Guide. @Sendable public func describeReplaceRootVolumeTasks(_ input: DescribeReplaceRootVolumeTasksRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeReplaceRootVolumeTasksResult { return try await self.client.execute( @@ -4244,7 +4246,7 @@ public struct EC2: AWSService { ) } - /// Describes your account's Reserved Instance listings in the Reserved Instance Marketplace. The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances. As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase. As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. 
The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase. For more information, see Reserved Instance Marketplace in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. + /// Describes your account's Reserved Instance listings in the Reserved Instance Marketplace. The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances. As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase. As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase. For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. @Sendable public func describeReservedInstancesListings(_ input: DescribeReservedInstancesListingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeReservedInstancesListingsResult { return try await self.client.execute( @@ -4257,7 +4259,7 @@ public struct EC2: AWSService { ) } - /// Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned. For more information, see Modifying Reserved Instances in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. + /// Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned. For more information, see Modify Reserved Instances in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. 
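// A minimal sketch of the describeReservedInstancesModifications call documented above, assuming
// `ec2` is a configured EC2 service object; with no parameters the operation is documented to
// return all modification requests, and the `reservedInstancesModifications`,
// `reservedInstancesModificationId`, and `status` member names are assumptions.
let modificationsResult = try await ec2.describeReservedInstancesModifications(
    DescribeReservedInstancesModificationsRequest()
)
for modification in modificationsResult.reservedInstancesModifications ?? [] {
    print(modification.reservedInstancesModificationId ?? "unknown", modification.status ?? "unknown")
}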
@Sendable public func describeReservedInstancesModifications(_ input: DescribeReservedInstancesModificationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeReservedInstancesModificationsResult { return try await self.client.execute( @@ -4270,8 +4272,7 @@ public struct EC2: AWSService { ) } - /// Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used. If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances. For more information, see Reserved Instance Marketplace - /// in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. + /// Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used. If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances. For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. @Sendable public func describeReservedInstancesOfferings(_ input: DescribeReservedInstancesOfferingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeReservedInstancesOfferingsResult { return try await self.client.execute( @@ -4284,7 +4285,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your route tables. Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations. For more information, see Route tables in the + /// Describes your route tables. The default is to describe all your route tables. Alternatively, you can specify specific route table IDs or filter the results to include only the route tables that match specific criteria. Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations. For more information, see Route tables in the /// Amazon VPC User Guide. @Sendable public func describeRouteTables(_ input: DescribeRouteTablesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeRouteTablesResult { @@ -4402,7 +4403,7 @@ public struct EC2: AWSService { ) } - /// Describes the data feed for Spot Instances. For more information, see Spot Instance data feed in the Amazon EC2 User Guide for Linux Instances. 
+ /// Describes the data feed for Spot Instances. For more information, see Spot Instance data feed in the Amazon EC2 User Guide. @Sendable public func describeSpotDatafeedSubscription(_ input: DescribeSpotDatafeedSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSpotDatafeedSubscriptionResult { return try await self.client.execute( @@ -4467,7 +4468,7 @@ public struct EC2: AWSService { ) } - /// Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide for Linux Instances. When you specify a start and end time, the operation returns the prices of the instance types within that time range. It also returns the last price change before the start time, which is the effective price as of the start time. + /// Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide. When you specify a start and end time, the operation returns the prices of the instance types within that time range. It also returns the last price change before the start time, which is the effective price as of the start time. @Sendable public func describeSpotPriceHistory(_ input: DescribeSpotPriceHistoryRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSpotPriceHistoryResult { return try await self.client.execute( @@ -4506,7 +4507,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your subnets. For more information, see Subnets in the + /// Describes your subnets. The default is to describe all your subnets. Alternatively, you can specify specific subnet IDs or filter the results to include only the subnets that match specific criteria. For more information, see Subnets in the /// Amazon VPC User Guide. @Sendable public func describeSubnets(_ input: DescribeSubnetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSubnetsResult { @@ -4533,6 +4534,19 @@ public struct EC2: AWSService { ) } + /// Describe traffic mirror filters that determine the traffic that is mirrored. + @Sendable + public func describeTrafficMirrorFilterRules(_ input: DescribeTrafficMirrorFilterRulesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTrafficMirrorFilterRulesResult { + return try await self.client.execute( + operation: "DescribeTrafficMirrorFilterRules", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes one or more Traffic Mirror filters. @Sendable public func describeTrafficMirrorFilters(_ input: DescribeTrafficMirrorFiltersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTrafficMirrorFiltersResult { @@ -4819,7 +4833,7 @@ public struct EC2: AWSService { ) } - /// Describes the most recent volume modification request for the specified EBS volumes. If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request. You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. + /// Describes the most recent volume modification request for the specified EBS volumes. If a volume has never been modified, some information in the output will be null. 
If a volume has been modified more than once, the output includes only the most recent modification request. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. @Sendable public func describeVolumesModifications(_ input: DescribeVolumesModificationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeVolumesModificationsResult { return try await self.client.execute( @@ -4936,7 +4950,7 @@ public struct EC2: AWSService { ) } - /// Describes your VPC endpoints. + /// Describes your VPC endpoints. The default is to describe all your VPC endpoints. Alternatively, you can specify specific VPC endpoint IDs or filter the results to include only the VPC endpoints that match specific criteria. @Sendable public func describeVpcEndpoints(_ input: DescribeVpcEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeVpcEndpointsResult { return try await self.client.execute( @@ -4949,7 +4963,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your VPC peering connections. + /// Describes your VPC peering connections. The default is to describe all your VPC peering connections. Alternatively, you can specify specific VPC peering connection IDs or filter the results to include only the VPC peering connections that match specific criteria. @Sendable public func describeVpcPeeringConnections(_ input: DescribeVpcPeeringConnectionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeVpcPeeringConnectionsResult { return try await self.client.execute( @@ -4962,7 +4976,7 @@ public struct EC2: AWSService { ) } - /// Describes one or more of your VPCs. + /// Describes your VPCs. The default is to describe all your VPCs. Alternatively, you can specify specific VPC IDs or filter the results to include only the VPCs that match specific criteria. @Sendable public func describeVpcs(_ input: DescribeVpcsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeVpcsResult { return try await self.client.execute( @@ -5083,7 +5097,7 @@ public struct EC2: AWSService { ) } - /// Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide. + /// Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. @Sendable public func disableAddressTransfer(_ input: DisableAddressTransferRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisableAddressTransferResult { return try await self.client.execute( @@ -5502,7 +5516,7 @@ public struct EC2: AWSService { ) } - /// Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide. + /// Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. @Sendable public func enableAddressTransfer(_ input: EnableAddressTransferRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> EnableAddressTransferResult { return try await self.client.execute( @@ -5793,7 +5807,7 @@ public struct EC2: AWSService { ) } - /// Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range. The routes are saved to the specified bucket in a JSON file. For more information, see Export Route Tables to Amazon S3 in Transit Gateways. 
+ /// Exports routes from the specified transit gateway route table to the specified S3 bucket. By default, all routes are exported. Alternatively, you can filter by CIDR range. The routes are saved to the specified bucket in a JSON file. For more information, see Export route tables to Amazon S3 in the Amazon Web Services Transit Gateways Guide. @Sendable public func exportTransitGatewayRoutes(_ input: ExportTransitGatewayRoutesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExportTransitGatewayRoutesResult { return try await self.client.execute( @@ -6582,7 +6596,7 @@ public struct EC2: AWSService { ) } - /// Changes the opt-in status of the Local Zone and Wavelength Zone group for your account. Use DescribeAvailabilityZones to view the value for GroupName. + /// Changes the opt-in status of the specified zone group for your account. @Sendable public func modifyAvailabilityZoneGroup(_ input: ModifyAvailabilityZoneGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyAvailabilityZoneGroupResult { return try await self.client.execute( @@ -6994,8 +7008,7 @@ public struct EC2: AWSService { ) } - /// Modifies the configuration of your Reserved Instances, such as the Availability Zone, instance count, or instance type. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type. For more information, see Modifying Reserved - /// Instances in the Amazon EC2 User Guide. + /// Modifies the configuration of your Reserved Instances, such as the Availability Zone, instance count, or instance type. The Reserved Instances to be modified must be identical, except for Availability Zone, network platform, and instance type. For more information, see Modify Reserved Instances in the Amazon EC2 User Guide. @Sendable public func modifyReservedInstances(_ input: ModifyReservedInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyReservedInstancesResult { return try await self.client.execute( @@ -7242,7 +7255,7 @@ public struct EC2: AWSService { ) } - /// You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes in the Amazon EBS User Guide. When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For more information, see Extend the file system. You can use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a modification using DescribeVolumesModifications. For information about tracking status changes using either method, see Monitor the progress of volume modifications. With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance. After modifying a volume, you must wait at least six hours and ensure that the volume is in the in-use or available state before you can modify the same volume. This is sometimes referred to as a cooldown period. 
+ /// You can modify several parameters of an existing EBS volume, including volume size, volume type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance type, you might be able to apply these changes without stopping the instance or detaching the volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes in the Amazon EBS User Guide. When you complete a resize operation on your volume, you need to extend the volume's file-system size to take advantage of the new storage capacity. For more information, see Extend the file system. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. With previous-generation instance types, resizing an EBS volume might require detaching and reattaching the volume or stopping and restarting the instance. After modifying a volume, you must wait at least six hours and ensure that the volume is in the in-use or available state before you can modify the same volume. This is sometimes referred to as a cooldown period. @Sendable public func modifyVolume(_ input: ModifyVolumeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyVolumeResult { return try await self.client.execute( @@ -7466,7 +7479,7 @@ public struct EC2: AWSService { ) } - /// Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr. Amazon Web Services verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring your own IP addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide. Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool. + /// Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP addresses (BYOIP) and creates a corresponding address pool. After the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr. Amazon Web Services verifies that you own the address range and are authorized to advertise it. You must ensure that the address range is registered to you and that you created an RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. For more information, see Bring your own IP addresses (BYOIP) in the Amazon EC2 User Guide. Provisioning an address range is an asynchronous operation, so the call returns immediately, but the address range is not ready to use until its status changes from pending-provision to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress with either the specific address from the address pool or the ID of the address pool. 
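// Usage sketch (illustrative only): the ModifyVolume flow described above — request a size change,
// then check the most recent modification with DescribeVolumesModifications. Builds on the earlier
// sketch's setup (an `ec2: EC2` service object); the volume ID is a placeholder.
func growVolume(ec2: EC2) async throws {
    let volumeId = "vol-0123456789abcdef0"

    // Ask Amazon EBS to grow the volume to 200 GiB; type and IOPS can be changed the same way.
    _ = try await ec2.modifyVolume(.init(size: 200, volumeId: volumeId))

    // Track the most recent modification request. Note the six-hour cooldown before the same
    // volume can be modified again, and that the file system must still be extended separately.
    let status = try await ec2.describeVolumesModifications(.init(volumeIds: [volumeId]))
    if let state = status.volumesModifications?.first?.modificationState {
        print("volume modification state:", state)
    }
}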
@Sendable public func provisionByoipCidr(_ input: ProvisionByoipCidrRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ProvisionByoipCidrResult { return try await self.client.execute( @@ -7547,7 +7560,7 @@ public struct EC2: AWSService { /// Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower hourly rate compared to On-Demand instance pricing. Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings /// that match your specifications. After you've purchased a Reserved Instance, you can check for your - /// new Reserved Instance with DescribeReservedInstances. To queue a purchase for a future date and time, specify a purchase time. If you do not specify a purchase time, the default is the current time. For more information, see Reserved Instances and Reserved Instance Marketplace in the Amazon EC2 User Guide. + /// new Reserved Instance with DescribeReservedInstances. To queue a purchase for a future date and time, specify a purchase time. If you do not specify a purchase time, the default is the current time. For more information, see Reserved Instances and Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide. @Sendable public func purchaseReservedInstancesOffering(_ input: PurchaseReservedInstancesOfferingRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PurchaseReservedInstancesOfferingResult { return try await self.client.execute( @@ -7612,7 +7625,7 @@ public struct EC2: AWSService { ) } - /// Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated with a supported EC2 instance that receives multicast traffic. For information about supported instances, see Multicast Consideration in Amazon VPC Transit Gateways. After you add the members, use SearchTransitGatewayMulticastGroups to verify that the members were added to the transit gateway multicast group. + /// Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated with a supported EC2 instance that receives multicast traffic. For more information, see Multicast on transit gateways in the Amazon Web Services Transit Gateways Guide. After you add the members, use SearchTransitGatewayMulticastGroups to verify that the members were added to the transit gateway multicast group. @Sendable public func registerTransitGatewayMulticastGroupMembers(_ input: RegisterTransitGatewayMulticastGroupMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterTransitGatewayMulticastGroupMembersResult { return try await self.client.execute( @@ -7625,7 +7638,7 @@ public struct EC2: AWSService { ) } - /// Registers sources (network interfaces) with the specified transit gateway multicast group. A multicast source is a network interface attached to a supported instance that sends multicast traffic. For information about supported instances, see Multicast Considerations in Amazon VPC Transit Gateways. After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast group. + /// Registers sources (network interfaces) with the specified transit gateway multicast group. A multicast source is a network interface attached to a supported instance that sends multicast traffic. For more information about supported instances, see Multicast on transit gateways in the Amazon Web Services Transit Gateways Guide. 
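// Usage sketch (illustrative only): queueing a Reserved Instance purchase for a future date, as
// described for PurchaseReservedInstancesOffering a few hunks above. Assumes an `ec2: EC2` service
// object; the offering ID is a placeholder obtained from DescribeReservedInstancesOfferings.
import Foundation  // for Date

func queueReservedInstancePurchase(ec2: EC2) async throws {
    let result = try await ec2.purchaseReservedInstancesOffering(.init(
        instanceCount: 1,
        purchaseTime: Date(timeIntervalSinceNow: 7 * 24 * 3600),  // omit to purchase immediately
        reservedInstancesOfferingId: "00000000-0000-0000-0000-000000000000"
    ))
    print(result.reservedInstancesId ?? "queued; no reservation ID yet")
}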
After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast group. @Sendable public func registerTransitGatewayMulticastGroupSources(_ input: RegisterTransitGatewayMulticastGroupSourcesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterTransitGatewayMulticastGroupSourcesResult { return try await self.client.execute( @@ -7868,7 +7881,7 @@ public struct EC2: AWSService { ) } - /// Creates a Spot Instance request. For more information, see Spot Instance requests in the Amazon EC2 User Guide for Linux Instances. We strongly discourage using the RequestSpotInstances API because it is a legacy API with no planned investment. For options for requesting Spot Instances, see Which is the best Spot request method to use? in the Amazon EC2 User Guide for Linux Instances. + /// Creates a Spot Instance request. For more information, see Work with Spot Instance in the Amazon EC2 User Guide. We strongly discourage using the RequestSpotInstances API because it is a legacy API with no planned investment. For options for requesting Spot Instances, see Which is the best Spot request method to use? in the Amazon EC2 User Guide. @Sendable public func requestSpotInstances(_ input: RequestSpotInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RequestSpotInstancesResult { return try await self.client.execute( @@ -7934,7 +7947,7 @@ public struct EC2: AWSService { ) } - /// Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped. The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT Instances in the Amazon VPC User Guide. + /// Resets an attribute of an instance to its default value. To reset the kernel or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, the instance can be either running or stopped. The sourceDestCheck attribute controls whether source/destination checking is enabled. The default value is true, which means checking is enabled. This value must be false for a NAT instance to perform NAT. For more information, see NAT instances in the Amazon VPC User Guide. @Sendable public func resetInstanceAttribute(_ input: ResetInstanceAttributeRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -8077,7 +8090,7 @@ public struct EC2: AWSService { ) } - /// Launches the specified number of instances using an AMI for which you have permissions. You can specify a number of options, or leave the default options. The following rules apply: If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request. All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet. Not all instance types support IPv6 addresses. For more information, see Instance types. If you don't specify a security group ID, we use the default security group. For more information, see Security groups. If any of the AMIs have a product code attached for which the user has not subscribed, the request fails. 
You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters. To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances. An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 resources. Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key pairs. For troubleshooting, see What to do if an instance immediately terminates, and Troubleshooting connecting to your instance. + /// Launches the specified number of instances using an AMI for which you have permissions. You can specify a number of options, or leave the default options. The following rules apply: If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request. All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet. Not all instance types support IPv6 addresses. For more information, see Instance types. If you don't specify a security group ID, we use the default security group for the VPC. For more information, see Security groups. If any of the AMIs have a product code attached for which the user has not subscribed, the request fails. You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters. To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances. RunInstances is subject to both request rate limiting and resource rate limiting. For more information, see Request throttling. An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 resources. Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key pairs. For troubleshooting, see What to do if an instance immediately terminates, and Troubleshooting connecting to your instance. @Sendable public func runInstances(_ input: RunInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> Reservation { return try await self.client.execute( @@ -8090,7 +8103,7 @@ public struct EC2: AWSService { ) } - /// Launches the specified Scheduled Instances. 
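// Usage sketch (illustrative only): a minimal RunInstances call following the rules above — no
// subnet or security group is given, so the defaults from the default VPC apply. Assumes an
// `ec2: EC2` service object; the AMI ID is a placeholder.
func launchInstance(ec2: EC2) async throws {
    let reservation = try await ec2.runInstances(.init(
        imageId: "ami-0123456789abcdef0",
        instanceType: .t3Micro,
        maxCount: 1,
        minCount: 1
    ))
    // The instance is ready to use once DescribeInstances reports it in the `running` state.
    print(reservation.instances?.first?.instanceId ?? "no instance returned")
}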
Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances. You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon EC2 User Guide. + /// Launches the specified Scheduled Instances. Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances. You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. @Sendable public func runScheduledInstances(_ input: RunScheduledInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RunScheduledInstancesResult { return try await self.client.execute( @@ -8142,7 +8155,7 @@ public struct EC2: AWSService { ) } - /// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI). In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace. Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks. For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) (Linux instances) or Send a diagnostic interrupt (for advanced users) (Windows instances). + /// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI). In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace. Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks. For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) in the Amazon EC2 User Guide. @Sendable public func sendDiagnosticInterrupt(_ input: SendDiagnosticInterruptRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -8155,7 +8168,7 @@ public struct EC2: AWSService { ) } - /// Starts an Amazon EBS-backed instance that you've previously stopped. Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. 
When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage. Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM. Performing this operation on an instance that uses an instance store as its root device returns an error. If you attempt to start a T3 instance with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated. For more information, see Stop and start your instance in the Amazon EC2 User Guide. + /// Starts an Amazon EBS-backed instance that you've previously stopped. Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage. Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM. Performing this operation on an instance that uses an instance store as its root device returns an error. If you attempt to start a T3 instance with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated. For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide. @Sendable public func startInstances(_ input: StartInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartInstancesResult { return try await self.client.execute( @@ -8207,7 +8220,7 @@ public struct EC2: AWSService { ) } - /// Stops an Amazon EBS-backed instance. For more information, see Stop and start your instance in the Amazon EC2 User Guide. You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide. We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage. You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. 
For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide. When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs. Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide. When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide. + /// Stops an Amazon EBS-backed instance. For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide. You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage. You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide. When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs. Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide. When you stop an instance, we attempt to shut it down forcibly after a short while. If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide. 
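// Usage sketch (illustrative only): the stop/start lifecycle described above, including the
// optional hibernate flag. Assumes an `ec2: EC2` service object and an EBS-backed,
// hibernation-enabled instance; the instance ID is a placeholder.
func stopThenStart(ec2: EC2) async throws {
    let instanceIds = ["i-0123456789abcdef0"]

    // Hibernate if the prerequisites are met; otherwise a normal shutdown occurs.
    let stopped = try await ec2.stopInstances(.init(hibernate: true, instanceIds: instanceIds))
    if let state = stopped.stoppingInstances?.first?.currentState?.name {
        print("stopping, current state:", state)
    }

    // Later, start the instance again; billing resumes with a one-minute minimum.
    let started = try await ec2.startInstances(.init(instanceIds: instanceIds))
    if let state = started.startingInstances?.first?.currentState?.name {
        print("starting, current state:", state)
    }
}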
@Sendable public func stopInstances(_ input: StopInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopInstancesResult { return try await self.client.execute( @@ -8370,7 +8383,7 @@ extension EC2 { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension EC2 { - /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted. + /// Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide. When you transfer an Elastic IP address, there is a two-step handshake between the source and transfer Amazon Web Services accounts. When the source account starts the transfer, the transfer account has seven days to accept the Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to the source account for three days after the transfers have been accepted. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -8523,7 +8536,7 @@ extension EC2 { ) } - /// This action is deprecated. Describes one or more of your linked EC2-Classic instances. This request only returns + /// This action is deprecated. Describes your linked EC2-Classic instances. This request only returns /// information about EC2-Classic instances linked to a VPC through ClassicLink. You cannot /// use this request to return information about other instances. /// Return PaginatorSequence for operation. @@ -8659,7 +8672,9 @@ extension EC2 { ) } - /// Describes one or more of your DHCP options sets. For more information, see DHCP options sets in the + /// Describes your DHCP option sets. The default is to describe all your DHCP option sets. + /// Alternatively, you can specify specific DHCP option set IDs or filter the results to + /// include only the DHCP option sets that match specific criteria. For more information, see DHCP option sets in the /// Amazon VPC User Guide. /// Return PaginatorSequence for operation. /// @@ -8679,7 +8694,7 @@ extension EC2 { ) } - /// Describes one or more of your egress-only internet gateways. + /// Describes your egress-only internet gateways. The default is to describe all your egress-only internet gateways. Alternatively, you can specify specific egress-only internet gateway IDs or filter the results to include only the egress-only internet gateways that match specific criteria. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9099,7 +9114,7 @@ extension EC2 { ) } - /// Describes one or more of your internet gateways. + /// Describes your internet gateways. The default is to describe all your internet gateways. 
Alternatively, you can specify specific internet gateway IDs or filter the results to include only the internet gateways that match specific criteria. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9441,7 +9456,7 @@ extension EC2 { ) } - /// Describes one or more of your NAT gateways. + /// Describes your NAT gateways. The default is to describe all your NAT gateways. Alternatively, you can specify specific NAT gateway IDs or filter the results to include only the NAT gateways that match specific criteria. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9460,7 +9475,7 @@ extension EC2 { ) } - /// Describes one or more of your network ACLs. For more information, see Network ACLs in the + /// Describes your network ACLs. The default is to describe all your network ACLs. Alternatively, you can specify specific network ACL IDs or filter the results to include only the network ACLs that match specific criteria. For more information, see Network ACLs in the /// Amazon VPC User Guide. /// Return PaginatorSequence for operation. /// @@ -9651,7 +9666,7 @@ extension EC2 { ) } - /// Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide. + /// Describes a root volume replacement task. For more information, see Replace a root volume in the Amazon EC2 User Guide. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9670,7 +9685,7 @@ extension EC2 { ) } - /// Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned. For more information, see Modifying Reserved Instances in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. + /// Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned. For more information, see Modify Reserved Instances in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9689,8 +9704,7 @@ extension EC2 { ) } - /// Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used. If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances. For more information, see Reserved Instance Marketplace - /// in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. 
+ /// Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used. If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances. For more information, see Sell in the Reserved Instance Marketplace in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9709,7 +9723,7 @@ extension EC2 { ) } - /// Describes one or more of your route tables. Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations. For more information, see Route tables in the + /// Describes your route tables. The default is to describe all your route tables. Alternatively, you can specify specific route table IDs or filter the results to include only the route tables that match specific criteria. Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations. For more information, see Route tables in the /// Amazon VPC User Guide. /// Return PaginatorSequence for operation. /// @@ -9881,7 +9895,7 @@ extension EC2 { ) } - /// Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide for Linux Instances. When you specify a start and end time, the operation returns the prices of the instance types within that time range. It also returns the last price change before the start time, which is the effective price as of the start time. + /// Describes the Spot price history. For more information, see Spot Instance pricing history in the Amazon EC2 User Guide. When you specify a start and end time, the operation returns the prices of the instance types within that time range. It also returns the last price change before the start time, which is the effective price as of the start time. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -9938,7 +9952,7 @@ extension EC2 { ) } - /// Describes one or more of your subnets. For more information, see Subnets in the + /// Describes your subnets. The default is to describe all your subnets. Alternatively, you can specify specific subnet IDs or filter the results to include only the subnets that match specific criteria. For more information, see Subnets in the /// Amazon VPC User Guide. /// Return PaginatorSequence for operation. /// @@ -10376,7 +10390,7 @@ extension EC2 { ) } - /// Describes the most recent volume modification request for the specified EBS volumes. If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request. 
You can also use CloudWatch Events to check the status of a modification to an EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. + /// Describes the most recent volume modification request for the specified EBS volumes. If a volume has never been modified, some information in the output will be null. If a volume has been modified more than once, the output includes only the most recent modification request. For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -10490,7 +10504,7 @@ extension EC2 { ) } - /// Describes your VPC endpoints. + /// Describes your VPC endpoints. The default is to describe all your VPC endpoints. Alternatively, you can specify specific VPC endpoint IDs or filter the results to include only the VPC endpoints that match specific criteria. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -10509,7 +10523,7 @@ extension EC2 { ) } - /// Describes one or more of your VPC peering connections. + /// Describes your VPC peering connections. The default is to describe all your VPC peering connections. Alternatively, you can specify specific VPC peering connection IDs or filter the results to include only the VPC peering connections that match specific criteria. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -10528,7 +10542,7 @@ extension EC2 { ) } - /// Describes one or more of your VPCs. + /// Describes your VPCs. The default is to describe all your VPCs. Alternatively, you can specify specific VPC IDs or filter the results to include only the VPCs that match specific criteria. /// Return PaginatorSequence for operation. 
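// Usage sketch (illustrative only): consuming one of the PaginatorSequence helpers documented in
// this extension by iterating every page of DescribeVpcs as an AsyncSequence. Assumes an
// `ec2: EC2` service object; the `describeVpcsPaginator` name follows Soto's *Paginator convention.
func listAllVpcIds(ec2: EC2) async throws -> [String] {
    var vpcIds: [String] = []
    for try await page in ec2.describeVpcsPaginator(.init()) {
        vpcIds += (page.vpcs ?? []).compactMap { $0.vpcId }
    }
    return vpcIds
}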
/// /// - Parameters: diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift index 5e27b9ec1a..1efe059762 100644 --- a/Sources/Soto/Services/EC2/EC2_shapes.swift +++ b/Sources/Soto/Services/EC2/EC2_shapes.swift @@ -1253,6 +1253,7 @@ extension EC2 { public static var c7gn8Xlarge: Self { .init(rawValue: "c7gn.8xlarge") } public static var c7gnLarge: Self { .init(rawValue: "c7gn.large") } public static var c7gnMedium: Self { .init(rawValue: "c7gn.medium") } + public static var c7gnMetal: Self { .init(rawValue: "c7gn.metal") } public static var c7gnXlarge: Self { .init(rawValue: "c7gn.xlarge") } public static var c7i12Xlarge: Self { .init(rawValue: "c7i.12xlarge") } public static var c7i16Xlarge: Self { .init(rawValue: "c7i.16xlarge") } @@ -1261,6 +1262,11 @@ extension EC2 { public static var c7i48Xlarge: Self { .init(rawValue: "c7i.48xlarge") } public static var c7i4Xlarge: Self { .init(rawValue: "c7i.4xlarge") } public static var c7i8Xlarge: Self { .init(rawValue: "c7i.8xlarge") } + public static var c7iFlex2Xlarge: Self { .init(rawValue: "c7i-flex.2xlarge") } + public static var c7iFlex4Xlarge: Self { .init(rawValue: "c7i-flex.4xlarge") } + public static var c7iFlex8Xlarge: Self { .init(rawValue: "c7i-flex.8xlarge") } + public static var c7iFlexLarge: Self { .init(rawValue: "c7i-flex.large") } + public static var c7iFlexXlarge: Self { .init(rawValue: "c7i-flex.xlarge") } public static var c7iLarge: Self { .init(rawValue: "c7i.large") } public static var c7iMetal24Xl: Self { .init(rawValue: "c7i.metal-24xl") } public static var c7iMetal48Xl: Self { .init(rawValue: "c7i.metal-48xl") } @@ -1592,6 +1598,7 @@ extension EC2 { public static var m7iMetal48Xl: Self { .init(rawValue: "m7i.metal-48xl") } public static var m7iXlarge: Self { .init(rawValue: "m7i.xlarge") } public static var mac1Metal: Self { .init(rawValue: "mac1.metal") } + public static var mac2M1UltraMetal: Self { .init(rawValue: "mac2-m1ultra.metal") } public static var mac2M2Metal: Self { .init(rawValue: "mac2-m2.metal") } public static var mac2M2ProMetal: Self { .init(rawValue: "mac2-m2pro.metal") } public static var mac2Metal: Self { .init(rawValue: "mac2.metal") } @@ -1797,6 +1804,18 @@ extension EC2 { public static var r7izMetal16Xl: Self { .init(rawValue: "r7iz.metal-16xl") } public static var r7izMetal32Xl: Self { .init(rawValue: "r7iz.metal-32xl") } public static var r7izXlarge: Self { .init(rawValue: "r7iz.xlarge") } + public static var r8g12Xlarge: Self { .init(rawValue: "r8g.12xlarge") } + public static var r8g16Xlarge: Self { .init(rawValue: "r8g.16xlarge") } + public static var r8g24Xlarge: Self { .init(rawValue: "r8g.24xlarge") } + public static var r8g2Xlarge: Self { .init(rawValue: "r8g.2xlarge") } + public static var r8g48Xlarge: Self { .init(rawValue: "r8g.48xlarge") } + public static var r8g4Xlarge: Self { .init(rawValue: "r8g.4xlarge") } + public static var r8g8Xlarge: Self { .init(rawValue: "r8g.8xlarge") } + public static var r8gLarge: Self { .init(rawValue: "r8g.large") } + public static var r8gMedium: Self { .init(rawValue: "r8g.medium") } + public static var r8gMetal24Xl: Self { .init(rawValue: "r8g.metal-24xl") } + public static var r8gMetal48Xl: Self { .init(rawValue: "r8g.metal-48xl") } + public static var r8gXlarge: Self { .init(rawValue: "r8g.xlarge") } public static var t1Micro: Self { .init(rawValue: "t1.micro") } public static var t22Xlarge: Self { .init(rawValue: "t2.2xlarge") } public static var t2Large: Self { .init(rawValue: "t2.large") } @@ -1839,6 
+1858,11 @@ extension EC2 { public static var u6Tb1112Xlarge: Self { .init(rawValue: "u-6tb1.112xlarge") } public static var u6Tb156Xlarge: Self { .init(rawValue: "u-6tb1.56xlarge") } public static var u6Tb1Metal: Self { .init(rawValue: "u-6tb1.metal") } + public static var u7i12Tb224Xlarge: Self { .init(rawValue: "u7i-12tb.224xlarge") } + public static var u7ib12Tb224Xlarge: Self { .init(rawValue: "u7ib-12tb.224xlarge") } + public static var u7in16Tb224Xlarge: Self { .init(rawValue: "u7in-16tb.224xlarge") } + public static var u7in24Tb224Xlarge: Self { .init(rawValue: "u7in-24tb.224xlarge") } + public static var u7in32Tb224Xlarge: Self { .init(rawValue: "u7in-32tb.224xlarge") } public static var u9Tb1112Xlarge: Self { .init(rawValue: "u-9tb1.112xlarge") } public static var u9Tb1Metal: Self { .init(rawValue: "u-9tb1.metal") } public static var vt124Xlarge: Self { .init(rawValue: "vt1.24xlarge") } @@ -2468,6 +2492,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum PhcSupport: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case supported = "supported" + case unsupported = "unsupported" + public var description: String { return self.rawValue } + } + public enum PlacementGroupState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "available" case deleted = "deleted" @@ -2685,6 +2715,7 @@ extension EC2 { case volume = "volume" case vpc = "vpc" case vpcBlockPublicAccessExclusion = "vpc-block-public-access-exclusion" + case vpcEncryptionControl = "vpc-encryption-control" case vpcEndpoint = "vpc-endpoint" case vpcEndpointConnection = "vpc-endpoint-connection" case vpcEndpointConnectionDeviceType = "vpc-endpoint-connection-device-type" @@ -4117,7 +4148,7 @@ extension EC2 { public let domain: DomainType? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. Use DescribeAvailabilityZones to view the network border groups. + /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. public let networkBorderGroup: String? /// The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead. public let publicIpv4Pool: String? @@ -4193,7 +4224,7 @@ extension EC2 { /// The IDs of the Outpost hardware assets on which to allocate the Dedicated Hosts. Targeting specific hardware assets on an Outpost can help to minimize latency between your workloads. This parameter is supported only if you specify OutpostArn. If you are allocating the Dedicated Hosts in a Region, omit this parameter. If you specify this parameter, you can omit Quantity. In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware asset. 
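// Usage sketch (illustrative only): the instance types added above (c7i-flex, r8g, u7i-12tb,
// mac2-m1ultra, and so on) are static members of the extensible EC2.InstanceType value, so they can
// be referenced directly; sizes the SDK does not model yet can still be expressed via rawValue.
let flexLarge: EC2.InstanceType = .c7iFlexLarge                   // "c7i-flex.large", added in this update
let memoryXl: EC2.InstanceType = .r8gXlarge                       // "r8g.xlarge", added in this update
let notYetModelled = EC2.InstanceType(rawValue: "c8g.large")      // hypothetical future size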
If you specify both AssetIds and Quantity, then the value for Quantity must be equal to the number of asset IDs specified. @OptionalCustomCoding> public var assetIds: [String]? - /// Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. For more information, see Understanding auto-placement and affinity in the Amazon EC2 User Guide. Default: on + /// Indicates whether the host accepts any untargeted instance launches that match its instance type configuration, or if it only accepts Host tenancy instance launches that specify its unique host ID. For more information, see Understanding auto-placement and affinity in the Amazon EC2 User Guide. Default: off public let autoPlacement: AutoPlacement? /// The Availability Zone in which to allocate the Dedicated Host. public let availabilityZone: String? @@ -4269,7 +4300,7 @@ extension EC2 { public var allowedCidrs: [String]? /// The CIDR you would like to allocate from the IPAM pool. Note the following: If there is no DefaultNetmaskLength allocation rule set on the pool, you must specify either the NetmaskLength or the CIDR. If the DefaultNetmaskLength allocation rule is set on the pool, you can specify either the NetmaskLength or the CIDR and the DefaultNetmaskLength allocation rule will be ignored. Possible values: Any available IPv4 or IPv6 CIDR. public let cidr: String? - /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the allocation. public let description: String? @@ -4932,7 +4963,8 @@ extension EC2 { } public struct AssociateClientVpnTargetNetworkRequest: AWSEncodableShape { - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. + /// For more information, see Ensuring idempotency. public let clientToken: String? /// The ID of the Client VPN endpoint. public let clientVpnEndpointId: String? @@ -5428,7 +5460,7 @@ extension EC2 { public struct AssociateTrunkInterfaceRequest: AWSEncodableShape { /// The ID of the branch network interface. public let branchInterfaceId: String? - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -5459,7 +5491,7 @@ extension EC2 { } public struct AssociateTrunkInterfaceResult: AWSDecodableShape { - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency. 
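// Usage sketch (illustrative only): allocating a Dedicated Host with auto-placement enabled
// explicitly, since the documented default above is now `off`. Assumes an `ec2: EC2` service
// object; the Availability Zone and instance type are placeholders.
func allocateDedicatedHost(ec2: EC2) async throws {
    let result = try await ec2.allocateHosts(.init(
        autoPlacement: .on,
        availabilityZone: "us-east-1a",
        instanceType: "m5.large",
        quantity: 1
    ))
    print(result.hostIds ?? [])
}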
+ /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// Information about the association between the trunk network interface and branch network interface. public let interfaceAssociation: TrunkInterfaceAssociation? @@ -5744,7 +5776,7 @@ extension EC2 { } public struct AttachVerifiedAccessTrustProviderRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -5939,7 +5971,8 @@ extension EC2 { public let accessGroupId: String? /// Indicates whether to grant access to all clients. Specify true to grant all clients who successfully establish a VPN connection access to the network. Must be set to true if AccessGroupId is not specified. public let authorizeAllGroups: Bool? - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. + /// For more information, see Ensuring idempotency. public let clientToken: String? /// The ID of the Client VPN endpoint. public let clientVpnEndpointId: String? @@ -7068,9 +7101,8 @@ extension EC2 { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } /// The strategy used by the Capacity Reservation Fleet to determine which of the specified - /// instance types to use. For more information, see For more information, see - /// - /// Allocation strategy in the Amazon EC2 User Guide. + /// instance types to use. For more information, see For more information, see Allocation + /// strategy in the Amazon EC2 User Guide. public let allocationStrategy: String? /// The ARN of the Capacity Reservation Fleet. public let capacityReservationFleetArn: String? @@ -7115,9 +7147,9 @@ extension EC2 { public let tenancy: FleetCapacityReservationTenancy? /// The capacity units that have been fulfilled. public let totalFulfilledCapacity: Double? - /// The total number of capacity units for which the Capacity Reservation Fleet reserves capacity. - /// For more information, see Total target capacity - /// in the Amazon EC2 User Guide. + /// The total number of capacity units for which the Capacity Reservation Fleet reserves + /// capacity. For more information, see Total target + /// capacity in the Amazon EC2 User Guide. public let totalTargetCapacity: Int? public init(allocationStrategy: String? = nil, capacityReservationFleetArn: String? = nil, capacityReservationFleetId: String? = nil, createTime: Date? = nil, endDate: Date? = nil, instanceMatchCriteria: FleetInstanceMatchCriteria? = nil, instanceTypeSpecifications: [FleetCapacityReservation]? = nil, state: CapacityReservationFleetState? = nil, tags: [Tag]? = nil, tenancy: FleetCapacityReservationTenancy? = nil, totalFulfilledCapacity: Double? = nil, totalTargetCapacity: Int? 
= nil) { @@ -8359,7 +8391,7 @@ extension EC2 { } public struct CopyFpgaImageRequest: AWSEncodableShape { - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// The description for the new AFI. public let description: String? @@ -8492,9 +8524,9 @@ extension EC2 { public let dryRun: Bool? /// To encrypt a copy of an unencrypted snapshot if encryption by default is not enabled, enable encryption using this parameter. Otherwise, omit this parameter. Encrypted snapshots are encrypted, even if you omit this parameter and encryption by default is not enabled. You cannot set this parameter to false. For more information, see Amazon EBS encryption in the Amazon EBS User Guide. public let encrypted: Bool? - /// The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS key using any of the following: Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. + /// The identifier of the KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS key using any of the following: Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. public let kmsKeyId: String? - /// When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query requests. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state. + /// When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. 
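// Usage sketch (illustrative only): a cross-Region CopySnapshot that encrypts the copy with a
// customer managed KMS key, using one of the KmsKeyId formats listed above. Assumes an `ec2: EC2`
// service object in the destination Region; the alias, Region, and snapshot ID are placeholders.
func copySnapshotEncrypted(ec2: EC2) async throws {
    let copy = try await ec2.copySnapshot(.init(
        description: "copy of snap-0123456789abcdef0",
        encrypted: true,
        kmsKeyId: "alias/ExampleAlias",
        sourceRegion: "us-west-2",
        sourceSnapshotId: "snap-0123456789abcdef0"
    ))
    print(copy.snapshotId ?? "copy not started")
}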
For more information, see Query requests. The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state. public let presignedUrl: String? /// The ID of the Region that contains the snapshot to be copied. public let sourceRegion: String? @@ -8596,10 +8628,10 @@ extension EC2 { public struct CreateCapacityReservationFleetRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The strategy used by the Capacity Reservation Fleet to determine which of the - /// specified instance types to use. Currently, only the prioritized - /// allocation strategy is supported. For more information, see - /// Allocation strategy in the Amazon EC2 User Guide. Valid values: prioritized + /// The strategy used by the Capacity Reservation Fleet to determine which of the specified + /// instance types to use. Currently, only the prioritized allocation strategy + /// is supported. For more information, see Allocation + /// strategy in the Amazon EC2 User Guide. Valid values: prioritized public let allocationStrategy: String? /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency. public let clientToken: String? @@ -8633,10 +8665,11 @@ extension EC2 { /// hardware that is dedicated to a single Amazon Web Services account. public let tenancy: FleetCapacityReservationTenancy? /// The total number of capacity units to be reserved by the Capacity Reservation Fleet. This - /// value, together with the instance type weights that you assign to each instance type used by - /// the Fleet determine the number of instances for which the Fleet reserves capacity. Both values - /// are based on units that make sense for your workload. For more information, see - /// Total target capacity in the Amazon EC2 User Guide. + /// value, together with the instance type weights that you assign to each instance type + /// used by the Fleet determine the number of instances for which the Fleet reserves + /// capacity. Both values are based on units that make sense for your workload. For more + /// information, see Total target + /// capacity in the Amazon EC2 User Guide. public let totalTargetCapacity: Int? public init(allocationStrategy: String? = nil, clientToken: String? = CreateCapacityReservationFleetRequest.idempotencyToken(), dryRun: Bool? = nil, endDate: Date? = nil, instanceMatchCriteria: FleetInstanceMatchCriteria? = nil, instanceTypeSpecifications: [ReservationFleetInstanceSpecification]? = nil, tagSpecifications: [TagSpecification]? = nil, tenancy: FleetCapacityReservationTenancy? = nil, totalTargetCapacity: Int? = nil) { @@ -8900,7 +8933,8 @@ extension EC2 { /// Options for enabling a customizable text banner that will be displayed on /// Amazon Web Services provided clients when a VPN session is established. public let clientLoginBannerOptions: ClientLoginBannerOptions? 
- /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. + /// For more information, see Ensuring idempotency. public let clientToken: String? /// Information about the client connection logging options. If you enable client connection logging, data about client connections is sent to a /// Cloudwatch Logs log stream. The following information is logged: Client connection requests Client connection results (successful and unsuccessful) Reasons for unsuccessful client connection requests Client connection termination time @@ -9001,7 +9035,8 @@ extension EC2 { } public struct CreateClientVpnRouteRequest: AWSEncodableShape { - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. + /// For more information, see Ensuring idempotency. public let clientToken: String? /// The ID of the Client VPN endpoint to which to add the route. public let clientVpnEndpointId: String? @@ -9121,15 +9156,17 @@ extension EC2 { public struct CreateCustomerGatewayRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// For devices that support BGP, the customer gateway's BGP ASN. Default: 65000 + /// For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended. Default: 65000 Valid values: 1 to 2,147,483,647 public let bgpAsn: Int? + /// For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended. Valid values: 2,147,483,648 to 4,294,967,295 + public let bgpAsnExtended: Int64? /// The Amazon Resource Name (ARN) for the customer gateway certificate. public let certificateArn: String? /// A name for the customer gateway device. Length Constraints: Up to 255 characters. public let deviceName: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// IPv4 address for the customer gateway device's outside interface. The address must be static. + /// IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address. public let ipAddress: String? /// This member has been deprecated. The Internet-routable IP address for the customer gateway's outside interface. The address must be static. public let publicIp: String? @@ -9139,8 +9176,9 @@ extension EC2 { /// The type of VPN connection that this customer gateway supports (ipsec.1). public let type: GatewayType? - public init(bgpAsn: Int? = nil, certificateArn: String? = nil, deviceName: String? = nil, dryRun: Bool? 
= nil, ipAddress: String? = nil, publicIp: String? = nil, tagSpecifications: [TagSpecification]? = nil, type: GatewayType? = nil) { + public init(bgpAsn: Int? = nil, bgpAsnExtended: Int64? = nil, certificateArn: String? = nil, deviceName: String? = nil, dryRun: Bool? = nil, ipAddress: String? = nil, publicIp: String? = nil, tagSpecifications: [TagSpecification]? = nil, type: GatewayType? = nil) { self.bgpAsn = bgpAsn + self.bgpAsnExtended = bgpAsnExtended self.certificateArn = certificateArn self.deviceName = deviceName self.dryRun = dryRun @@ -9152,6 +9190,7 @@ extension EC2 { private enum CodingKeys: String, CodingKey { case bgpAsn = "BgpAsn" + case bgpAsnExtended = "BgpAsnExtended" case certificateArn = "CertificateArn" case deviceName = "DeviceName" case dryRun = "dryRun" @@ -9580,7 +9619,7 @@ extension EC2 { public struct CreateFpgaImageRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. public let clientToken: String? /// A description for the AFI. public let description: String? @@ -9899,7 +9938,7 @@ extension EC2 { public let autoImport: Bool? /// Limits which service in Amazon Web Services that the pool can be used in. "ec2", for example, allows users to use space for Elastic IP addresses and VPCs. public let awsService: IpamPoolAwsService? - /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the IPAM pool. public let description: String? @@ -9987,7 +10026,7 @@ extension EC2 { public struct CreateIpamRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the IPAM. public let description: String? @@ -10091,7 +10130,7 @@ extension EC2 { public struct CreateIpamScopeRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the scope you're creating. public let description: String? @@ -10237,7 +10276,7 @@ extension EC2 { public let launchTemplateId: String? /// The name of the launch template. You must specify either the launch template ID or the launch template name, but not both. public let launchTemplateName: String? 
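The new bgpAsnExtended member added above carries 4-byte ASNs (2,147,483,648 to 4,294,967,295) that do not fit in bgpAsn. A minimal sketch of how a caller might populate it, assuming an already configured EC2 service object; the ASN and IP address are placeholders, and the GatewayType case name follows Soto's usual enum naming:

```swift
import SotoEC2

/// Illustrative only: registers a customer gateway whose device uses a 32-bit ASN,
/// which must be supplied via bgpAsnExtended rather than bgpAsn.
func createCustomerGatewayWith32BitASN(ec2: EC2) async throws -> String? {
    let request = EC2.CreateCustomerGatewayRequest(
        bgpAsnExtended: 4_200_000_000,   // larger than 2,147,483,647, so bgpAsn cannot hold it
        ipAddress: "203.0.113.12",       // static IPv4 address of the device's outside interface
        type: .ipsec1
    )
    let result = try await ec2.createCustomerGateway(request)
    return result.customerGateway?.customerGatewayId
}
```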
- /// If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide. Default: false + /// If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. Default: false public let resolveAlias: Bool? /// The version of the launch template on which to base the new version. Snapshots applied to the block device mapping are ignored when creating a new version unless they are explicitly included. If you specify this parameter, the new version inherits the launch parameters from the source version. If you specify additional launch parameters for the new version, they overwrite any corresponding launch parameters inherited from the source version. If you omit this parameter, the new version contains only the launch parameters that you specify for the new version. public let sourceVersion: String? @@ -10466,7 +10505,7 @@ extension EC2 { /// The IP address type. Valid Values: IPv4 | IPv6 public let addressFamily: String? - /// Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. Constraints: Up to 255 UTF-8 characters in length. + /// Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. Constraints: Up to 255 UTF-8 characters in length. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -10540,7 +10579,7 @@ extension EC2 { public var secondaryAllocationIds: [String]? /// [Private NAT gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT gateway. For more information about secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. public let secondaryPrivateIpAddressCount: Int? - /// Secondary private IPv4 addresses. For more information about secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. + /// Secondary private IPv4 addresses. For more information about secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide. @OptionalCustomCoding> public var secondaryPrivateIpAddresses: [String]? /// The ID of the subnet in which to create the NAT gateway. @@ -10884,7 +10923,7 @@ extension EC2 { public struct _PrivateIpAddressesEncoding: ArrayCoderProperties { public static let member = "item" } public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A connection tracking specification for the network interface. public let connectionTrackingSpecification: ConnectionTrackingSpecificationRequest? 
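The CreateNatGateway fields documented above allow secondary private IPv4 addresses to be assigned when the gateway is created. A hedged sketch under the assumption that the ConnectivityType case for "private" follows Soto's enum naming; the subnet ID and addresses are placeholders:

```swift
import SotoEC2

/// Illustrative only: creates a private NAT gateway with two secondary
/// private IPv4 addresses, using the secondaryPrivateIpAddresses field above.
func createPrivateNatGateway(ec2: EC2, subnetId: String) async throws -> String? {
    let request = EC2.CreateNatGatewayRequest(
        connectivityType: .private,
        secondaryPrivateIpAddresses: ["10.0.1.5", "10.0.1.6"],   // placeholder addresses in the subnet's CIDR
        subnetId: subnetId
    )
    let result = try await ec2.createNatGateway(request)
    return result.natGateway?.natGatewayId
}
```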
@@ -11612,7 +11651,7 @@ extension EC2 { public struct CreateSubnetRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The Availability Zone or Local Zone for the subnet. Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet. To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Local Zones locations. To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN. + /// The Availability Zone or Local Zone for the subnet. Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we do not necessarily select a different zone for each subnet. To create a subnet in a Local Zone, set this value to the Local Zone ID, for example us-west-2-lax-1a. For information about the Regions that support Local Zones, see Available Local Zones. To create a subnet in an Outpost, set this value to the Availability Zone for the Outpost and specify the Outpost ARN. public let availabilityZone: String? /// The AZ ID or the Local Zone ID of the subnet. public let availabilityZoneId: String? @@ -11757,6 +11796,8 @@ extension EC2 { } public struct CreateTrafficMirrorFilterRuleRequest: AWSEncodableShape { + public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency. public let clientToken: String? /// The description of the Traffic Mirror rule. @@ -11777,12 +11818,15 @@ extension EC2 { public let sourceCidrBlock: String? /// The source port range. public let sourcePortRange: TrafficMirrorPortRangeRequest? + /// Traffic Mirroring tags specifications. + @OptionalCustomCoding> + public var tagSpecifications: [TagSpecification]? /// The type of traffic. public let trafficDirection: TrafficDirection? /// The ID of the filter that this rule is associated with. public let trafficMirrorFilterId: String? - public init(clientToken: String? = CreateTrafficMirrorFilterRuleRequest.idempotencyToken(), description: String? = nil, destinationCidrBlock: String? = nil, destinationPortRange: TrafficMirrorPortRangeRequest? = nil, dryRun: Bool? = nil, protocol: Int? = nil, ruleAction: TrafficMirrorRuleAction? = nil, ruleNumber: Int? = nil, sourceCidrBlock: String? = nil, sourcePortRange: TrafficMirrorPortRangeRequest? = nil, trafficDirection: TrafficDirection? = nil, trafficMirrorFilterId: String? = nil) { + public init(clientToken: String? = CreateTrafficMirrorFilterRuleRequest.idempotencyToken(), description: String? = nil, destinationCidrBlock: String? = nil, destinationPortRange: TrafficMirrorPortRangeRequest? = nil, dryRun: Bool? = nil, protocol: Int? = nil, ruleAction: TrafficMirrorRuleAction? = nil, ruleNumber: Int? = nil, sourceCidrBlock: String? = nil, sourcePortRange: TrafficMirrorPortRangeRequest? = nil, tagSpecifications: [TagSpecification]? = nil, trafficDirection: TrafficDirection? = nil, trafficMirrorFilterId: String? 
= nil) { self.clientToken = clientToken self.description = description self.destinationCidrBlock = destinationCidrBlock @@ -11793,6 +11837,7 @@ extension EC2 { self.ruleNumber = ruleNumber self.sourceCidrBlock = sourceCidrBlock self.sourcePortRange = sourcePortRange + self.tagSpecifications = tagSpecifications self.trafficDirection = trafficDirection self.trafficMirrorFilterId = trafficMirrorFilterId } @@ -11808,6 +11853,7 @@ extension EC2 { case ruleNumber = "RuleNumber" case sourceCidrBlock = "SourceCidrBlock" case sourcePortRange = "SourcePortRange" + case tagSpecifications = "TagSpecification" case trafficDirection = "TrafficDirection" case trafficMirrorFilterId = "TrafficMirrorFilterId" } @@ -11852,7 +11898,7 @@ extension EC2 { public let trafficMirrorFilterId: String? /// The ID of the Traffic Mirror target. public let trafficMirrorTargetId: String? - /// The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique id is chosen at random. + /// The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique ID is chosen at random. public let virtualNetworkId: Int? public init(clientToken: String? = CreateTrafficMirrorSessionRequest.idempotencyToken(), description: String? = nil, dryRun: Bool? = nil, networkInterfaceId: String? = nil, packetLength: Int? = nil, sessionNumber: Int? = nil, tagSpecifications: [TagSpecification]? = nil, trafficMirrorFilterId: String? = nil, trafficMirrorTargetId: String? = nil, virtualNetworkId: Int? = nil) { @@ -12576,7 +12622,7 @@ extension EC2 { public let applicationDomain: String? /// The type of attachment. public let attachmentType: VerifiedAccessEndpointAttachmentType? - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access endpoint. public let description: String? @@ -12663,7 +12709,7 @@ extension EC2 { public struct CreateVerifiedAccessGroupRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access group. public let description: String? @@ -12716,7 +12762,7 @@ extension EC2 { public struct CreateVerifiedAccessInstanceRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access instance. public let description: String? 
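The hunk above adds tag-on-create support to CreateTrafficMirrorFilterRuleRequest through the new tagSpecifications member. A sketch of how that parameter might be used; the filter ID, CIDRs, and tag values are placeholders, and the trafficMirrorFilterRule resource-type case is assumed to exist in this release:

```swift
import SotoEC2

/// Illustrative only: creates an ingress accept rule on an existing Traffic Mirror
/// filter and tags the rule in the same call via the new tagSpecifications field.
func createTaggedMirrorRule(ec2: EC2, filterId: String) async throws {
    let request = EC2.CreateTrafficMirrorFilterRuleRequest(
        destinationCidrBlock: "0.0.0.0/0",
        ruleAction: .accept,
        ruleNumber: 100,
        sourceCidrBlock: "10.0.0.0/16",
        tagSpecifications: [
            EC2.TagSpecification(
                resourceType: .trafficMirrorFilterRule,   // assumed case name for "traffic-mirror-filter-rule"
                tags: [EC2.Tag(key: "team", value: "networking")]
            )
        ],
        trafficDirection: .ingress,
        trafficMirrorFilterId: filterId
    )
    _ = try await ec2.createTrafficMirrorFilterRule(request)
}
```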
@@ -12815,7 +12861,7 @@ extension EC2 { public struct CreateVerifiedAccessTrustProviderRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access trust provider. public let description: String? @@ -12936,9 +12982,9 @@ extension EC2 { /// instances /// built on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS. This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for gp2, st1, sc1, or standard volumes. public let iops: Int? - /// The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS key using any of the following: Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. + /// The identifier of the KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS key using any of the following: Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. public let kmsKeyId: String? - /// Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. + /// Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with io1 and io2 volumes only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide. public let multiAttachEnabled: Bool? /// The Amazon Resource Name (ARN) of the Outpost. public let outpostArn: String? @@ -13469,15 +13515,17 @@ extension EC2 { public struct CustomerGateway: AWSDecodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN). 
+ /// The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number (ASN). Valid values: 1 to 2,147,483,647 public let bgpAsn: String? + /// The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number (ASN). Valid values: 2,147,483,648 to 4,294,967,295 + public let bgpAsnExtended: String? /// The Amazon Resource Name (ARN) for the customer gateway certificate. public let certificateArn: String? /// The ID of the customer gateway. public let customerGatewayId: String? /// The name of customer gateway device. public let deviceName: String? - /// The IP address of the customer gateway device's outside interface. + /// IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address. public let ipAddress: String? /// The current state of the customer gateway (pending | available | deleting | deleted). public let state: String? @@ -13487,8 +13535,9 @@ extension EC2 { /// The type of VPN connection the customer gateway supports (ipsec.1). public let type: String? - public init(bgpAsn: String? = nil, certificateArn: String? = nil, customerGatewayId: String? = nil, deviceName: String? = nil, ipAddress: String? = nil, state: String? = nil, tags: [Tag]? = nil, type: String? = nil) { + public init(bgpAsn: String? = nil, bgpAsnExtended: String? = nil, certificateArn: String? = nil, customerGatewayId: String? = nil, deviceName: String? = nil, ipAddress: String? = nil, state: String? = nil, tags: [Tag]? = nil, type: String? = nil) { self.bgpAsn = bgpAsn + self.bgpAsnExtended = bgpAsnExtended self.certificateArn = certificateArn self.customerGatewayId = customerGatewayId self.deviceName = deviceName @@ -13500,6 +13549,7 @@ extension EC2 { private enum CodingKeys: String, CodingKey { case bgpAsn = "bgpAsn" + case bgpAsnExtended = "bgpAsnExtended" case certificateArn = "certificateArn" case customerGatewayId = "customerGatewayId" case deviceName = "deviceName" @@ -15506,7 +15556,7 @@ extension EC2 { } public struct DeleteVerifiedAccessEndpointRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -15540,7 +15590,7 @@ extension EC2 { } public struct DeleteVerifiedAccessGroupRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -15574,7 +15624,7 @@ extension EC2 { } public struct DeleteVerifiedAccessInstanceRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -15608,7 +15658,7 @@ extension EC2 { } public struct DeleteVerifiedAccessTrustProviderRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -17328,7 +17378,7 @@ extension EC2 { public struct _DhcpOptionsIdsEncoding: ArrayCoderProperties { public static let member = "DhcpOptionsId" } public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } - /// The IDs of one or more DHCP options sets. Default: Describes all your DHCP options sets. + /// The IDs of DHCP option sets. @OptionalCustomCoding> public var dhcpOptionsIds: [String]? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -17368,7 +17418,7 @@ extension EC2 { public struct DescribeDhcpOptionsResult: AWSDecodableShape { public struct _DhcpOptionsEncoding: ArrayCoderProperties { public static let member = "item" } - /// Information about one or more DHCP options sets. + /// Information about the DHCP options sets. @OptionalCustomCoding> public var dhcpOptions: [DhcpOptions]? /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. @@ -19058,7 +19108,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// One or more filters. Filter names and values are case-sensitive. instance-type - The instance type. For a list of possible values, see Instance. location - The location. For a list of possible identifiers, see Regions and Zones. + /// One or more filters. Filter names and values are case-sensitive. instance-type - The instance type. For a list of possible values, see Instance. location - The location. 
For a list of possible identifiers, see Regions and Zones. @OptionalCustomCoding> public var filters: [Filter]? /// The location type. availability-zone - The Availability Zone. When you specify a location filter, it must be an Availability Zone for the current Region. availability-zone-id - The AZ ID. When you specify a location filter, it must be an AZ ID for the current Region. outpost - The Outpost ARN. When you specify a location filter, it must be an Outpost ARN for the current Region. region - The current Region. If you specify a location filter, it must match the current Region. @@ -19116,7 +19166,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. 
instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". + /// One or more filters. Filter names and values are case-sensitive. auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false). bare-metal - Indicates whether it is a bare metal instance type (true | false). burstable-performance-supported - Indicates whether the instance type is a burstable performance T instance type (true | false). current-generation - Indicates whether this instance type is the latest generation instance type of an instance family (true | false). ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline throughput performance for an EBS-optimized instance type, in MB/s. 
ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth performance for an EBS-optimized instance type, in Mbps. ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage operations per second for an EBS-optimized instance type. ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput performance for an EBS-optimized instance type, in MB/s. ebs-info.ebs-optimized-support - Indicates whether the instance type is EBS-optimized (supported | unsupported | default). ebs-info.encryption-support - Indicates whether EBS encryption is supported (supported | unsupported). ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for EBS volumes (required | supported | unsupported). free-tier-eligible - Indicates whether the instance type is eligible to use in the free tier (true | false). hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false). hypervisor - The hypervisor (nitro | xen). instance-storage-info.disk.count - The number of local disks. instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in GB. instance-storage-info.disk.type - The storage technology for the local instance storage disks (hdd | ssd). instance-storage-info.encryption-support - Indicates whether data is encrypted at rest (required | supported | unsupported). instance-storage-info.nvme-support - Indicates whether non-volatile memory express (NVMe) is supported for instance store (required | supported | unsupported). instance-storage-info.total-size-in-gb - The total amount of storage available from all local instance storage, in GB. instance-storage-supported - Indicates whether the instance type has local instance storage (true | false). instance-type - The instance type (for example c5.2xlarge or c5*). memory-info.size-in-mib - The memory size. network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic Fabric Adapters (EFAs) per instance. network-info.efa-supported - Indicates whether the instance type supports Elastic Fabric Adapter (EFA) (true | false). network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is supported or required (required | supported | unsupported). network-info.encryption-in-transit-supported - Indicates whether the instance type automatically encrypts in-transit traffic between instances (true | false). network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per network interface. network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per network interface. network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false). network-info.maximum-network-cards - The maximum number of network cards per instance. network-info.maximum-network-interfaces - The maximum number of network interfaces per instance. network-info.network-performance - The network performance (for example, "25 Gigabit"). nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported | unsupported). nitro-tpm-support - Indicates whether NitroTPM is supported (supported | unsupported). nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0). processor-info.supported-architecture - The CPU architecture (arm64 | i386 | x86_64). processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz. processor-info.supported-features - The supported CPU features (amd-sev-snp). 
supported-boot-mode - The boot mode (legacy-bios | uefi). supported-root-device-type - The root device type (ebs | instance-store). supported-usage-class - The usage class (on-demand | spot). supported-virtualization-type - The virtualization type (hvm | paravirtual). vcpu-info.default-cores - The default number of cores for the instance type. vcpu-info.default-threads-per-core - The default number of threads per core for the instance type. vcpu-info.default-vcpus - The default number of vCPUs for the instance type. vcpu-info.valid-cores - The number of cores that can be configured for the instance type. vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type. For example, "1" or "1,2". @OptionalCustomCoding> public var filters: [Filter]? /// The instance types. @@ -19177,7 +19227,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. 
iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. 
network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. 
network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). 
source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. 
ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. 
network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The carrier IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. 
network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. 
reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. @OptionalCustomCoding> public var filters: [Filter]? /// The instance IDs. Default: Describes all your instances. @@ -19270,7 +19320,7 @@ extension EC2 { public struct DescribeInternetGatewaysResult: AWSDecodableShape { public struct _InternetGatewaysEncoding: ArrayCoderProperties { public static let member = "item" } - /// Information about one or more internet gateways. + /// Information about the internet gateways. @OptionalCustomCoding> public var internetGateways: [InternetGateway]? /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. @@ -19762,7 +19812,7 @@ extension EC2 { public let minVersion: String? 
/// The token to request the next page of results. public let nextToken: String? - /// If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageId. If false, and if a Systems Manager parameter is specified for ImageId, the parameter is displayed in the response for imageId. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide. Default: false + /// If true, and if a Systems Manager parameter is specified for ImageId, the AMI ID is displayed in the response for imageId. If false, and if a Systems Manager parameter is specified for ImageId, the parameter is displayed in the response for imageId. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. Default: false public let resolveAlias: Bool? /// One or more versions of the launch template. Valid values depend on whether you are describing a specified launch template (by ID or name) or all launch templates in your account. To describe one or more versions of a specified launch template, valid values are $Latest, $Default, and numbers. To describe all launch templates in your account that are defined as the latest version, the valid value is $Latest. To describe all launch templates in your account that are defined as the default version, the valid value is $Default. You can specify $Latest and $Default in the same request. You cannot specify numbers. @OptionalCustomCoding> @@ -20561,7 +20611,7 @@ extension EC2 { /// To get the next page of items, make another request with the token returned in the output. /// For more information, see Pagination. public let maxResults: Int? - /// The IDs of the network ACLs. Default: Describes all your network ACLs. + /// The IDs of the network ACLs. @OptionalCustomCoding> public var networkAclIds: [String]? /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -20592,7 +20642,7 @@ extension EC2 { public struct DescribeNetworkAclsResult: AWSDecodableShape { public struct _NetworkAclsEncoding: ArrayCoderProperties { public static let member = "item" } - /// Information about one or more network ACLs. + /// Information about the network ACLs. @OptionalCustomCoding> public var networkAcls: [NetworkAcl]? /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. @@ -21491,7 +21541,7 @@ extension EC2 { public let includeMarketplace: Bool? /// The tenancy of the instances covered by the reservation. A Reserved Instance with a tenancy of dedicated is applied to instances that run in a VPC on single-tenant hardware (i.e., Dedicated Instances). Important: The host value cannot be used with this parameter. Use the default or dedicated values only. Default: default public let instanceTenancy: Tenancy? - /// The instance type that the reservation will cover (for example, m1.small). For more information, see Instance types in the Amazon EC2 User Guide. + /// The instance type that the reservation will cover (for example, m1.small). For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide. public let instanceType: InstanceType? /// The maximum duration (in seconds) to filter when searching for offerings. Default: 94608000 (3 years) public let maxDuration: Int64? @@ -21644,7 +21694,7 @@ extension EC2 { public let maxResults: Int? 
/// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? - /// The IDs of the route tables. Default: Describes all your route tables. + /// The IDs of the route tables. @OptionalCustomCoding> public var routeTableIds: [String]? @@ -21675,7 +21725,7 @@ extension EC2 { /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. public let nextToken: String? - /// Information about one or more route tables. + /// Information about the route tables. @OptionalCustomCoding> public var routeTables: [RouteTable]? @@ -22086,7 +22136,9 @@ extension EC2 { /// The filters. description - A description of the snapshot. encrypted - Indicates whether the snapshot is encrypted (true | false) owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter. owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter. progress - The progress of the snapshot, as a percentage (for example, 80%). snapshot-id - The snapshot ID. start-time - The time stamp when the snapshot was initiated. status - The status of the snapshot (pending | completed | error). storage-tier - The storage tier of the snapshot (archive | standard). tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. volume-id - The ID of the volume the snapshot is for. volume-size - The size of the volume, in GiB. @OptionalCustomCoding> public var filters: [Filter]? - /// The maximum number of snapshots to return for this request. This value can be between 5 and 1,000; if this value is larger than 1,000, only 1,000 results are returned. If this parameter is not used, then the request returns all snapshots. You cannot specify this parameter and the snapshot IDs parameter in the same request. For more information, see Pagination. + /// The maximum number of items to return for this request. + /// To get the next page of items, make another request with the token returned in the output. + /// For more information, see Pagination. public let maxResults: Int? /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? @@ -22124,7 +22176,7 @@ extension EC2 { public struct DescribeSnapshotsResult: AWSDecodableShape { public struct _SnapshotsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The token to include in another request to return the next page of snapshots. This value is null when there are no more snapshots to return. + /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. public let nextToken: String? /// Information about the snapshots. 
@OptionalCustomCoding> @@ -22348,7 +22400,7 @@ extension EC2 { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The filters. availability-zone-group - The Availability Zone group. create-time - The time stamp when the Spot Instance request was created. fault-code - The fault code related to the request. fault-message - The fault message related to the request. instance-id - The ID of the instance that fulfilled the request. launch-group - The Spot Instance launch group. launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination. launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh). launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume. launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB. launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic. launch.group-id - The ID of the security group for the instance. launch.group-name - The name of the security group for the instance. launch.image-id - The ID of the AMI. launch.instance-type - The type of instance (for example, m3.medium). launch.kernel-id - The kernel ID. launch.key-name - The name of the key pair the instance launched with. launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance. launch.ramdisk-id - The RAM disk ID. launched-availability-zone - The Availability Zone in which the request is launched. network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address. network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated. network-interface.description - A description of the network interface. network-interface.device-index - The index of the device for the network interface attachment on the instance. network-interface.group-id - The ID of the security group associated with the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.private-ip-address - The primary private IP address of the network interface. network-interface.subnet-id - The ID of the subnet for the instance. product-description - The product description associated with the instance (Linux/UNIX | Windows). spot-instance-request-id - The Spot Instance request ID. spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request. state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide for Linux Instances. status-code - The short code describing the most recent evaluation of your Spot Instance request. status-message - The message explaining the status of the Spot Instance request. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. type - The type of Spot Instance request (one-time | persistent). valid-from - The start date of the request. valid-until - The end date of the request. + /// The filters. availability-zone-group - The Availability Zone group. create-time - The time stamp when the Spot Instance request was created. fault-code - The fault code related to the request. fault-message - The fault message related to the request. instance-id - The ID of the instance that fulfilled the request. launch-group - The Spot Instance launch group. launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination. launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh). launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume. launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB. launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic. launch.group-id - The ID of the security group for the instance. launch.group-name - The name of the security group for the instance. launch.image-id - The ID of the AMI. launch.instance-type - The type of instance (for example, m3.medium). launch.kernel-id - The kernel ID. launch.key-name - The name of the key pair the instance launched with. launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance. launch.ramdisk-id - The RAM disk ID. launched-availability-zone - The Availability Zone in which the request is launched. network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address. network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated. network-interface.description - A description of the network interface. network-interface.device-index - The index of the device for the network interface attachment on the instance. network-interface.group-id - The ID of the security group associated with the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.private-ip-address - The primary private IP address of the network interface. network-interface.subnet-id - The ID of the subnet for the instance. product-description - The product description associated with the instance (Linux/UNIX | Windows). spot-instance-request-id - The Spot Instance request ID. spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request. state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide. status-code - The short code describing the most recent evaluation of your Spot Instance request. status-message - The message explaining the status of the Spot Instance request. tag: - The key/value combination of a tag assigned to the resource. 
Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. type - The type of Spot Instance request (one-time | persistent). valid-from - The start date of the request. valid-until - The end date of the request. @OptionalCustomCoding> public var filters: [Filter]? /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. @@ -22628,7 +22680,7 @@ extension EC2 { /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. public let nextToken: String? - /// Information about one or more subnets. + /// Information about the subnets. @OptionalCustomCoding> public var subnets: [Subnet]? @@ -22691,6 +22743,70 @@ extension EC2 { } } + public struct DescribeTrafficMirrorFilterRulesRequest: AWSEncodableShape { + public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } + public struct _TrafficMirrorFilterRuleIdsEncoding: ArrayCoderProperties { public static let member = "item" } + + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// Traffic mirror filters. traffic-mirror-filter-rule-id: The ID of the Traffic Mirror rule. traffic-mirror-filter-id: The ID of the filter that this rule is associated with. rule-number: The number of the Traffic Mirror rule. rule-action: The action taken on the filtered traffic. Possible actions are accept and reject. traffic-direction: The traffic direction. Possible directions are ingress and egress. protocol: The protocol, for example UDP, assigned to the Traffic Mirror rule. source-cidr-block: The source CIDR block assigned to the Traffic Mirror rule. destination-cidr-block: The destination CIDR block assigned to the Traffic Mirror rule. description: The description of the Traffic Mirror rule. + @OptionalCustomCoding> + public var filters: [Filter]? + /// The maximum number of results to return with a single call. + /// To retrieve the remaining results, make another call with the returned nextToken value. + public let maxResults: Int? + /// The token for the next page of results. + public let nextToken: String? + /// Traffic filter ID. + public let trafficMirrorFilterId: String? + /// Traffic filter rule IDs. + @OptionalCustomCoding> + public var trafficMirrorFilterRuleIds: [String]? + + public init(dryRun: Bool? = nil, filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, trafficMirrorFilterId: String? = nil, trafficMirrorFilterRuleIds: [String]? 
= nil) { + self.dryRun = dryRun + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.trafficMirrorFilterId = trafficMirrorFilterId + self.trafficMirrorFilterRuleIds = trafficMirrorFilterRuleIds + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 5) + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case filters = "Filter" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case trafficMirrorFilterId = "TrafficMirrorFilterId" + case trafficMirrorFilterRuleIds = "TrafficMirrorFilterRuleId" + } + } + + public struct DescribeTrafficMirrorFilterRulesResult: AWSDecodableShape { + public struct _TrafficMirrorFilterRulesEncoding: ArrayCoderProperties { public static let member = "item" } + + /// The token to use to retrieve the next page of results. The value is null when there are no more results to return. + public let nextToken: String? + /// Traffic mirror rules. + @OptionalCustomCoding> + public var trafficMirrorFilterRules: [TrafficMirrorFilterRule]? + + public init(nextToken: String? = nil, trafficMirrorFilterRules: [TrafficMirrorFilterRule]? = nil) { + self.nextToken = nextToken + self.trafficMirrorFilterRules = trafficMirrorFilterRules + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case trafficMirrorFilterRules = "trafficMirrorFilterRuleSet" + } + } + public struct DescribeTrafficMirrorFiltersRequest: AWSEncodableShape { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } public struct _TrafficMirrorFilterIdsEncoding: ArrayCoderProperties { public static let member = "item" } @@ -23892,7 +24008,9 @@ extension EC2 { /// The filters. action.code - The action code for the event (for example, enable-volume-io). action.description - A description of the action. action.event-id - The event ID associated with the action. availability-zone - The Availability Zone of the instance. event.description - A description of the event. event.event-id - The event ID. event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled). event.not-after - The latest end time for the event. event.not-before - The earliest start time for the event. volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance). volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled). volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data). @OptionalCustomCoding> public var filters: [Filter]? - /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. This value can be between 5 and 1,000; if the value is larger than 1,000, only 1,000 results are returned. If this parameter is not used, then all items are returned. You cannot specify this parameter and the volume IDs parameter in the same request. For more information, see Pagination. + /// The maximum number of items to return for this request. + /// To get the next page of items, make another request with the token returned in the output. + /// For more information, see Pagination. 
public let maxResults: Int? /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? @@ -23948,7 +24066,7 @@ extension EC2 { public var filters: [Filter]? /// The maximum number of results (up to a limit of 500) to be returned in a paginated request. For more information, see Pagination. public let maxResults: Int? - /// The token returned by a previous paginated request. Pagination continues from the end of the items returned by the previous request. + /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? /// The IDs of the volumes. @OptionalCustomCoding> @@ -23974,7 +24092,7 @@ extension EC2 { public struct DescribeVolumesModificationsResult: AWSDecodableShape { public struct _VolumesModificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The token to include in another request to get the next page of items. This value is null if there are no more items to return. + /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. public let nextToken: String? /// Information about the volume modifications. @OptionalCustomCoding> @@ -24000,9 +24118,11 @@ extension EC2 { /// The filters. attachment.attach-time - The time stamp when the attachment initiated. attachment.delete-on-termination - Whether the volume is deleted on instance termination. attachment.device - The device name specified in the block device mapping (for example, /dev/sda1). attachment.instance-id - The ID of the instance the volume is attached to. attachment.status - The attachment state (attaching | attached | detaching). availability-zone - The Availability Zone in which the volume was created. create-time - The time stamp when the volume was created. encrypted - Indicates whether the volume is encrypted (true | false) multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach (true | false) fast-restored - Indicates whether the volume was created from a snapshot that is enabled for fast snapshot restore (true | false). size - The size of the volume, in GiB. snapshot-id - The snapshot from which the volume was created. status - The state of the volume (creating | available | in-use | deleting | deleted | error). tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value. volume-id - The volume ID. volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1| standard) @OptionalCustomCoding> public var filters: [Filter]? - /// The maximum number of volumes to return for this request. This value can be between 5 and 500; if you specify a value larger than 500, only 500 items are returned. If this parameter is not used, then all items are returned. You cannot specify this parameter and the volume IDs parameter in the same request. For more information, see Pagination. + /// The maximum number of items to return for this request. 
+ /// To get the next page of items, make another request with the token returned in the output. + /// For more information, see Pagination. public let maxResults: Int? - /// The token returned from a previous paginated request. Pagination continues from the end of the items returned from the previous request. + /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? /// The volume IDs. @OptionalCustomCoding> @@ -24503,7 +24623,7 @@ extension EC2 { /// The token to use when requesting the next set of items. If there are no additional items to return, the string is empty. public let nextToken: String? - /// Information about the endpoints. + /// Information about the VPC endpoints. @OptionalCustomCoding> public var vpcEndpoints: [VpcEndpoint]? @@ -24594,7 +24714,7 @@ extension EC2 { public let maxResults: Int? /// The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. public let nextToken: String? - /// The IDs of the VPCs. Default: Describes all your VPCs. + /// The IDs of the VPCs. @OptionalCustomCoding> public var vpcIds: [String]? @@ -24625,7 +24745,7 @@ extension EC2 { /// The token to include in another request to get the next page of items. This value is null when there are no more items to return. public let nextToken: String? - /// Information about one or more VPCs. + /// Information about the VPCs. @OptionalCustomCoding> public var vpcs: [Vpc]? @@ -24843,7 +24963,7 @@ extension EC2 { } public struct DetachVerifiedAccessTrustProviderRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -26104,7 +26224,7 @@ extension EC2 { public struct DisassociateTrunkInterfaceRequest: AWSEncodableShape { /// The ID of the association public let associationId: String? - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -26123,7 +26243,7 @@ extension EC2 { } public struct DisassociateTrunkInterfaceResult: AWSDecodableShape { - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? 
/// Returns true if the request succeeds; otherwise, it returns an error. public let `return`: Bool? @@ -26661,7 +26781,7 @@ extension EC2 { } public struct ElasticGpuSpecification: AWSEncodableShape { - /// The type of Elastic Graphics accelerator. For more information about the values to specify for Type, see Elastic Graphics Basics, specifically the Elastic Graphics accelerator column, in the Amazon Elastic Compute Cloud User Guide for Windows Instances. + /// The type of Elastic Graphics accelerator. public let type: String? public init(type: String? = nil) { @@ -28506,23 +28626,23 @@ extension EC2 { public let createDate: Date? /// Indicates whether the Capacity Reservation reserves capacity for EBS-optimized instance types. public let ebsOptimized: Bool? - /// The number of capacity units fulfilled by the Capacity Reservation. For more information, see - /// - /// Total target capacity in the Amazon EC2 User Guide. + /// The number of capacity units fulfilled by the Capacity Reservation. For more information, + /// see Total target + /// capacity in the Amazon EC2 User Guide. public let fulfilledCapacity: Double? /// The type of operating system for which the Capacity Reservation reserves capacity. public let instancePlatform: CapacityReservationInstancePlatform? /// The instance type for which the Capacity Reservation reserves capacity. public let instanceType: InstanceType? /// The priority of the instance type in the Capacity Reservation Fleet. For more information, - /// see - /// Instance type priority in the Amazon EC2 User Guide. + /// see Instance type + /// priority in the Amazon EC2 User Guide. public let priority: Int? /// The total number of instances for which the Capacity Reservation reserves capacity. public let totalInstanceCount: Int? - /// The weight of the instance type in the Capacity Reservation Fleet. For more information, - /// see - /// Instance type weight in the Amazon EC2 User Guide. + /// The weight of the instance type in the Capacity Reservation Fleet. For more information, see + /// Instance type + /// weight in the Amazon EC2 User Guide. public let weight: Double? public init(availabilityZone: String? = nil, availabilityZoneId: String? = nil, capacityReservationId: String? = nil, createDate: Date? = nil, ebsOptimized: Bool? = nil, fulfilledCapacity: Double? = nil, instancePlatform: CapacityReservationInstancePlatform? = nil, instanceType: InstanceType? = nil, priority: Int? = nil, totalInstanceCount: Int? = nil, weight: Double? = nil) { @@ -34109,7 +34229,7 @@ extension EC2 { public let association: InstanceNetworkInterfaceAssociation? /// The network interface attachment. public let attachment: InstanceNetworkInterfaceAttachment? - /// A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide. + /// A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide. public let connectionTrackingConfiguration: ConnectionTrackingSpecificationResponse? /// The description. public let description: String? @@ -34273,7 +34393,7 @@ extension EC2 { /// Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. 
The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true. Amazon Web Services charges for all public IPv4 addresses, including public IPv4 addresses /// associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page. public let associatePublicIpAddress: Bool? - /// A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide. + /// A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide. public let connectionTrackingSpecification: ConnectionTrackingSpecificationRequest? /// If set to true, the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance. public let deleteOnTermination: Bool? @@ -34972,7 +35092,7 @@ extension EC2 { public let autoRecoverySupported: Bool? /// Indicates whether the instance is a bare metal instance type. public let bareMetal: Bool? - /// Indicates whether the instance type is a burstable performance T instance type. For more information, see Burstable performance instances. + /// Indicates whether the instance type is a burstable performance T instance type. For more information, see Burstable performance instances. public let burstablePerformanceSupported: Bool? /// Indicates whether the instance type is current generation. public let currentGeneration: Bool? @@ -35012,6 +35132,8 @@ extension EC2 { public let nitroTpmInfo: NitroTpmInfo? /// Indicates whether NitroTPM is supported. public let nitroTpmSupport: NitroTpmSupport? + /// Indicates whether a local Precision Time Protocol (PTP) hardware clock (PHC) is supported. + public let phcSupport: PhcSupport? /// Describes the placement group settings for the instance type. public let placementGroupInfo: PlacementGroupInfo? /// Describes the processor. @@ -35031,7 +35153,7 @@ extension EC2 { /// Describes the vCPU configurations for the instance type. public let vCpuInfo: VCpuInfo? - public init(autoRecoverySupported: Bool? = nil, bareMetal: Bool? = nil, burstablePerformanceSupported: Bool? = nil, currentGeneration: Bool? = nil, dedicatedHostsSupported: Bool? = nil, ebsInfo: EbsInfo? = nil, fpgaInfo: FpgaInfo? = nil, freeTierEligible: Bool? = nil, gpuInfo: GpuInfo? = nil, hibernationSupported: Bool? = nil, hypervisor: InstanceTypeHypervisor? = nil, inferenceAcceleratorInfo: InferenceAcceleratorInfo? = nil, instanceStorageInfo: InstanceStorageInfo? = nil, instanceStorageSupported: Bool? = nil, instanceType: InstanceType? = nil, mediaAcceleratorInfo: MediaAcceleratorInfo? = nil, memoryInfo: MemoryInfo? = nil, networkInfo: NetworkInfo? = nil, neuronInfo: NeuronInfo? = nil, nitroEnclavesSupport: NitroEnclavesSupport? = nil, nitroTpmInfo: NitroTpmInfo? = nil, nitroTpmSupport: NitroTpmSupport? = nil, placementGroupInfo: PlacementGroupInfo? = nil, processorInfo: ProcessorInfo? = nil, supportedBootModes: [BootModeType]? = nil, supportedRootDeviceTypes: [RootDeviceType]? 
= nil, supportedUsageClasses: [UsageClassType]? = nil, supportedVirtualizationTypes: [VirtualizationType]? = nil, vCpuInfo: VCpuInfo? = nil) { + public init(autoRecoverySupported: Bool? = nil, bareMetal: Bool? = nil, burstablePerformanceSupported: Bool? = nil, currentGeneration: Bool? = nil, dedicatedHostsSupported: Bool? = nil, ebsInfo: EbsInfo? = nil, fpgaInfo: FpgaInfo? = nil, freeTierEligible: Bool? = nil, gpuInfo: GpuInfo? = nil, hibernationSupported: Bool? = nil, hypervisor: InstanceTypeHypervisor? = nil, inferenceAcceleratorInfo: InferenceAcceleratorInfo? = nil, instanceStorageInfo: InstanceStorageInfo? = nil, instanceStorageSupported: Bool? = nil, instanceType: InstanceType? = nil, mediaAcceleratorInfo: MediaAcceleratorInfo? = nil, memoryInfo: MemoryInfo? = nil, networkInfo: NetworkInfo? = nil, neuronInfo: NeuronInfo? = nil, nitroEnclavesSupport: NitroEnclavesSupport? = nil, nitroTpmInfo: NitroTpmInfo? = nil, nitroTpmSupport: NitroTpmSupport? = nil, phcSupport: PhcSupport? = nil, placementGroupInfo: PlacementGroupInfo? = nil, processorInfo: ProcessorInfo? = nil, supportedBootModes: [BootModeType]? = nil, supportedRootDeviceTypes: [RootDeviceType]? = nil, supportedUsageClasses: [UsageClassType]? = nil, supportedVirtualizationTypes: [VirtualizationType]? = nil, vCpuInfo: VCpuInfo? = nil) { self.autoRecoverySupported = autoRecoverySupported self.bareMetal = bareMetal self.burstablePerformanceSupported = burstablePerformanceSupported @@ -35054,6 +35176,7 @@ extension EC2 { self.nitroEnclavesSupport = nitroEnclavesSupport self.nitroTpmInfo = nitroTpmInfo self.nitroTpmSupport = nitroTpmSupport + self.phcSupport = phcSupport self.placementGroupInfo = placementGroupInfo self.processorInfo = processorInfo self.supportedBootModes = supportedBootModes @@ -35086,6 +35209,7 @@ extension EC2 { case nitroEnclavesSupport = "nitroEnclavesSupport" case nitroTpmInfo = "nitroTpmInfo" case nitroTpmSupport = "nitroTpmSupport" + case phcSupport = "phcSupport" case placementGroupInfo = "placementGroupInfo" case processorInfo = "processorInfo" case supportedBootModes = "supportedBootModes" @@ -35666,7 +35790,7 @@ extension EC2 { public let ownerId: String? /// The depth of pools in your IPAM pool. The pool depth quota is 10. For more information, see Quotas in IPAM in the Amazon VPC IPAM User Guide. public let poolDepth: Int? - /// The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is BYOIP. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide. + /// The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is BYOIP. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide. public let publicIpSource: IpamPoolPublicIpSource? /// Determines if a pool is publicly advertisable. This option is not available for pools with AddressFamily set to ipv4. public let publiclyAdvertisable: Bool? @@ -36172,7 +36296,7 @@ extension EC2 { } public struct Ipv4PrefixSpecification: AWSDecodableShape { - /// The IPv4 prefix. 
For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide. + /// The IPv4 prefix. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide. public let ipv4Prefix: String? public init(ipv4Prefix: String? = nil) { @@ -36185,7 +36309,7 @@ extension EC2 { } public struct Ipv4PrefixSpecificationRequest: AWSEncodableShape & AWSDecodableShape { - /// The IPv4 prefix. For information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide. + /// The IPv4 prefix. For information, see Assigning prefixes to network interfaces in the Amazon EC2 User Guide. public let ipv4Prefix: String? public init(ipv4Prefix: String? = nil) { @@ -37120,7 +37244,7 @@ extension EC2 { /// Indicates whether to associate a public IPv4 address with eth0 for a new network interface. Amazon Web Services charges for all public IPv4 addresses, including public IPv4 addresses /// associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page. public let associatePublicIpAddress: Bool? - /// A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide. + /// A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Idle connection tracking timeout in the Amazon EC2 User Guide. public let connectionTrackingSpecification: ConnectionTrackingSpecification? /// Indicates whether the network interface is deleted when the instance is terminated. public let deleteOnTermination: Bool? @@ -37229,7 +37353,7 @@ extension EC2 { /// Associates a public IPv4 address with eth0 for a new network interface. Amazon Web Services charges for all public IPv4 addresses, including public IPv4 addresses /// associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page. public let associatePublicIpAddress: Bool? - /// A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide. + /// A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Idle connection tracking timeout in the Amazon EC2 User Guide. public let connectionTrackingSpecification: ConnectionTrackingSpecificationRequest? /// Indicates whether the network interface is deleted when the instance is terminated. public let deleteOnTermination: Bool? @@ -37242,7 +37366,7 @@ extension EC2 { /// The IDs of one or more security groups. @OptionalCustomCoding> public var groups: [String]? - /// The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide. If you are not creating an EFA, specify interface or omit this parameter. Valid values: interface | efa + /// The type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. 
For more information, see Elastic Fabric Adapter in the Amazon EC2 User Guide. If you are not creating an EFA, specify interface or omit this parameter. Valid values: interface | efa public let interfaceType: String? /// The number of IPv4 prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv4Prefix option. public let ipv4PrefixCount: Int? @@ -38650,7 +38774,7 @@ extension EC2 { public let dryRun: Bool? /// The name of the Availability Zone group, Local Zone group, or Wavelength Zone group. public let groupName: String? - /// Indicates whether you are opted in to the Local Zone group or Wavelength Zone group. The only valid value is opted-in. You must contact Amazon Web Services Support to opt out of a Local Zone or Wavelength Zone group. + /// Indicates whether to opt in to the zone group. The only valid value is opted-in. You must contact Amazon Web Services Support to opt out of a Local Zone or Wavelength Zone group. public let optInStatus: ModifyAvailabilityZoneOptInStatus? public init(dryRun: Bool? = nil, groupName: String? = nil, optInStatus: ModifyAvailabilityZoneOptInStatus? = nil) { @@ -38696,11 +38820,12 @@ extension EC2 { /// cancel it using the CancelCapacityReservationFleet action. You can't specify RemoveEndDate and /// EndDate in the same request. public let removeEndDate: Bool? - /// The total number of capacity units to be reserved by the Capacity Reservation Fleet. This value, - /// together with the instance type weights that you assign to each instance type used by the Fleet - /// determine the number of instances for which the Fleet reserves capacity. Both values are based on - /// units that make sense for your workload. For more information, see Total target capacity - /// in the Amazon EC2 User Guide. + /// The total number of capacity units to be reserved by the Capacity Reservation Fleet. This + /// value, together with the instance type weights that you assign to each instance type + /// used by the Fleet determine the number of instances for which the Fleet reserves + /// capacity. Both values are based on units that make sense for your workload. For more + /// information, see Total target + /// capacity in the Amazon EC2 User Guide. public let totalTargetCapacity: Int? public init(capacityReservationFleetId: String? = nil, dryRun: Bool? = nil, endDate: Date? = nil, removeEndDate: Bool? = nil, totalTargetCapacity: Int? = nil) { @@ -38916,7 +39041,7 @@ extension EC2 { public struct ModifyEbsDefaultKmsKeyIdRequest: AWSEncodableShape { /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS key using any of the following: Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. 
Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. Amazon EBS does not support asymmetric KMS keys. + /// The identifier of the KMS key to use for Amazon EBS encryption. If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified, the encrypted state must be true. You can specify the KMS key using any of the following: Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Key alias. For example, alias/ExampleAlias. Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails. Amazon EBS does not support asymmetric KMS keys. public let kmsKeyId: String? public init(dryRun: Bool? = nil, kmsKeyId: String? = nil) { @@ -39077,9 +39202,9 @@ extension EC2 { /// The IDs of the Dedicated Hosts to modify. @OptionalCustomCoding> public var hostIds: [String]? - /// Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide. + /// Indicates whether to enable or disable host maintenance for the Dedicated Host. For more information, see Host maintenance in the Amazon EC2 User Guide. public let hostMaintenance: HostMaintenance? - /// Indicates whether to enable or disable host recovery for the Dedicated Host. For more information, see Host recovery in the Amazon EC2 User Guide. + /// Indicates whether to enable or disable host recovery for the Dedicated Host. For more information, see Host recovery in the Amazon EC2 User Guide. public let hostRecovery: HostRecovery? /// Specifies the instance family to be supported by the Dedicated Host. Specify this parameter to modify a Dedicated Host to support multiple instance types within its current instance family. If you want to modify a Dedicated Host to support a specific instance type only, omit this parameter and specify InstanceType instead. You cannot specify InstanceFamily and InstanceType in the same request. public let instanceFamily: String? @@ -39248,7 +39373,7 @@ extension EC2 { /// Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated. You can't modify the DeleteOnTermination attribute for volumes that are attached to Fargate tasks. To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Update the block device mapping when launching an instance in the Amazon EC2 User Guide. @OptionalCustomCoding> public var blockDeviceMappings: [InstanceBlockDeviceMappingSpecification]? - /// Indicates whether an instance is enabled for stop protection. For more information, see Stop Protection. + /// Indicates whether an instance is enabled for stop protection. For more information, see Enable stop protection for your instance. public let disableApiStop: AttributeBooleanValue? /// If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances. 
public let disableApiTermination: AttributeBooleanValue? @@ -40553,7 +40678,7 @@ extension EC2 { } public struct ModifyTrafficMirrorFilterRuleResult: AWSDecodableShape { - /// Modifies a Traffic Mirror rule. + /// Tags are not returned for ModifyTrafficMirrorFilterRule. A Traffic Mirror rule. public let trafficMirrorFilterRule: TrafficMirrorFilterRule? public init(trafficMirrorFilterRule: TrafficMirrorFilterRule? = nil) { @@ -40886,7 +41011,7 @@ extension EC2 { } public struct ModifyVerifiedAccessEndpointPolicyRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -40940,7 +41065,7 @@ extension EC2 { } public struct ModifyVerifiedAccessEndpointRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access endpoint. public let description: String? @@ -40995,7 +41120,7 @@ extension EC2 { } public struct ModifyVerifiedAccessGroupPolicyRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -41049,7 +41174,7 @@ extension EC2 { } public struct ModifyVerifiedAccessGroupRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access group. public let description: String? @@ -41093,7 +41218,7 @@ extension EC2 { public struct ModifyVerifiedAccessInstanceLoggingConfigurationRequest: AWSEncodableShape { /// The configuration options for Verified Access instances. public let accessLogs: VerifiedAccessLogOptions? - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? 
/// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -41129,7 +41254,7 @@ extension EC2 { } public struct ModifyVerifiedAccessInstanceRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access instance. public let description: String? @@ -41217,7 +41342,7 @@ extension EC2 { } public struct ModifyVerifiedAccessTrustProviderRequest: AWSEncodableShape { - /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency. public let clientToken: String? /// A description for the Verified Access trust provider. public let description: String? @@ -41295,7 +41420,7 @@ extension EC2 { /// built on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS. Default: The existing value is retained if you keep the same volume type. If you change the volume type to io1, io2, or gp3, the default is 3,000. public let iops: Int? /// Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the - /// volume to up to 16 + /// volume to up to 16 /// Nitro-based instances in the same Availability Zone. This parameter is /// supported with io1 and io2 volumes only. For more information, see /// @@ -42174,7 +42299,7 @@ extension EC2 { public var natGatewayAddresses: [NatGatewayAddress]? /// The ID of the NAT gateway. public let natGatewayId: String? - /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center. + /// Reserved. If you need to sustain traffic greater than the documented limits, contact Amazon Web Services Support. public let provisionedBandwidth: ProvisionedBandwidth? /// The state of the NAT gateway. pending: The NAT gateway is being created and is not ready to process traffic. failed: The NAT gateway could not be created. Check the failureCode and failureMessage fields for the reason. available: The NAT gateway is able to process traffic. This status remains until you delete the NAT gateway, and does not indicate the health of the NAT gateway. deleting: The NAT gateway is in the process of being terminated and may still be processing traffic. deleted: The NAT gateway has been terminated and is no longer processing traffic. public let state: NatGatewayState? @@ -42263,7 +42388,7 @@ extension EC2 { public struct _EntriesEncoding: ArrayCoderProperties { public static let member = "item" } public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } - /// Any associations between the network ACL and one or more subnets + /// Any associations between the network ACL and your subnets @OptionalCustomCoding> public var associations: [NetworkAclAssociation]? /// The entries (rules) in the network ACL. 
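// Editor's sketch of the idempotency pattern shared by the ModifyVerifiedAccess* requests above;
// it is illustrative and not part of the generated model. A client token must stay the same
// across retries of one logical modification so EC2 can deduplicate it. Some shapes in this file
// default the token via `idempotencyToken()` (see RunInstancesRequest below); supplying a UUID
// string of your own is equivalent.
import Foundation

// Generate once per modification and reuse on every retry of that same modification.
let verifiedAccessClientToken = UUID().uuidString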
@@ -42436,7 +42561,7 @@ extension EC2 { public let efaInfo: EfaInfo? /// Indicates whether Elastic Fabric Adapter (EFA) is supported. public let efaSupported: Bool? - /// Indicates whether the instance type supports ENA Express. ENA Express uses Amazon Web Services Scalable Reliable Datagram (SRD) technology to increase the maximum bandwidth used per stream and minimize tail latency of network traffic between EC2 instances. + /// Indicates whether the instance type supports ENA Express. ENA Express uses Amazon Web Services Scalable Reliable Datagram (SRD) technology to increase the maximum bandwidth used per stream and minimize tail latency of network traffic between EC2 instances. public let enaSrdSupported: Bool? /// Indicates whether Elastic Network Adapter (ENA) is supported. public let enaSupport: EnaSupport? @@ -42777,7 +42902,7 @@ extension EC2 { public let attachment: NetworkInterfaceAttachment? /// The Availability Zone. public let availabilityZone: String? - /// A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide. + /// A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide. public let connectionTrackingConfiguration: ConnectionTrackingConfiguration? /// Indicates whether a network interface with an IPv6 address is unreachable from the public internet. If the value is true, inbound traffic from the internet is dropped and you cannot assign an elastic IP address to the network interface. The network interface is reachable from peered VPCs and resources connected through a transit gateway, including on-premises networks. public let denyAllIgwTraffic: Bool? @@ -43029,7 +43154,7 @@ extension EC2 { public struct NetworkInterfaceIpv6Address: AWSDecodableShape { /// The IPv6 address. public let ipv6Address: String? - /// Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see ModifyNetworkInterfaceAttribute. + /// Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see ModifyNetworkInterfaceAttribute. public let isPrimaryIpv6: Bool? public init(ipv6Address: String? = nil, isPrimaryIpv6: Bool? = nil) { @@ -43271,9 +43396,9 @@ extension EC2 { public let allocationStrategy: FleetOnDemandAllocationStrategy? /// The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type instant. public let capacityReservationOptions: CapacityReservationOptions? - /// The maximum amount per hour for On-Demand Instances that you're willing to pay. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. 
The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. + /// The maximum amount per hour for On-Demand Instances that you're willing to pay. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide. public let maxTotalPrice: String? - /// The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType + /// The minimum target capacity for On-Demand Instances in the fleet. If this minimum capacity isn't reached, no instances are launched. Constraints: Maximum value of 1000. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? /// Indicates that the fleet launches all On-Demand Instances into a single Availability Zone. Supported only for fleets of type instant. public let singleAvailabilityZone: Bool? @@ -43304,9 +43429,9 @@ extension EC2 { public let allocationStrategy: FleetOnDemandAllocationStrategy? /// The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type instant. public let capacityReservationOptions: CapacityReservationOptionsRequest? - /// The maximum amount per hour for On-Demand Instances that you're willing to pay. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. + /// The maximum amount per hour for On-Demand Instances that you're willing to pay. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide. public let maxTotalPrice: String? - /// The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType + /// The minimum target capacity for On-Demand Instances in the fleet. If this minimum capacity isn't reached, no instances are launched. Constraints: Maximum value of 1000. Supported only for fleets of type instant. 
At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? /// Indicates that the fleet launches all On-Demand Instances into a single Availability Zone. Supported only for fleets of type instant. public let singleAvailabilityZone: Bool? @@ -44323,7 +44448,7 @@ extension EC2 { /// The architectures supported by the instance type. @OptionalCustomCoding> public var supportedArchitectures: [ArchitectureType]? - /// Indicates whether the instance type supports AMD SEV-SNP. If the request returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. For more information, see AMD SEV-SNP. + /// Indicates whether the instance type supports AMD SEV-SNP. If the request returns amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. For more information, see AMD SEV-SNP. @OptionalCustomCoding> public var supportedFeatures: [SupportedAdditionalProcessorFeature]? /// The speed of the processor, in GHz. @@ -44474,7 +44599,7 @@ extension EC2 { public let cidr: String? /// A signed document that proves that you are authorized to bring a specified IP address range to Amazon using BYOIP. This option applies to public pools only. public let cidrAuthorizationContext: IpamCidrAuthorizationContext? - /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency. + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. public let clientToken: String? /// A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -44558,15 +44683,15 @@ extension EC2 { } public struct ProvisionedBandwidth: AWSDecodableShape { - /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center. + /// Reserved. public let provisioned: String? - /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center. + /// Reserved. public let provisionTime: Date? - /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center. + /// Reserved. public let requested: String? - /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center. + /// Reserved. public let requestTime: Date? - /// Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center. + /// Reserved. public let status: String? public init(provisioned: String? = nil, provisionTime: Date? = nil, requested: String? = nil, requestTime: Date? = nil, status: String? = nil) { @@ -44876,7 +45001,7 @@ extension EC2 { } public struct PurchaseReservedInstancesOfferingResult: AWSDecodableShape { - /// The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted pricing tier, the final Reserved Instances IDs might change. For more information, see Crossing pricing tiers in the Amazon Elastic Compute Cloud User Guide. + /// The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted pricing tier, the final Reserved Instances IDs might change. 
For more information, see Crossing pricing tiers in the Amazon EC2 User Guide. public let reservedInstancesId: String? public init(reservedInstancesId: String? = nil) { @@ -46014,11 +46139,11 @@ extension EC2 { public var blockDeviceMappings: [LaunchTemplateBlockDeviceMappingRequest]? /// The Capacity Reservation targeting option. If you do not specify this parameter, the instance's Capacity Reservation preference defaults to open, which enables it to run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). public let capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationRequest? - /// The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User Guide. + /// The CPU options for the instance. For more information, see Optimize CPU options in the Amazon EC2 User Guide. public let cpuOptions: LaunchTemplateCpuOptionsRequest? /// The credit option for CPU usage of the instance. Valid only for T instances. public let creditSpecification: CreditSpecificationRequest? - /// Indicates whether to enable the instance for stop protection. For more information, see Stop protection in the Amazon Elastic Compute Cloud User Guide. + /// Indicates whether to enable the instance for stop protection. For more information, see Enable stop protection for your instance in the Amazon EC2 User Guide. public let disableApiStop: Bool? /// If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance. public let disableApiTermination: Bool? @@ -46030,13 +46155,13 @@ extension EC2 { /// An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify accelerators from different generations in the same request. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. @OptionalCustomCoding> public var elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAccelerator]? - /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. + /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. public let enclaveOptions: LaunchTemplateEnclaveOptionsRequest? 
- /// Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide. + /// Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. public let hibernationOptions: LaunchTemplateHibernationOptionsRequest? /// The name or Amazon Resource Name (ARN) of an IAM instance profile. public let iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecificationRequest? - /// The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch. Valid formats: ami-17characters00000 resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label resolve:ssm:public-parameter Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide. + /// The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch. Valid formats: ami-17characters00000 resolve:ssm:parameter-name resolve:ssm:parameter-name:version-number resolve:ssm:parameter-name:label resolve:ssm:public-parameter Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. public let imageId: String? /// Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown). Default: stop public let instanceInitiatedShutdownBehavior: ShutdownBehavior? @@ -46044,9 +46169,9 @@ extension EC2 { public let instanceMarketOptions: LaunchTemplateInstanceMarketOptionsRequest? /// The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes. You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default. When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values. To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request: AllowedInstanceTypes - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes. ExcludedInstanceTypes - The instance types to exclude from the list, even if they match your specified attributes. If you specify InstanceRequirements, you can't specify InstanceType. Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. 
If you plan to use the launch template in the launch instance wizard, or with the RunInstances API or AWS::EC2::Instance Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements. For more information, see Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide. public let instanceRequirements: InstanceRequirementsRequest? - /// The instance type. For more information, see Instance types in the Amazon Elastic Compute Cloud User Guide. If you specify InstanceType, you can't specify InstanceRequirements. + /// The instance type. For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide. If you specify InstanceType, you can't specify InstanceRequirements. public let instanceType: InstanceType? - /// The ID of the kernel. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon Elastic Compute Cloud User Guide. + /// The ID of the kernel. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide. public let kernelId: String? /// The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair. If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in. public let keyName: String? @@ -46055,7 +46180,7 @@ extension EC2 { public var licenseSpecifications: [LaunchTemplateLicenseConfigurationRequest]? /// The maintenance options for the instance. public let maintenanceOptions: LaunchTemplateInstanceMaintenanceOptionsRequest? - /// The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide. + /// The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide. public let metadataOptions: LaunchTemplateInstanceMetadataOptionsRequest? /// The monitoring for the instance. public let monitoring: LaunchTemplatesMonitoringRequest? @@ -46066,7 +46191,7 @@ extension EC2 { public let placement: LaunchTemplatePlacementRequest? /// The options for the instance hostname. The default values are inherited from the subnet. public let privateDnsNameOptions: LaunchTemplatePrivateDnsNameOptionsRequest? - /// The ID of the RAM disk. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon Elastic Compute Cloud User Guide. + /// The ID of the RAM disk. We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see User provided kernels in the Amazon EC2 User Guide. public let ramDiskId: String? /// The IDs of the security groups. If you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter. @OptionalCustomCoding> @@ -46077,7 +46202,7 @@ extension EC2 { /// The tags to apply to the resources that are created during instance launch. These tags are not applied to the launch template. @OptionalCustomCoding> public var tagSpecifications: [LaunchTemplateTagSpecificationRequest]? - /// The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. 
For more information, see Run commands on your Linux instance at launch (Linux) or Work with instance user data (Windows) in the Amazon Elastic Compute Cloud User Guide. If you are creating the launch template for use with Batch, the user data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide. + /// The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide. If you are creating the launch template for use with Batch, the user data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide. public let userData: String? public init(blockDeviceMappings: [LaunchTemplateBlockDeviceMappingRequest]? = nil, capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationRequest? = nil, cpuOptions: LaunchTemplateCpuOptionsRequest? = nil, creditSpecification: CreditSpecificationRequest? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecifications: [ElasticGpuSpecification]? = nil, elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAccelerator]? = nil, enclaveOptions: LaunchTemplateEnclaveOptionsRequest? = nil, hibernationOptions: LaunchTemplateHibernationOptionsRequest? = nil, iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecificationRequest? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: LaunchTemplateInstanceMarketOptionsRequest? = nil, instanceRequirements: InstanceRequirementsRequest? = nil, instanceType: InstanceType? = nil, kernelId: String? = nil, keyName: String? = nil, licenseSpecifications: [LaunchTemplateLicenseConfigurationRequest]? = nil, maintenanceOptions: LaunchTemplateInstanceMaintenanceOptionsRequest? = nil, metadataOptions: LaunchTemplateInstanceMetadataOptionsRequest? = nil, monitoring: LaunchTemplatesMonitoringRequest? = nil, networkInterfaces: [LaunchTemplateInstanceNetworkInterfaceSpecificationRequest]? = nil, placement: LaunchTemplatePlacementRequest? = nil, privateDnsNameOptions: LaunchTemplatePrivateDnsNameOptionsRequest? = nil, ramDiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, tagSpecifications: [LaunchTemplateTagSpecificationRequest]? = nil, userData: String? = nil) { @@ -46197,7 +46322,7 @@ extension EC2 { public let availabilityZoneGroup: String? /// Deprecated. public let blockDurationMinutes: Int? - /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency in the Amazon EC2 User Guide for Linux Instances. + /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency in Amazon EC2 API requests in the Amazon EC2 User Guide. public let clientToken: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -46404,16 +46529,16 @@ extension EC2 { public let instancePlatform: CapacityReservationInstancePlatform? 
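// A minimal sketch showing how the launch template data shape documented above could be filled
// in; it is not part of the generated model. The shape name `RequestLaunchTemplateData`, the
// `.t3Micro` enum case, and the SSM public-parameter alias are assumptions (only initializer
// parameters listed in this file are used); the user-data script is a placeholder.
import Foundation
import SotoEC2

let bootScript = "#!/bin/bash\necho \"bootstrapping\""
let launchTemplateData = EC2.RequestLaunchTemplateData(
    disableApiStop: true,  // enable stop protection, per the documentation above
    imageId: "resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64",
    instanceType: .t3Micro,
    userData: Data(bootScript.utf8).base64EncodedString()  // must be base64-encoded, max 16 KB
)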
/// The instance type for which the Capacity Reservation Fleet reserves capacity. public let instanceType: InstanceType? - /// The priority to assign to the instance type. This value is used to determine which of the instance types - /// specified for the Fleet should be prioritized for use. A lower value indicates a high priority. For more - /// information, see Instance type priority - /// in the Amazon EC2 User Guide. + /// The priority to assign to the instance type. This value is used to determine which of the + /// instance types specified for the Fleet should be prioritized for use. A lower value + /// indicates a high priority. For more information, see Instance type + /// priority in the Amazon EC2 User Guide. public let priority: Int? - /// The number of capacity units provided by the specified instance type. This value, together with the - /// total target capacity that you specify for the Fleet determine the number of instances for which the - /// Fleet reserves capacity. Both values are based on units that make sense for your workload. For more - /// information, see Total target capacity - /// in the Amazon EC2 User Guide. + /// The number of capacity units provided by the specified instance type. This value, together + /// with the total target capacity that you specify for the Fleet determine the number of + /// instances for which the Fleet reserves capacity. Both values are based on units that + /// make sense for your workload. For more information, see Total target + /// capacity in the Amazon EC2 User Guide. public let weight: Double? public init(availabilityZone: String? = nil, availabilityZoneId: String? = nil, ebsOptimized: Bool? = nil, instancePlatform: CapacityReservationInstancePlatform? = nil, instanceType: InstanceType? = nil, priority: Int? = nil, weight: Double? = nil) { @@ -47086,11 +47211,11 @@ extension EC2 { public var blockDeviceMappings: [LaunchTemplateBlockDeviceMapping]? /// Information about the Capacity Reservation targeting option. public let capacityReservationSpecification: LaunchTemplateCapacityReservationSpecificationResponse? - /// The CPU options for the instance. For more information, see Optimizing CPU options in the Amazon Elastic Compute Cloud User Guide. + /// The CPU options for the instance. For more information, see Optimize CPU options in the Amazon EC2 User Guide. public let cpuOptions: LaunchTemplateCpuOptions? /// The credit option for CPU usage of the instance. public let creditSpecification: CreditSpecification? - /// Indicates whether the instance is enabled for stop protection. For more information, see Stop protection in the Amazon Elastic Compute Cloud User Guide. + /// Indicates whether the instance is enabled for stop protection. For more information, see Enable stop protection for your instance in the Amazon EC2 User Guide. public let disableApiStop: Bool? /// If set to true, indicates that the instance cannot be terminated using the Amazon EC2 console, command line tool, or API. public let disableApiTermination: Bool? @@ -47104,11 +47229,11 @@ extension EC2 { public var elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAcceleratorResponse]? /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. public let enclaveOptions: LaunchTemplateEnclaveOptions? - /// Indicates whether an instance is configured for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide. + /// Indicates whether an instance is configured for hibernation. 
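// Editor's worked example for the weight and total-target-capacity fields described above (not
// generated code): the Fleet reserves roughly TotalTargetCapacity / weight instances of a given
// instance type. The numbers below are arbitrary illustrations.
let totalTargetCapacityUnits = 40.0
let capacityUnitsPerInstance = 4.0  // the `weight` assigned to this instance type
let instancesReserved = totalTargetCapacityUnits / capacityUnitsPerInstance  // 10 instances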
For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. public let hibernationOptions: LaunchTemplateHibernationOptions? /// The IAM instance profile. public let iamInstanceProfile: LaunchTemplateIamInstanceProfileSpecification? - /// The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will resolve to the ID of the AMI at instance launch. The value depends on what you specified in the request. The possible values are: If an AMI ID was specified in the request, then this is the AMI ID. If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store. If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide. + /// The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will resolve to the ID of the AMI at instance launch. The value depends on what you specified in the request. The possible values are: If an AMI ID was specified in the request, then this is the AMI ID. If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store. If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value. For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide. public let imageId: String? /// Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown). public let instanceInitiatedShutdownBehavior: ShutdownBehavior? @@ -47127,7 +47252,7 @@ extension EC2 { public var licenseSpecifications: [LaunchTemplateLicenseConfiguration]? /// The maintenance options for your instance. public let maintenanceOptions: LaunchTemplateInstanceMaintenanceOptions? - /// The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide. + /// The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide. public let metadataOptions: LaunchTemplateInstanceMetadataOptions? /// The monitoring for the instance. public let monitoring: LaunchTemplatesMonitoring? @@ -47720,7 +47845,7 @@ extension EC2 { public struct _RoutesEncoding: ArrayCoderProperties { public static let member = "item" } public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The associations between the route table and one or more subnets or a gateway. + /// The associations between the route table and your subnets or gateways. @OptionalCustomCoding> public var associations: [RouteTableAssociation]? /// The ID of the Amazon Web Services account that owns the route table. @@ -47922,7 +48047,7 @@ extension EC2 { public let enablePrimaryIpv6: Bool? /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. 
public let enclaveOptions: EnclaveOptionsRequest? - /// Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your instance in the Amazon EC2 User Guide. You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. + /// Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. public let hibernationOptions: HibernationOptionsRequest? /// The name or Amazon Resource Name (ARN) of an IAM instance profile. public let iamInstanceProfile: IamInstanceProfileSpecification? @@ -47932,7 +48057,7 @@ extension EC2 { public let instanceInitiatedShutdownBehavior: ShutdownBehavior? /// The market (purchasing) option for the instances. For RunInstances, persistent Spot Instance requests are only supported when InstanceInterruptionBehavior is set to either hibernate or stop. public let instanceMarketOptions: InstanceMarketOptionsRequest? - /// The instance type. For more information, see Instance types in the Amazon EC2 User Guide. + /// The instance type. For more information, see Amazon EC2 instance types in the Amazon EC2 User Guide. public let instanceType: InstanceType? /// The number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch. You cannot specify this option and the network interfaces option in the same request. public let ipv6AddressCount: Int? @@ -47950,11 +48075,11 @@ extension EC2 { public var licenseSpecifications: [LicenseConfigurationRequest]? /// The maintenance and recovery options for the instance. public let maintenanceOptions: InstanceMaintenanceOptionsRequest? - /// The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount. Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 FAQ. + /// The maximum number of instances to launch. If you specify a value that is more capacity than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above the specified minimum count. Constraints: Between 1 and the quota for the specified instance type for your account for this Region. For more information, see Amazon EC2 instance type quotas. public let maxCount: Int? /// The metadata options for the instance. For more information, see Instance metadata and user data. public let metadataOptions: InstanceMetadataOptionsRequest? - /// The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances. Constraints: Between 1 and the maximum number you're allowed for the specified instance type. 
For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ. + /// The minimum number of instances to launch. If you specify a value that is more capacity than Amazon EC2 can provide in the target Availability Zone, Amazon EC2 does not launch any instances. Constraints: Between 1 and the quota for the specified instance type for your account for this Region. For more information, see Amazon EC2 instance type quotas. public let minCount: Int? /// Specifies whether detailed monitoring is enabled for the instance. public let monitoring: RunInstancesMonitoringEnabled? @@ -47980,7 +48105,7 @@ extension EC2 { /// The tags to apply to the resources that are created during instance launch. You can specify tags for the following resources only: Instances Volumes Spot Instance requests Network interfaces To tag a resource after it has been created, see CreateTags. @OptionalCustomCoding> public var tagSpecifications: [TagSpecification]? - /// The user data script to make available to the instance. For more information, see Run commands on your Linux instance at launch and Run commands on your Windows instance at launch. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB. + /// The user data script to make available to the instance. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB. public let userData: String? public init(additionalInfo: String? = nil, blockDeviceMappings: [BlockDeviceMapping]? = nil, capacityReservationSpecification: CapacityReservationSpecification? = nil, clientToken: String? = RunInstancesRequest.idempotencyToken(), cpuOptions: CpuOptionsRequest? = nil, creditSpecification: CreditSpecificationRequest? = nil, disableApiStop: Bool? = nil, disableApiTermination: Bool? = nil, dryRun: Bool? = nil, ebsOptimized: Bool? = nil, elasticGpuSpecification: [ElasticGpuSpecification]? = nil, elasticInferenceAccelerators: [ElasticInferenceAccelerator]? = nil, enablePrimaryIpv6: Bool? = nil, enclaveOptions: EnclaveOptionsRequest? = nil, hibernationOptions: HibernationOptionsRequest? = nil, iamInstanceProfile: IamInstanceProfileSpecification? = nil, imageId: String? = nil, instanceInitiatedShutdownBehavior: ShutdownBehavior? = nil, instanceMarketOptions: InstanceMarketOptionsRequest? = nil, instanceType: InstanceType? = nil, ipv6AddressCount: Int? = nil, ipv6Addresses: [InstanceIpv6Address]? = nil, kernelId: String? = nil, keyName: String? = nil, launchTemplate: LaunchTemplateSpecification? = nil, licenseSpecifications: [LicenseConfigurationRequest]? = nil, maintenanceOptions: InstanceMaintenanceOptionsRequest? = nil, maxCount: Int? = nil, metadataOptions: InstanceMetadataOptionsRequest? = nil, minCount: Int? = nil, monitoring: RunInstancesMonitoringEnabled? = nil, networkInterfaces: [InstanceNetworkInterfaceSpecification]? = nil, placement: Placement? = nil, privateDnsNameOptions: PrivateDnsNameOptionsRequest? = nil, privateIpAddress: String? = nil, ramdiskId: String? = nil, securityGroupIds: [String]? = nil, securityGroups: [String]? = nil, subnetId: String? = nil, tagSpecifications: [TagSpecification]? 
= nil, userData: String? = nil) { @@ -49303,7 +49428,7 @@ extension EC2 { public let description: String? /// Indicates whether the snapshot is encrypted. public let encrypted: Bool? - /// The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the volume encryption key for the parent volume. + /// The Amazon Resource Name (ARN) of the KMS key that was used to protect the volume encryption key for the parent volume. public let kmsKeyId: String? /// The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the Amazon EBS User Guide. public let outpostArn: String? @@ -49323,7 +49448,7 @@ extension EC2 { public let startTime: Date? /// The snapshot state. public let state: SnapshotState? - /// Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper Key Management Service (KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots. + /// Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper KMS permissions are not obtained) this field displays error state details to help you diagnose why the error occurred. This parameter is only returned by DescribeSnapshots. public let stateMessage: String? /// The storage tier in which the snapshot is stored. standard indicates that the snapshot is stored in the standard snapshot storage tier and that it is ready for use. archive indicates that the snapshot is currently archived and that it must be restored before it can be used. public let storageTier: StorageTier? @@ -49852,7 +49977,7 @@ extension EC2 { public struct _LaunchTemplateConfigsEncoding: ArrayCoderProperties { public static let member = "item" } public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide. priceCapacityOptimized (recommended) Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. Spot Fleet then requests Spot Instances from the lowest priced of these pools. capacityOptimized Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacityOptimizedPrioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized is supported only if your Spot Fleet uses a launch template. Note that if the OnDemandAllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity. 
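// A minimal sketch of launching instances with the RunInstancesRequest shape above; it is not
// part of the generated model. It assumes a configured `EC2` service object, that Soto exposes
// the operation as `runInstances`, and that the `.t3Micro` case exists; the AMI ID placeholder
// follows the documented example format. EC2 launches up to MaxCount instances, and nothing
// launches if it cannot provide at least MinCount.
import SotoEC2

func launchSmallFleet(ec2: EC2) async throws {
    let request = EC2.RunInstancesRequest(
        imageId: "ami-17characters00000",  // placeholder in the documented format
        instanceType: .t3Micro,
        maxCount: 3,  // capped by your instance type quota for the Region
        minCount: 1   // if fewer than this can launch, no instances launch
    )
    _ = try await ec2.runInstances(request)
}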
diversified Spot Fleet requests instances from all of the Spot Instance pools that you specify. lowestPrice Spot Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, Spot Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates. Default: lowestPrice + /// The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the Spot Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide. priceCapacityOptimized (recommended) Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. Spot Fleet then requests Spot Instances from the lowest priced of these pools. capacityOptimized Spot Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacityOptimizedPrioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized is supported only if your Spot Fleet uses a launch template. Note that if the OnDemandAllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity. diversified Spot Fleet requests instances from all of the Spot Instance pools that you specify. lowestPrice (not recommended) We don't recommend the lowestPrice allocation strategy because it has the highest risk of interruption for your Spot Instances. Spot Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, Spot Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates. Default: lowestPrice public let allocationStrategy: AllocationStrategy? /// A unique, case-sensitive identifier that you provide to ensure the idempotency of your listings. This helps to avoid duplicate listings. For more information, see Ensuring Idempotency. public let clientToken: String? @@ -49880,7 +50005,7 @@ extension EC2 { public let onDemandAllocationStrategy: OnDemandAllocationStrategy? 
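// Editor's sketch tied to the allocation-strategy documentation above; illustrative only. The
// AllocationStrategy enum and its case names are assumed from Soto's usual mapping of the API
// values: `priceCapacityOptimized` is the documented recommendation, while `lowestPrice` is only
// the legacy default and carries the highest interruption risk.
import SotoEC2

let recommendedStrategy: EC2.AllocationStrategy = .priceCapacityOptimized
let legacyDefaultStrategy: EC2.AllocationStrategy = .lowestPrice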
/// The number of On-Demand units fulfilled by this request compared to the set target On-Demand capacity. public let onDemandFulfilledCapacity: Double? - /// The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The onDemandMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. + /// The maximum amount per hour for On-Demand Instances that you're willing to pay. You can use the onDemandMaxTotalPrice parameter, the spotMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The onDemandMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide. public let onDemandMaxTotalPrice: String? /// The number of On-Demand units to request. You can choose to set the target capacity in terms of instances or a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later. public let onDemandTargetCapacity: Int? @@ -49888,7 +50013,7 @@ extension EC2 { public let replaceUnhealthyInstances: Bool? /// The strategies for managing your Spot Instances that are at an elevated risk of being interrupted. public let spotMaintenanceStrategies: SpotMaintenanceStrategies? - /// The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. 
If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The spotMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. + /// The maximum amount per hour for Spot Instances that you're willing to pay. You can use the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the maximum amount you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The spotMaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide. public let spotMaxTotalPrice: String? /// The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. public let spotPrice: String? @@ -50026,7 +50151,7 @@ extension EC2 { public let spotInstanceRequestId: String? /// The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter. public let spotPrice: String? - /// The state of the Spot Instance request. Spot request status information helps track your Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide for Linux Instances. + /// The state of the Spot Instance request. Spot request status information helps track your Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide. public let state: SpotInstanceState? /// The status code and status message describing the Spot Instance request. public let status: SpotInstanceStatus? @@ -50103,7 +50228,7 @@ extension EC2 { } public struct SpotInstanceStatus: AWSDecodableShape { - /// The status code. For a list of status codes, see Spot request status codes in the Amazon EC2 User Guide for Linux Instances. + /// The status code. For a list of status codes, see Spot request status codes in the Amazon EC2 User Guide. public let code: String? /// The description for the status code. public let message: String? 
@@ -50124,7 +50249,7 @@ extension EC2 { } public struct SpotMaintenanceStrategies: AWSEncodableShape & AWSDecodableShape { - /// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances. + /// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide. public let capacityRebalance: SpotCapacityRebalance? public init(capacityRebalance: SpotCapacityRebalance? = nil) { @@ -50166,7 +50291,7 @@ extension EC2 { } public struct SpotOptions: AWSDecodableShape { - /// The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide. price-capacity-optimized (recommended) EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools. capacity-optimized EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity. diversified EC2 Fleet requests instances from all of the Spot Instance pools that you specify. lowest-price EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates. Default: lowest-price + /// The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide. price-capacity-optimized (recommended) EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. 
This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools. capacity-optimized EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity. diversified EC2 Fleet requests instances from all of the Spot Instance pools that you specify. lowest-price (not recommended) We don't recommend the lowest-price allocation strategy because it has the highest risk of interruption for your Spot Instances. EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates. Default: lowest-price public let allocationStrategy: SpotAllocationStrategy? /// The behavior when a Spot Instance is interrupted. Default: terminate public let instanceInterruptionBehavior: SpotInstanceInterruptionBehavior? @@ -50174,9 +50299,9 @@ extension EC2 { public let instancePoolsToUseCount: Int? /// The strategies for managing your workloads on your Spot Instances that will be interrupted. Currently only the capacity rebalance strategy is available. public let maintenanceStrategies: FleetSpotMaintenanceStrategies? - /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. + /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. 
If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The maxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide. public let maxTotalPrice: String? - /// The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType + /// The minimum target capacity for Spot Instances in the fleet. If this minimum capacity isn't reached, no instances are launched. Constraints: Maximum value of 1000. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? /// Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type instant. public let singleAvailabilityZone: Bool? @@ -50207,7 +50332,7 @@ extension EC2 { } public struct SpotOptionsRequest: AWSEncodableShape { - /// The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide. price-capacity-optimized (recommended) EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools. capacity-optimized EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity. diversified EC2 Fleet requests instances from all of the Spot Instance pools that you specify. lowest-price EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. 
To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates. Default: lowest-price + /// The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the Amazon EC2 User Guide. price-capacity-optimized (recommended) EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools. capacity-optimized EC2 Fleet identifies the pools with the highest capacity availability for the number of instances that are launching. This means that we will request Spot Instances from the pools that we believe have the lowest chance of interruption in the near term. To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized. Set a priority for each instance type by using the Priority parameter for LaunchTemplateOverrides. You can assign the same priority to different LaunchTemplateOverrides. EC2 implements the priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized is supported only if your EC2 Fleet uses a launch template. Note that if the On-Demand AllocationStrategy is set to prioritized, the same priority is applied when fulfilling On-Demand capacity. diversified EC2 Fleet requests instances from all of the Spot Instance pools that you specify. lowest-price (not recommended) We don't recommend the lowest-price allocation strategy because it has the highest risk of interruption for your Spot Instances. EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances come from the next lowest priced pool that has available capacity. If a pool runs out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your request by drawing from the next lowest priced pool. To ensure that your desired capacity is met, you might receive Spot Instances from several pools. Because this strategy only considers instance price and not capacity availability, it might lead to high interruption rates. Default: lowest-price public let allocationStrategy: SpotAllocationStrategy? /// The behavior when a Spot Instance is interrupted. Default: terminate public let instanceInterruptionBehavior: SpotInstanceInterruptionBehavior? @@ -50215,9 +50340,9 @@ extension EC2 { public let instancePoolsToUseCount: Int? /// The strategies for managing your Spot Instances that are at an elevated risk of being interrupted. public let maintenanceStrategies: FleetSpotMaintenanceStrategiesRequest? - /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. 
If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User Guide. + /// The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter. If your fleet includes T instances that are configured as unlimited, and if their average CPU usage exceeds the baseline utilization, you will incur a charge for surplus credits. The MaxTotalPrice does not account for surplus credits, and, if you use surplus credits, your final cost might be higher than what you specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide. public let maxTotalPrice: String? - /// The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType + /// The minimum target capacity for Spot Instances in the fleet. If this minimum capacity isn't reached, no instances are launched. Constraints: Maximum value of 1000. Supported only for fleets of type instant. At least one of the following must be specified: SingleAvailabilityZone | SingleInstanceType public let minTargetCapacity: Int? /// Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type instant. public let singleAvailabilityZone: Bool? @@ -51417,6 +51542,8 @@ extension EC2 { } public struct TrafficMirrorFilterRule: AWSDecodableShape { + public struct _TagsEncoding: ArrayCoderProperties { public static let member = "item" } + /// The description of the Traffic Mirror rule. public let description: String? /// The destination CIDR block assigned to the Traffic Mirror rule. @@ -51433,6 +51560,9 @@ extension EC2 { public let sourceCidrBlock: String? /// The source port range assigned to the Traffic Mirror rule. public let sourcePortRange: TrafficMirrorPortRange? + /// Tags on Traffic Mirroring filter rules. + @OptionalCustomCoding<ArrayCoder<_TagsEncoding, Tag>> + public var tags: [Tag]? /// The traffic direction assigned to the Traffic Mirror rule. public let trafficDirection: TrafficDirection? /// The ID of the Traffic Mirror filter that the rule is associated with. @@ -51440,7 +51570,7 @@ extension EC2 { /// The ID of the Traffic Mirror rule. public let trafficMirrorFilterRuleId: String? - public init(description: String? = nil, destinationCidrBlock: String?
= nil, destinationPortRange: TrafficMirrorPortRange? = nil, protocol: Int? = nil, ruleAction: TrafficMirrorRuleAction? = nil, ruleNumber: Int? = nil, sourceCidrBlock: String? = nil, sourcePortRange: TrafficMirrorPortRange? = nil, tags: [Tag]? = nil, trafficDirection: TrafficDirection? = nil, trafficMirrorFilterId: String? = nil, trafficMirrorFilterRuleId: String? = nil) { self.description = description self.destinationCidrBlock = destinationCidrBlock self.destinationPortRange = destinationPortRange @@ -51449,6 +51579,7 @@ extension EC2 { self.ruleNumber = ruleNumber self.sourceCidrBlock = sourceCidrBlock self.sourcePortRange = sourcePortRange + self.tags = tags self.trafficDirection = trafficDirection self.trafficMirrorFilterId = trafficMirrorFilterId self.trafficMirrorFilterRuleId = trafficMirrorFilterRuleId @@ -51463,6 +51594,7 @@ extension EC2 { case ruleNumber = "ruleNumber" case sourceCidrBlock = "sourceCidrBlock" case sourcePortRange = "sourcePortRange" + case tags = "tagSet" case trafficDirection = "trafficDirection" case trafficMirrorFilterId = "trafficMirrorFilterId" case trafficMirrorFilterRuleId = "trafficMirrorFilterRuleId" @@ -54250,7 +54382,7 @@ extension EC2 { public let fastRestored: Bool? /// The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. public let iops: Int? - /// The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the volume encryption key for the volume. + /// The Amazon Resource Name (ARN) of the KMS key that was used to protect the volume encryption key for the volume. public let kmsKeyId: String? /// Indicates whether Amazon EBS Multi-Attach is enabled. public let multiAttachEnabled: Bool? diff --git a/Sources/Soto/Services/ECR/ECR_shapes.swift b/Sources/Soto/Services/ECR/ECR_shapes.swift index f84a3ceecb..02d417340c 100644 --- a/Sources/Soto/Services/ECR/ECR_shapes.swift +++ b/Sources/Soto/Services/ECR/ECR_shapes.swift @@ -146,6 +146,7 @@ extension ECR { case dockerHub = "docker-hub" case ecrPublic = "ecr-public" case gitHubContainerRegistry = "github-container-registry" + case gitLabContainerRegistry = "gitlab-container-registry" case k8s = "k8s" case quay = "quay" public var description: String { return self.rawValue } @@ -500,7 +501,7 @@ extension ECR { public let registryId: String? /// The name of the upstream registry. public let upstreamRegistry: UpstreamRegistry? - /// The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry. Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - .azurecr.io + /// The registry URL of the upstream public registry to use as the source for the pull through cache rule. The following is the syntax to use for each supported upstream registry. 
Amazon ECR Public (ecr-public) - public.ecr.aws Docker Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - .azurecr.io GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com public let upstreamRegistryUrl: String public init(credentialArn: String? = nil, ecrRepositoryPrefix: String, registryId: String? = nil, upstreamRegistry: UpstreamRegistry? = nil, upstreamRegistryUrl: String) { diff --git a/Sources/Soto/Services/ECS/ECS_api.swift b/Sources/Soto/Services/ECS/ECS_api.swift index 9bee5dade7..5e29920a41 100644 --- a/Sources/Soto/Services/ECS/ECS_api.swift +++ b/Sources/Soto/Services/ECS/ECS_api.swift @@ -863,7 +863,9 @@ public struct ECS: AWSService { /// SIGTERM value and a default 30-second timeout, after which the /// SIGKILL value is sent and the containers are forcibly stopped. If the /// container handles the SIGTERM value gracefully and exits within 30 seconds - /// from receiving it, no SIGKILL value is sent. The default 30-second timeout can be configured on the Amazon ECS container agent with + /// from receiving it, no SIGKILL value is sent. For Windows containers, POSIX signals do not work and runtime stops the container by sending + /// a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown + /// of (Windows) container #25982 on GitHub. The default 30-second timeout can be configured on the Amazon ECS container agent with /// the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see /// Amazon ECS Container Agent Configuration in the /// Amazon Elastic Container Service Developer Guide. diff --git a/Sources/Soto/Services/ECS/ECS_shapes.swift b/Sources/Soto/Services/ECS/ECS_shapes.swift index d4adbe3ea8..4f351dddca 100644 --- a/Sources/Soto/Services/ECS/ECS_shapes.swift +++ b/Sources/Soto/Services/ECS/ECS_shapes.swift @@ -831,13 +831,17 @@ extension ECS { public struct ClusterConfiguration: AWSEncodableShape & AWSDecodableShape { /// The details of the execute command configuration. public let executeCommandConfiguration: ExecuteCommandConfiguration? + /// The details of the managed storage configuration. + public let managedStorageConfiguration: ManagedStorageConfiguration? - public init(executeCommandConfiguration: ExecuteCommandConfiguration? = nil) { + public init(executeCommandConfiguration: ExecuteCommandConfiguration? = nil, managedStorageConfiguration: ManagedStorageConfiguration? = nil) { self.executeCommandConfiguration = executeCommandConfiguration + self.managedStorageConfiguration = managedStorageConfiguration } private enum CodingKeys: String, CodingKey { case executeCommandConfiguration = "executeCommandConfiguration" + case managedStorageConfiguration = "managedStorageConfiguration" } } @@ -2476,6 +2480,8 @@ extension ECS { /// stopped. Once a service deployment has one or more successfully running tasks, the failed /// task count resets to zero and stops being evaluated. public let failedTasks: Int? + /// The Fargate ephemeral storage settings for the deployment. + public let fargateEphemeralStorage: DeploymentEphemeralStorage? /// The ID of the deployment. public let id: String? /// The launch type the tasks in the service are using. For more information, see Amazon ECS @@ -2535,11 +2541,12 @@ extension ECS { /// must match the name from the task definition. public let volumeConfigurations: [ServiceVolumeConfiguration]? 
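// [Editor's illustration, not generated code] The ECR hunk above lists the upstream
// registry URL syntax for pull through cache rules, now including GitLab. The same mapping
// as a lookup table, using only the UpstreamRegistry cases and URLs shown in this patch;
// the Azure entry is omitted because its hostname is specific to each registry
// (the *.azurecr.io entry above).
let upstreamRegistryUrls: [ECR.UpstreamRegistry: String] = [
    .ecrPublic: "public.ecr.aws",
    .dockerHub: "registry-1.docker.io",
    .quay: "quay.io",
    .k8s: "registry.k8s.io",
    .gitHubContainerRegistry: "ghcr.io",
    .gitLabContainerRegistry: "registry.gitlab.com",
]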
- public init(capacityProviderStrategy: [CapacityProviderStrategyItem]? = nil, createdAt: Date? = nil, desiredCount: Int? = nil, failedTasks: Int? = nil, id: String? = nil, launchType: LaunchType? = nil, networkConfiguration: NetworkConfiguration? = nil, pendingCount: Int? = nil, platformFamily: String? = nil, platformVersion: String? = nil, rolloutState: DeploymentRolloutState? = nil, rolloutStateReason: String? = nil, runningCount: Int? = nil, serviceConnectConfiguration: ServiceConnectConfiguration? = nil, serviceConnectResources: [ServiceConnectServiceResource]? = nil, status: String? = nil, taskDefinition: String? = nil, updatedAt: Date? = nil, volumeConfigurations: [ServiceVolumeConfiguration]? = nil) { + public init(capacityProviderStrategy: [CapacityProviderStrategyItem]? = nil, createdAt: Date? = nil, desiredCount: Int? = nil, failedTasks: Int? = nil, fargateEphemeralStorage: DeploymentEphemeralStorage? = nil, id: String? = nil, launchType: LaunchType? = nil, networkConfiguration: NetworkConfiguration? = nil, pendingCount: Int? = nil, platformFamily: String? = nil, platformVersion: String? = nil, rolloutState: DeploymentRolloutState? = nil, rolloutStateReason: String? = nil, runningCount: Int? = nil, serviceConnectConfiguration: ServiceConnectConfiguration? = nil, serviceConnectResources: [ServiceConnectServiceResource]? = nil, status: String? = nil, taskDefinition: String? = nil, updatedAt: Date? = nil, volumeConfigurations: [ServiceVolumeConfiguration]? = nil) { self.capacityProviderStrategy = capacityProviderStrategy self.createdAt = createdAt self.desiredCount = desiredCount self.failedTasks = failedTasks + self.fargateEphemeralStorage = fargateEphemeralStorage self.id = id self.launchType = launchType self.networkConfiguration = networkConfiguration @@ -2562,6 +2569,7 @@ extension ECS { case createdAt = "createdAt" case desiredCount = "desiredCount" case failedTasks = "failedTasks" + case fargateEphemeralStorage = "fargateEphemeralStorage" case id = "id" case launchType = "launchType" case networkConfiguration = "networkConfiguration" @@ -2734,6 +2742,19 @@ extension ECS { } } + public struct DeploymentEphemeralStorage: AWSDecodableShape { + /// Specify an Key Management Service key ID to encrypt the ephemeral storage for deployment. + public let kmsKeyId: String? + + public init(kmsKeyId: String? = nil) { + self.kmsKeyId = kmsKeyId + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "kmsKeyId" + } + } + public struct DeregisterContainerInstanceRequest: AWSEncodableShape { /// The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instance to /// deregister. If you do not specify a cluster, the default cluster is assumed. @@ -4727,6 +4748,23 @@ extension ECS { } } + public struct ManagedStorageConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specify the Key Management Service key ID for the Fargate ephemeral storage. + public let fargateEphemeralStorageKmsKeyId: String? + /// Specify a Key Management Service key ID to encrypt the managed storage. + public let kmsKeyId: String? + + public init(fargateEphemeralStorageKmsKeyId: String? = nil, kmsKeyId: String? 
= nil) { + self.fargateEphemeralStorageKmsKeyId = fargateEphemeralStorageKmsKeyId + self.kmsKeyId = kmsKeyId + } + + private enum CodingKeys: String, CodingKey { + case fargateEphemeralStorageKmsKeyId = "fargateEphemeralStorageKmsKeyId" + case kmsKeyId = "kmsKeyId" + } + } + public struct MountPoint: AWSEncodableShape & AWSDecodableShape { /// The path on the container to mount the host volume at. public let containerPath: String? @@ -5448,8 +5486,7 @@ extension ECS { /// response. public let requiresCompatibilities: [Compatibility]? /// The operating system that your tasks definitions run on. A platform family is - /// specified only for tasks using the Fargate launch type. When you specify a task definition in a service, this value must match the - /// runtimePlatform value of the service. + /// specified only for tasks using the Fargate launch type. public let runtimePlatform: RuntimePlatform? /// The metadata that you apply to the task definition to help you categorize and organize /// them. Each tag consists of a key and an optional value. You define both of them. The following basic restrictions apply to tags: Maximum number of tags per resource - 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8 Maximum value length - 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. @@ -5585,13 +5622,12 @@ extension ECS { } public struct ResourceRequirement: AWSEncodableShape & AWSDecodableShape { - /// The type of resource to assign to a container. The supported values are - /// GPU or InferenceAccelerator. + /// The type of resource to assign to a container. public let type: ResourceType - /// The value for the specified resource type. If the GPU type is used, the value is the number of physical - /// GPUs the Amazon ECS container agent reserves for the container. The number - /// of GPUs that's reserved for all containers in a task can't exceed the number of - /// available GPUs on the container instance that the task is launched on. If the InferenceAccelerator type is used, the value matches + /// The value for the specified resource type. When the type is GPU, the value is the number of physical GPUs the + /// Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for + /// all containers in a task can't exceed the number of available GPUs on the container + /// instance that the task is launched on. When the type is InferenceAccelerator, the value matches /// the deviceName for an InferenceAccelerator specified in a task definition. public let value: String @@ -6854,6 +6890,8 @@ extension ECS { public let ephemeralStorage: EphemeralStorage? /// The Unix timestamp for the time when the task execution stopped. public let executionStoppedAt: Date? + /// The Fargate ephemeral storage settings for the task. + public let fargateEphemeralStorage: TaskEphemeralStorage? 
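// [Editor's sketch, not generated code] The ManagedStorageConfiguration shape above and the
// managedStorageConfiguration field added to ClusterConfiguration earlier in this patch can
// be combined as follows. Both calls use the memberwise initializers shown in the diff; the
// KMS key ARNs are placeholders.
let managedStorage = ECS.ManagedStorageConfiguration(
    fargateEphemeralStorageKmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-fargate", // placeholder
    kmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-managed"                         // placeholder
)
let clusterConfiguration = ECS.ClusterConfiguration(managedStorageConfiguration: managedStorage)
// clusterConfiguration would then be supplied on the cluster create/update request shapes,
// which are unchanged in this hunk and therefore not shown here.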
/// The name of the task group that's associated with the task. public let group: String? /// The health status for the task. It's determined by the health of the essential @@ -6935,7 +6973,7 @@ extension ECS { /// current. public let version: Int64? - public init(attachments: [Attachment]? = nil, attributes: [Attribute]? = nil, availabilityZone: String? = nil, capacityProviderName: String? = nil, clusterArn: String? = nil, connectivity: Connectivity? = nil, connectivityAt: Date? = nil, containerInstanceArn: String? = nil, containers: [Container]? = nil, cpu: String? = nil, createdAt: Date? = nil, desiredStatus: String? = nil, enableExecuteCommand: Bool? = nil, ephemeralStorage: EphemeralStorage? = nil, executionStoppedAt: Date? = nil, group: String? = nil, healthStatus: HealthStatus? = nil, inferenceAccelerators: [InferenceAccelerator]? = nil, lastStatus: String? = nil, launchType: LaunchType? = nil, memory: String? = nil, overrides: TaskOverride? = nil, platformFamily: String? = nil, platformVersion: String? = nil, pullStartedAt: Date? = nil, pullStoppedAt: Date? = nil, startedAt: Date? = nil, startedBy: String? = nil, stopCode: TaskStopCode? = nil, stoppedAt: Date? = nil, stoppedReason: String? = nil, stoppingAt: Date? = nil, tags: [Tag]? = nil, taskArn: String? = nil, taskDefinitionArn: String? = nil, version: Int64? = nil) { + public init(attachments: [Attachment]? = nil, attributes: [Attribute]? = nil, availabilityZone: String? = nil, capacityProviderName: String? = nil, clusterArn: String? = nil, connectivity: Connectivity? = nil, connectivityAt: Date? = nil, containerInstanceArn: String? = nil, containers: [Container]? = nil, cpu: String? = nil, createdAt: Date? = nil, desiredStatus: String? = nil, enableExecuteCommand: Bool? = nil, ephemeralStorage: EphemeralStorage? = nil, executionStoppedAt: Date? = nil, fargateEphemeralStorage: TaskEphemeralStorage? = nil, group: String? = nil, healthStatus: HealthStatus? = nil, inferenceAccelerators: [InferenceAccelerator]? = nil, lastStatus: String? = nil, launchType: LaunchType? = nil, memory: String? = nil, overrides: TaskOverride? = nil, platformFamily: String? = nil, platformVersion: String? = nil, pullStartedAt: Date? = nil, pullStoppedAt: Date? = nil, startedAt: Date? = nil, startedBy: String? = nil, stopCode: TaskStopCode? = nil, stoppedAt: Date? = nil, stoppedReason: String? = nil, stoppingAt: Date? = nil, tags: [Tag]? = nil, taskArn: String? = nil, taskDefinitionArn: String? = nil, version: Int64? = nil) { self.attachments = attachments self.attributes = attributes self.availabilityZone = availabilityZone @@ -6951,6 +6989,7 @@ extension ECS { self.enableExecuteCommand = enableExecuteCommand self.ephemeralStorage = ephemeralStorage self.executionStoppedAt = executionStoppedAt + self.fargateEphemeralStorage = fargateEphemeralStorage self.group = group self.healthStatus = healthStatus self.inferenceAccelerators = inferenceAccelerators @@ -6990,6 +7029,7 @@ extension ECS { case enableExecuteCommand = "enableExecuteCommand" case ephemeralStorage = "ephemeralStorage" case executionStoppedAt = "executionStoppedAt" + case fargateEphemeralStorage = "fargateEphemeralStorage" case group = "group" case healthStatus = "healthStatus" case inferenceAccelerators = "inferenceAccelerators" @@ -7181,6 +7221,25 @@ extension ECS { } } + public struct TaskEphemeralStorage: AWSDecodableShape { + /// Specify an Key Management Service key ID to encrypt the ephemeral storage for the task. + public let kmsKeyId: String? 
+ /// The total amount, in GiB, of the ephemeral storage to set for the task. The minimum + /// supported value is 20 GiB and the maximum supported value is
 200 + /// GiB. + public let sizeInGiB: Int? + + public init(kmsKeyId: String? = nil, sizeInGiB: Int? = nil) { + self.kmsKeyId = kmsKeyId + self.sizeInGiB = sizeInGiB + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "kmsKeyId" + case sizeInGiB = "sizeInGiB" + } + } + public struct TaskManagedEBSVolumeConfiguration: AWSEncodableShape { /// Indicates whether the volume should be encrypted. If no value is specified, encryption /// is turned on by default. This parameter maps 1:1 with the Encrypted @@ -7355,6 +7414,8 @@ extension ECS { /// discovery registry, the externalId parameter contains the /// ECS_TASK_SET_EXTERNAL_ID Cloud Map attribute. public let externalId: String? + /// The Fargate ephemeral storage settings for the task set. + public let fargateEphemeralStorage: DeploymentEphemeralStorage? /// The ID of the task set. public let id: String? /// The launch type the tasks in the task set are using. For more information, see Amazon ECS @@ -7417,12 +7478,13 @@ extension ECS { /// The Unix timestamp for the time when the task set was last updated. public let updatedAt: Date? - public init(capacityProviderStrategy: [CapacityProviderStrategyItem]? = nil, clusterArn: String? = nil, computedDesiredCount: Int? = nil, createdAt: Date? = nil, externalId: String? = nil, id: String? = nil, launchType: LaunchType? = nil, loadBalancers: [LoadBalancer]? = nil, networkConfiguration: NetworkConfiguration? = nil, pendingCount: Int? = nil, platformFamily: String? = nil, platformVersion: String? = nil, runningCount: Int? = nil, scale: Scale? = nil, serviceArn: String? = nil, serviceRegistries: [ServiceRegistry]? = nil, stabilityStatus: StabilityStatus? = nil, stabilityStatusAt: Date? = nil, startedBy: String? = nil, status: String? = nil, tags: [Tag]? = nil, taskDefinition: String? = nil, taskSetArn: String? = nil, updatedAt: Date? = nil) { + public init(capacityProviderStrategy: [CapacityProviderStrategyItem]? = nil, clusterArn: String? = nil, computedDesiredCount: Int? = nil, createdAt: Date? = nil, externalId: String? = nil, fargateEphemeralStorage: DeploymentEphemeralStorage? = nil, id: String? = nil, launchType: LaunchType? = nil, loadBalancers: [LoadBalancer]? = nil, networkConfiguration: NetworkConfiguration? = nil, pendingCount: Int? = nil, platformFamily: String? = nil, platformVersion: String? = nil, runningCount: Int? = nil, scale: Scale? = nil, serviceArn: String? = nil, serviceRegistries: [ServiceRegistry]? = nil, stabilityStatus: StabilityStatus? = nil, stabilityStatusAt: Date? = nil, startedBy: String? = nil, status: String? = nil, tags: [Tag]? = nil, taskDefinition: String? = nil, taskSetArn: String? = nil, updatedAt: Date? 
= nil) { self.capacityProviderStrategy = capacityProviderStrategy self.clusterArn = clusterArn self.computedDesiredCount = computedDesiredCount self.createdAt = createdAt self.externalId = externalId + self.fargateEphemeralStorage = fargateEphemeralStorage self.id = id self.launchType = launchType self.loadBalancers = loadBalancers @@ -7450,6 +7512,7 @@ extension ECS { case computedDesiredCount = "computedDesiredCount" case createdAt = "createdAt" case externalId = "externalId" + case fargateEphemeralStorage = "fargateEphemeralStorage" case id = "id" case launchType = "launchType" case loadBalancers = "loadBalancers" diff --git a/Sources/Soto/Services/EKS/EKS_api.swift b/Sources/Soto/Services/EKS/EKS_api.swift index 1e7d3f0034..3c263c1921 100644 --- a/Sources/Soto/Services/EKS/EKS_api.swift +++ b/Sources/Soto/Services/EKS/EKS_api.swift @@ -175,7 +175,7 @@ public struct EKS: AWSService { ) } - /// Creates an Amazon EKS control plane. The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances. The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows). Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster. You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide . You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide. + /// Creates an Amazon EKS control plane. The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its own set of Amazon EC2 instances. 
The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the nodes (for example, to support kubectl exec, logs, and proxy data flows). Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over the Kubernetes API server endpoint and a certificate file that is created for your cluster. You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide . You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see CloudWatch Pricing. In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch nodes into your cluster. For more information, see Allowing users to access your cluster and Launching Amazon EKS nodes in the Amazon EKS User Guide. @Sendable public func createCluster(_ input: CreateClusterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateClusterResponse { return try await self.client.execute( @@ -214,7 +214,7 @@ public struct EKS: AWSService { ) } - /// Creates a managed node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Launch template support. An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by Amazon Web Services for an Amazon EKS cluster. For more information, see Managed node groups in the Amazon EKS User Guide. Windows AMI types are only supported for commercial Amazon Web Services Regions that support Windows on Amazon EKS. + /// Creates a managed node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see Customizing managed nodes with launch templates. An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by Amazon Web Services for an Amazon EKS cluster. For more information, see Managed node groups in the Amazon EKS User Guide. 
Windows AMI types are only supported for commercial Amazon Web Services Regions that support Windows on Amazon EKS. @Sendable public func createNodegroup(_ input: CreateNodegroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateNodegroupResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/EKS/EKS_shapes.swift b/Sources/Soto/Services/EKS/EKS_shapes.swift index 0acddd78e0..5f79e8c004 100644 --- a/Sources/Soto/Services/EKS/EKS_shapes.swift +++ b/Sources/Soto/Services/EKS/EKS_shapes.swift @@ -52,6 +52,8 @@ extension EKS { public enum AddonIssueCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accessDenied = "AccessDenied" + case addonPermissionFailure = "AddonPermissionFailure" + case addonSubscriptionNeeded = "AddonSubscriptionNeeded" case admissionRequestDenied = "AdmissionRequestDenied" case clusterUnreachable = "ClusterUnreachable" case configurationConflict = "ConfigurationConflict" @@ -186,6 +188,14 @@ extension EKS { public var description: String { return self.rawValue } } + public enum FargateProfileIssueCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case accessDenied = "AccessDenied" + case clusterUnreachable = "ClusterUnreachable" + case internalFailure = "InternalFailure" + case podExecutionRoleAlreadyInUse = "PodExecutionRoleAlreadyInUse" + public var description: String { return self.rawValue } + } + public enum FargateProfileStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case createFailed = "CREATE_FAILED" @@ -301,6 +311,7 @@ extension EKS { case maxUnavailablePercentage = "MaxUnavailablePercentage" case minSize = "MinSize" case platformVersion = "PlatformVersion" + case podIdentityAssociations = "PodIdentityAssociations" case publicAccessCidrs = "PublicAccessCidrs" case releaseVersion = "ReleaseVersion" case resolveConflicts = "ResolveConflicts" @@ -454,6 +465,8 @@ extension EKS { public let modifiedAt: Date? /// The owner of the add-on. public let owner: String? + /// An array of Pod Identity Assocations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + public let podIdentityAssociations: [String]? /// The publisher of the add-on. public let publisher: String? /// The Amazon Resource Name (ARN) of the IAM role that's bound to the Kubernetes ServiceAccount object that the add-on uses. @@ -463,7 +476,7 @@ extension EKS { /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? - public init(addonArn: String? = nil, addonName: String? = nil, addonVersion: String? = nil, clusterName: String? = nil, configurationValues: String? = nil, createdAt: Date? = nil, health: AddonHealth? = nil, marketplaceInformation: MarketplaceInformation? = nil, modifiedAt: Date? = nil, owner: String? = nil, publisher: String? = nil, serviceAccountRoleArn: String? = nil, status: AddonStatus? = nil, tags: [String: String]? = nil) { + public init(addonArn: String? = nil, addonName: String? = nil, addonVersion: String? = nil, clusterName: String? = nil, configurationValues: String? = nil, createdAt: Date? = nil, health: AddonHealth? 
= nil, marketplaceInformation: MarketplaceInformation? = nil, modifiedAt: Date? = nil, owner: String? = nil, podIdentityAssociations: [String]? = nil, publisher: String? = nil, serviceAccountRoleArn: String? = nil, status: AddonStatus? = nil, tags: [String: String]? = nil) { self.addonArn = addonArn self.addonName = addonName self.addonVersion = addonVersion @@ -474,6 +487,7 @@ extension EKS { self.marketplaceInformation = marketplaceInformation self.modifiedAt = modifiedAt self.owner = owner + self.podIdentityAssociations = podIdentityAssociations self.publisher = publisher self.serviceAccountRoleArn = serviceAccountRoleArn self.status = status @@ -491,6 +505,7 @@ extension EKS { case marketplaceInformation = "marketplaceInformation" case modifiedAt = "modifiedAt" case owner = "owner" + case podIdentityAssociations = "podIdentityAssociations" case publisher = "publisher" case serviceAccountRoleArn = "serviceAccountRoleArn" case status = "status" @@ -565,6 +580,40 @@ extension EKS { } } + public struct AddonPodIdentityAssociations: AWSEncodableShape { + /// The ARN of an IAM Role. + public let roleArn: String + /// The name of a Kubernetes Service Account. + public let serviceAccount: String + + public init(roleArn: String, serviceAccount: String) { + self.roleArn = roleArn + self.serviceAccount = serviceAccount + } + + private enum CodingKeys: String, CodingKey { + case roleArn = "roleArn" + case serviceAccount = "serviceAccount" + } + } + + public struct AddonPodIdentityConfiguration: AWSDecodableShape { + /// A suggested IAM Policy for the addon. + public let recommendedManagedPolicies: [String]? + /// The Kubernetes Service Account name used by the addon. + public let serviceAccount: String? + + public init(recommendedManagedPolicies: [String]? = nil, serviceAccount: String? = nil) { + self.recommendedManagedPolicies = recommendedManagedPolicies + self.serviceAccount = serviceAccount + } + + private enum CodingKeys: String, CodingKey { + case recommendedManagedPolicies = "recommendedManagedPolicies" + case serviceAccount = "serviceAccount" + } + } + public struct AddonVersionInfo: AWSDecodableShape { /// The version of the add-on. public let addonVersion: String? @@ -574,12 +623,15 @@ extension EKS { public let compatibilities: [Compatibility]? /// Whether the add-on requires configuration. public let requiresConfiguration: Bool? + /// Indicates if the Addon requires IAM Permissions to operate, such as networking permissions. + public let requiresIamPermissions: Bool? - public init(addonVersion: String? = nil, architecture: [String]? = nil, compatibilities: [Compatibility]? = nil, requiresConfiguration: Bool? = nil) { + public init(addonVersion: String? = nil, architecture: [String]? = nil, compatibilities: [Compatibility]? = nil, requiresConfiguration: Bool? = nil, requiresIamPermissions: Bool? = nil) { self.addonVersion = addonVersion self.architecture = architecture self.compatibilities = compatibilities self.requiresConfiguration = requiresConfiguration + self.requiresIamPermissions = requiresIamPermissions } private enum CodingKeys: String, CodingKey { @@ -587,6 +639,7 @@ extension EKS { case architecture = "architecture" case compatibilities = "compatibilities" case requiresConfiguration = "requiresConfiguration" + case requiresIamPermissions = "requiresIamPermissions" } } @@ -839,7 +892,7 @@ extension EKS { public let encryptionConfig: [EncryptionConfig]? /// The endpoint for your Kubernetes API server. public let endpoint: String? 
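// [Editor's sketch, not generated code] The AddonPodIdentityAssociations shape above maps an
// IAM role to a Kubernetes service account, and is accepted by the CreateAddonRequest
// initializer updated later in this patch; the role ARN, service account, add-on, and cluster
// names below are placeholders.
let association = EKS.AddonPodIdentityAssociations(
    roleArn: "arn:aws:iam::111122223333:role/EXAMPLE-addon-role", // placeholder
    serviceAccount: "example-service-account"                     // placeholder
)
let createAddon = EKS.CreateAddonRequest(
    addonName: "example-addon",      // placeholder
    clusterName: "example-cluster",  // placeholder
    podIdentityAssociations: [association]
)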
- /// An object representing the health of your local Amazon EKS cluster on an Amazon Web Services Outpost. This object isn't available for clusters on the Amazon Web Services cloud. + /// An object representing the health of your Amazon EKS cluster. public let health: ClusterHealth? /// The ID of your local Amazon EKS cluster on an Amazon Web Services Outpost. This property isn't available for an Amazon EKS cluster on the Amazon Web Services cloud. public let id: String? @@ -916,7 +969,7 @@ extension EKS { } public struct ClusterHealth: AWSDecodableShape { - /// An object representing the health issues of your local Amazon EKS cluster on an Amazon Web Services Outpost. + /// An object representing the health issues of your Amazon EKS cluster. public let issues: [ClusterIssue]? public init(issues: [ClusterIssue]? = nil) { @@ -1142,6 +1195,8 @@ extension EKS { public let clusterName: String /// The set of configuration values for the add-on that's created. The values that you provide are validated against the schema returned by DescribeAddonConfiguration. public let configurationValues: String? + /// An array of Pod Identity Assocations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + public let podIdentityAssociations: [AddonPodIdentityAssociations]? /// How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are handled based on the value you choose: None – If the self-managed version of the add-on is installed on your cluster, Amazon EKS doesn't change the value. Creation of the add-on might fail. Overwrite – If the self-managed version of the add-on is installed on your cluster and the Amazon EKS default value is different than the existing value, Amazon EKS changes the value to the Amazon EKS default value. Preserve – This is similar to the NONE option. If the self-managed version of the add-on is installed on your cluster Amazon EKS doesn't change the add-on resource properties. Creation of the add-on might fail if conflicts are detected. This option works differently during the update operation. For more information, see UpdateAddon. If you don't currently have the self-managed version of the add-on installed on your cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values to default values, regardless of the option that you specify. public let resolveConflicts: ResolveConflicts? /// The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide. To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide. @@ -1149,12 +1204,13 @@ extension EKS { /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? - public init(addonName: String, addonVersion: String? = nil, clientRequestToken: String? 
= CreateAddonRequest.idempotencyToken(), clusterName: String, configurationValues: String? = nil, resolveConflicts: ResolveConflicts? = nil, serviceAccountRoleArn: String? = nil, tags: [String: String]? = nil) { + public init(addonName: String, addonVersion: String? = nil, clientRequestToken: String? = CreateAddonRequest.idempotencyToken(), clusterName: String, configurationValues: String? = nil, podIdentityAssociations: [AddonPodIdentityAssociations]? = nil, resolveConflicts: ResolveConflicts? = nil, serviceAccountRoleArn: String? = nil, tags: [String: String]? = nil) { self.addonName = addonName self.addonVersion = addonVersion self.clientRequestToken = clientRequestToken self.clusterName = clusterName self.configurationValues = configurationValues + self.podIdentityAssociations = podIdentityAssociations self.resolveConflicts = resolveConflicts self.serviceAccountRoleArn = serviceAccountRoleArn self.tags = tags @@ -1168,6 +1224,7 @@ extension EKS { try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) request.encodePath(self.clusterName, key: "clusterName") try container.encodeIfPresent(self.configurationValues, forKey: .configurationValues) + try container.encodeIfPresent(self.podIdentityAssociations, forKey: .podIdentityAssociations) try container.encodeIfPresent(self.resolveConflicts, forKey: .resolveConflicts) try container.encodeIfPresent(self.serviceAccountRoleArn, forKey: .serviceAccountRoleArn) try container.encodeIfPresent(self.tags, forKey: .tags) @@ -1193,6 +1250,7 @@ extension EKS { case addonVersion = "addonVersion" case clientRequestToken = "clientRequestToken" case configurationValues = "configurationValues" + case podIdentityAssociations = "podIdentityAssociations" case resolveConflicts = "resolveConflicts" case serviceAccountRoleArn = "serviceAccountRoleArn" case tags = "tags" @@ -1214,6 +1272,8 @@ extension EKS { public struct CreateClusterRequest: AWSEncodableShape { /// The access configuration for the cluster. public let accessConfig: CreateAccessConfigRequest? + /// If you set this value to False when creating a cluster, the default networking add-ons will not be installed. The default networking addons include vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons. + public let bootstrapSelfManagedAddons: Bool? /// A unique, case-sensitive identifier that you provide to ensure /// the idempotency of the request. public let clientRequestToken: String? @@ -1236,8 +1296,9 @@ extension EKS { /// The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used. The default version might not be the latest version available. public let version: String? - public init(accessConfig: CreateAccessConfigRequest? = nil, clientRequestToken: String? = CreateClusterRequest.idempotencyToken(), encryptionConfig: [EncryptionConfig]? = nil, kubernetesNetworkConfig: KubernetesNetworkConfigRequest? = nil, logging: Logging? = nil, name: String, outpostConfig: OutpostConfigRequest? = nil, resourcesVpcConfig: VpcConfigRequest, roleArn: String, tags: [String: String]? = nil, version: String? = nil) { + public init(accessConfig: CreateAccessConfigRequest? = nil, bootstrapSelfManagedAddons: Bool? = nil, clientRequestToken: String? = CreateClusterRequest.idempotencyToken(), encryptionConfig: [EncryptionConfig]? = nil, kubernetesNetworkConfig: KubernetesNetworkConfigRequest? = nil, logging: Logging? 
= nil, name: String, outpostConfig: OutpostConfigRequest? = nil, resourcesVpcConfig: VpcConfigRequest, roleArn: String, tags: [String: String]? = nil, version: String? = nil) { self.accessConfig = accessConfig + self.bootstrapSelfManagedAddons = bootstrapSelfManagedAddons self.clientRequestToken = clientRequestToken self.encryptionConfig = encryptionConfig self.kubernetesNetworkConfig = kubernetesNetworkConfig @@ -1266,6 +1327,7 @@ extension EKS { private enum CodingKeys: String, CodingKey { case accessConfig = "accessConfig" + case bootstrapSelfManagedAddons = "bootstrapSelfManagedAddons" case clientRequestToken = "clientRequestToken" case encryptionConfig = "encryptionConfig" case kubernetesNetworkConfig = "kubernetesNetworkConfig" @@ -1429,7 +1491,7 @@ extension EKS { } public struct CreateNodegroupRequest: AWSEncodableShape { - /// The AMI type for your node group. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add eks:kube-proxy-windows to your Windows nodes rolearn in the aws-auth ConfigMap. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The AMI type for your node group. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, or the node group deployment will fail. If your launch template uses a Windows custom AMI, then add eks:kube-proxy-windows to your Windows nodes rolearn in the aws-auth ConfigMap. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let amiType: AMITypes? /// The capacity type for your node group. public let capacityType: CapacityTypes? @@ -1438,25 +1500,25 @@ extension EKS { public let clientRequestToken: String? /// The name of your cluster. public let clusterName: String - /// The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify launchTemplate, then don't specify diskSize, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows. If you specify launchTemplate, then don't specify diskSize, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let diskSize: Int? - /// Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used, by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. 
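// --- Illustrative usage sketch (editor's note, not part of the generated diff) ---
// A minimal example of the two new EKS request fields introduced above
// (bootstrapSelfManagedAddons and podIdentityAssociations). The cluster name,
// role ARN, `vpcConfig` and `associations` values are placeholders, and the
// commented service calls assume the usual Soto client call shape.
let createCluster = EKS.CreateClusterRequest(
    bootstrapSelfManagedAddons: false,  // skip the default vpc-cni, coredns and kube-proxy add-ons
    name: "example-cluster",
    resourcesVpcConfig: vpcConfig,      // an EKS.VpcConfigRequest built elsewhere
    roleArn: "arn:aws:iam::111122223333:role/eksClusterRole"
)
let createAddon = EKS.CreateAddonRequest(
    addonName: "aws-ebs-csi-driver",
    clusterName: "example-cluster",
    podIdentityAssociations: associations  // [AddonPodIdentityAssociations] mapping service accounts to IAM roles
)
// _ = try await eks.createCluster(createCluster)
// _ = try await eks.createAddon(createAddon)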
For more information, see Managed node group capacity types and Launch template support in the Amazon EKS User Guide. + /// Specify the instance types for a node group. If you specify a GPU instance type, make sure to also specify an applicable GPU AMI type with the amiType parameter. If you specify launchTemplate, then you can specify zero or one instance type in your launch template or you can specify 0-20 instance types for instanceTypes. If however, you specify an instance type in your launch template and specify any instanceTypes, the node group deployment will fail. If you don't specify an instance type in a launch template or for instanceTypes, then t3.medium is used, by default. If you specify Spot for capacityType, then we recommend specifying multiple values for instanceTypes. For more information, see Managed node group capacity types and Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let instanceTypes: [String]? /// The Kubernetes labels to apply to the nodes in the node group when they are created. public let labels: [String: String]? - /// An object representing a node group's launch template specification. If specified, then do not specify instanceTypes, diskSize, or remoteAccess and make sure that the launch template meets the requirements in launchTemplateSpecification. + /// An object representing a node group's launch template specification. When using this object, don't directly specify instanceTypes, diskSize, or remoteAccess. Make sure that the launch template meets the requirements in launchTemplateSpecification. Also refer to Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let launchTemplate: LaunchTemplateSpecification? /// The unique name to give your node group. public let nodegroupName: String - /// The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch nodes and register them into a cluster, you must create an IAM role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide . If you specify launchTemplate, then don't specify IamInstanceProfile in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let nodeRole: String - /// The AMI version of the Amazon EKS optimized AMI to use with your node group. 
By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let releaseVersion: String? - /// The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify remoteAccess, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The remote access configuration to use with your node group. For Linux, the protocol is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify remoteAccess, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let remoteAccess: RemoteAccessConfig? /// The scaling configuration details for the Auto Scaling group that is created for your node group. public let scalingConfig: NodegroupScalingConfig? - /// The subnets to use for the Auto Scaling group that is created for your node group. If you specify launchTemplate, then don't specify SubnetId in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The subnets to use for the Auto Scaling group that is created for your node group. If you specify launchTemplate, then don't specify SubnetId in your launch template, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let subnets: [String] /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? @@ -1464,7 +1526,7 @@ extension EKS { public let taints: [Taint]? 
/// The node group update configuration. public let updateConfig: NodegroupUpdateConfig? - /// The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted specified value. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let version: String? public init(amiType: AMITypes? = nil, capacityType: CapacityTypes? = nil, clientRequestToken: String? = CreateNodegroupRequest.idempotencyToken(), clusterName: String, diskSize: Int? = nil, instanceTypes: [String]? = nil, labels: [String: String]? = nil, launchTemplate: LaunchTemplateSpecification? = nil, nodegroupName: String, nodeRole: String, releaseVersion: String? = nil, remoteAccess: RemoteAccessConfig? = nil, scalingConfig: NodegroupScalingConfig? = nil, subnets: [String], tags: [String: String]? = nil, taints: [Taint]? = nil, updateConfig: NodegroupUpdateConfig? = nil, version: String? = nil) { @@ -1983,17 +2045,21 @@ extension EKS { public let addonVersion: String? /// A JSON schema that's used to validate the configuration values you provide when an add-on is created or updated. public let configurationSchema: String? + /// The Kubernetes service account name used by the addon, and any suggested IAM policies. Use this information to create an IAM Role for the Addon. + public let podIdentityConfiguration: [AddonPodIdentityConfiguration]? - public init(addonName: String? = nil, addonVersion: String? = nil, configurationSchema: String? = nil) { + public init(addonName: String? = nil, addonVersion: String? = nil, configurationSchema: String? = nil, podIdentityConfiguration: [AddonPodIdentityConfiguration]? = nil) { self.addonName = addonName self.addonVersion = addonVersion self.configurationSchema = configurationSchema + self.podIdentityConfiguration = podIdentityConfiguration } private enum CodingKeys: String, CodingKey { case addonName = "addonName" case addonVersion = "addonVersion" case configurationSchema = "configurationSchema" + case podIdentityConfiguration = "podIdentityConfiguration" } } @@ -2564,6 +2630,8 @@ extension EKS { public let fargateProfileArn: String? /// The name of the Fargate profile. public let fargateProfileName: String? + /// The health status of the Fargate profile. If there are issues with your Fargate profile's health, they are listed here. + public let health: FargateProfileHealth? /// The Amazon Resource Name (ARN) of the Pod execution role to use for any Pod that matches the selectors in the Fargate profile. For more information, see Pod execution role in the Amazon EKS User Guide. public let podExecutionRoleArn: String? /// The selectors to match for a Pod to use this Fargate profile. @@ -2575,11 +2643,12 @@ extension EKS { /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. You define both. 
Tags don't propagate to any other cluster or Amazon Web Services resources. public let tags: [String: String]? - public init(clusterName: String? = nil, createdAt: Date? = nil, fargateProfileArn: String? = nil, fargateProfileName: String? = nil, podExecutionRoleArn: String? = nil, selectors: [FargateProfileSelector]? = nil, status: FargateProfileStatus? = nil, subnets: [String]? = nil, tags: [String: String]? = nil) { + public init(clusterName: String? = nil, createdAt: Date? = nil, fargateProfileArn: String? = nil, fargateProfileName: String? = nil, health: FargateProfileHealth? = nil, podExecutionRoleArn: String? = nil, selectors: [FargateProfileSelector]? = nil, status: FargateProfileStatus? = nil, subnets: [String]? = nil, tags: [String: String]? = nil) { self.clusterName = clusterName self.createdAt = createdAt self.fargateProfileArn = fargateProfileArn self.fargateProfileName = fargateProfileName + self.health = health self.podExecutionRoleArn = podExecutionRoleArn self.selectors = selectors self.status = status @@ -2592,6 +2661,7 @@ extension EKS { case createdAt = "createdAt" case fargateProfileArn = "fargateProfileArn" case fargateProfileName = "fargateProfileName" + case health = "health" case podExecutionRoleArn = "podExecutionRoleArn" case selectors = "selectors" case status = "status" @@ -2600,6 +2670,40 @@ extension EKS { } } + public struct FargateProfileHealth: AWSDecodableShape { + /// Any issues that are associated with the Fargate profile. + public let issues: [FargateProfileIssue]? + + public init(issues: [FargateProfileIssue]? = nil) { + self.issues = issues + } + + private enum CodingKeys: String, CodingKey { + case issues = "issues" + } + } + + public struct FargateProfileIssue: AWSDecodableShape { + /// A brief description of the error. + public let code: FargateProfileIssueCode? + /// The error message associated with the issue. + public let message: String? + /// The Amazon Web Services resources that are affected by this issue. + public let resourceIds: [String]? + + public init(code: FargateProfileIssueCode? = nil, message: String? = nil, resourceIds: [String]? = nil) { + self.code = code + self.message = message + self.resourceIds = resourceIds + } + + private enum CodingKeys: String, CodingKey { + case code = "code" + case message = "message" + case resourceIds = "resourceIds" + } + } + public struct FargateProfileSelector: AWSEncodableShape & AWSDecodableShape { /// The Kubernetes labels that the selector should match. A pod must contain all of the labels that are specified in the selector for it to be considered a match. public let labels: [String: String]? @@ -3946,6 +4050,8 @@ extension EKS { public let modifiedAt: Date? /// The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace. public let namespace: String? + /// If defined, the Pod Identity Association is owned by an Amazon EKS Addon. + public let ownerArn: String? /// The Amazon Resource Name (ARN) of the IAM role to associate with the service account. The EKS Pod Identity agent manages credentials to assume this role for applications in the containers in the pods that use this service account. public let roleArn: String? /// The name of the Kubernetes service account inside the cluster to associate the IAM credentials with. @@ -3953,13 +4059,14 @@ extension EKS { /// Metadata that assists with categorization and organization. Each tag consists of a key and an optional value. 
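// --- Illustrative usage sketch (editor's note, not part of the generated diff) ---
// Reading the new Fargate profile health information added above. This assumes a
// describe-Fargate-profile response whose `fargateProfile` property carries the
// FargateProfile shape; `describeResponse` is a placeholder.
if let issues = describeResponse.fargateProfile?.health?.issues {
    for issue in issues {
        // Each issue exposes an error code, a message and the affected resource IDs.
        print("Fargate profile issue \(issue.code.map { "\($0)" } ?? "unknown"): \(issue.message ?? "") affecting \(issue.resourceIds ?? [])")
    }
}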
You define both. Tags don't propagate to any other cluster or Amazon Web Services resources. The following basic restrictions apply to tags: Maximum number of tags per resource – 50 For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length – 128 Unicode characters in UTF-8 Maximum value length – 256 Unicode characters in UTF-8 If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit. public let tags: [String: String]? - public init(associationArn: String? = nil, associationId: String? = nil, clusterName: String? = nil, createdAt: Date? = nil, modifiedAt: Date? = nil, namespace: String? = nil, roleArn: String? = nil, serviceAccount: String? = nil, tags: [String: String]? = nil) { + public init(associationArn: String? = nil, associationId: String? = nil, clusterName: String? = nil, createdAt: Date? = nil, modifiedAt: Date? = nil, namespace: String? = nil, ownerArn: String? = nil, roleArn: String? = nil, serviceAccount: String? = nil, tags: [String: String]? = nil) { self.associationArn = associationArn self.associationId = associationId self.clusterName = clusterName self.createdAt = createdAt self.modifiedAt = modifiedAt self.namespace = namespace + self.ownerArn = ownerArn self.roleArn = roleArn self.serviceAccount = serviceAccount self.tags = tags @@ -3972,6 +4079,7 @@ extension EKS { case createdAt = "createdAt" case modifiedAt = "modifiedAt" case namespace = "namespace" + case ownerArn = "ownerArn" case roleArn = "roleArn" case serviceAccount = "serviceAccount" case tags = "tags" @@ -3987,14 +4095,17 @@ extension EKS { public let clusterName: String? /// The name of the Kubernetes namespace inside the cluster to create the association in. The service account and the pods that use the service account must be in this namespace. public let namespace: String? + /// If defined, the Pod Identity Association is owned by an Amazon EKS Addon. + public let ownerArn: String? /// The name of the Kubernetes service account inside the cluster to associate the IAM credentials with. public let serviceAccount: String? - public init(associationArn: String? = nil, associationId: String? = nil, clusterName: String? = nil, namespace: String? = nil, serviceAccount: String? = nil) { + public init(associationArn: String? = nil, associationId: String? = nil, clusterName: String? = nil, namespace: String? = nil, ownerArn: String? = nil, serviceAccount: String? = nil) { self.associationArn = associationArn self.associationId = associationId self.clusterName = clusterName self.namespace = namespace + self.ownerArn = ownerArn self.serviceAccount = serviceAccount } @@ -4003,6 +4114,7 @@ extension EKS { case associationId = "associationId" case clusterName = "clusterName" case namespace = "namespace" + case ownerArn = "ownerArn" case serviceAccount = "serviceAccount" } } @@ -4295,17 +4407,20 @@ extension EKS { public let clusterName: String /// The set of configuration values for the add-on that's created. 
The values that you provide are validated against the schema returned by DescribeAddonConfiguration. public let configurationValues: String? + /// An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change is made. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted. For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide. + public let podIdentityAssociations: [AddonPodIdentityAssociations]? /// How to resolve field value conflicts for an Amazon EKS add-on if you've changed a value from the Amazon EKS default value. Conflicts are handled based on the option you choose: None – Amazon EKS doesn't change the value. The update might fail. Overwrite – Amazon EKS overwrites the changed value back to the Amazon EKS default value. Preserve – Amazon EKS preserves the value. If you choose this option, we recommend that you test any field and value changes on a non-production cluster before updating the add-on on your production cluster. public let resolveConflicts: ResolveConflicts? /// The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide. To specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for your cluster. For more information, see Enabling IAM roles for service accounts on your cluster in the Amazon EKS User Guide. public let serviceAccountRoleArn: String? - public init(addonName: String, addonVersion: String? = nil, clientRequestToken: String? = UpdateAddonRequest.idempotencyToken(), clusterName: String, configurationValues: String? = nil, resolveConflicts: ResolveConflicts? = nil, serviceAccountRoleArn: String? = nil) { + public init(addonName: String, addonVersion: String? = nil, clientRequestToken: String? = UpdateAddonRequest.idempotencyToken(), clusterName: String, configurationValues: String? = nil, podIdentityAssociations: [AddonPodIdentityAssociations]? = nil, resolveConflicts: ResolveConflicts? = nil, serviceAccountRoleArn: String?
= nil) { self.addonName = addonName self.addonVersion = addonVersion self.clientRequestToken = clientRequestToken self.clusterName = clusterName self.configurationValues = configurationValues + self.podIdentityAssociations = podIdentityAssociations self.resolveConflicts = resolveConflicts self.serviceAccountRoleArn = serviceAccountRoleArn } @@ -4318,6 +4433,7 @@ extension EKS { try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) request.encodePath(self.clusterName, key: "clusterName") try container.encodeIfPresent(self.configurationValues, forKey: .configurationValues) + try container.encodeIfPresent(self.podIdentityAssociations, forKey: .podIdentityAssociations) try container.encodeIfPresent(self.resolveConflicts, forKey: .resolveConflicts) try container.encodeIfPresent(self.serviceAccountRoleArn, forKey: .serviceAccountRoleArn) } @@ -4334,6 +4450,7 @@ extension EKS { case addonVersion = "addonVersion" case clientRequestToken = "clientRequestToken" case configurationValues = "configurationValues" + case podIdentityAssociations = "podIdentityAssociations" case resolveConflicts = "resolveConflicts" case serviceAccountRoleArn = "serviceAccountRoleArn" } @@ -4589,9 +4706,9 @@ extension EKS { public let launchTemplate: LaunchTemplateSpecification? /// The name of the managed node group to update. public let nodegroupName: String - /// The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. + /// The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let releaseVersion: String? - /// The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide. 
+ /// The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version, or the node group update will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide. public let version: String? public init(clientRequestToken: String? = UpdateNodegroupVersionRequest.idempotencyToken(), clusterName: String, force: Bool? = nil, launchTemplate: LaunchTemplateSpecification? = nil, nodegroupName: String, releaseVersion: String? = nil, version: String? = nil) { diff --git a/Sources/Soto/Services/EMRContainers/EMRContainers_api.swift b/Sources/Soto/Services/EMRContainers/EMRContainers_api.swift index dff5d7798a..bf68f0cb87 100644 --- a/Sources/Soto/Services/EMRContainers/EMRContainers_api.swift +++ b/Sources/Soto/Services/EMRContainers/EMRContainers_api.swift @@ -77,6 +77,8 @@ public struct EMRContainers: AWSService { "ca-central-1": "emr-containers-fips.ca-central-1.amazonaws.com", "us-east-1": "emr-containers-fips.us-east-1.amazonaws.com", "us-east-2": "emr-containers-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "emr-containers.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "emr-containers.us-gov-west-1.amazonaws.com", "us-west-1": "emr-containers-fips.us-west-1.amazonaws.com", "us-west-2": "emr-containers-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift index 8d92988e3d..620824bfab 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift @@ -77,6 +77,8 @@ public struct EMRServerless: AWSService { "ca-central-1": "emr-serverless-fips.ca-central-1.amazonaws.com", "us-east-1": "emr-serverless-fips.us-east-1.amazonaws.com", "us-east-2": "emr-serverless-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "emr-serverless.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "emr-serverless.us-gov-west-1.amazonaws.com", "us-west-1": "emr-serverless-fips.us-west-1.amazonaws.com", "us-west-2": "emr-serverless-fips.us-west-2.amazonaws.com" ]) @@ -175,6 +177,19 @@ public struct EMRServerless: AWSService { ) } + /// Lists all attempts of a job run. + @Sendable + public func listJobRunAttempts(_ input: ListJobRunAttemptsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListJobRunAttemptsResponse { + return try await self.client.execute( + operation: "ListJobRunAttempts", + path: "/applications/{applicationId}/jobruns/{jobRunId}/attempts", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists job runs based on a set of parameters. @Sendable public func listJobRuns(_ input: ListJobRunsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListJobRunsResponse { @@ -312,6 +327,25 @@ extension EMRServerless { ) } + /// Lists all attempts of a job run. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listJobRunAttemptsPaginator( + _ input: ListJobRunAttemptsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listJobRunAttempts, + inputKey: \ListJobRunAttemptsRequest.nextToken, + outputKey: \ListJobRunAttemptsResponse.nextToken, + logger: logger + ) + } + /// Lists job runs based on a set of parameters. /// Return PaginatorSequence for operation. /// @@ -342,6 +376,17 @@ extension EMRServerless.ListApplicationsRequest: AWSPaginateToken { } } +extension EMRServerless.ListJobRunAttemptsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> EMRServerless.ListJobRunAttemptsRequest { + return .init( + applicationId: self.applicationId, + jobRunId: self.jobRunId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension EMRServerless.ListJobRunsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> EMRServerless.ListJobRunsRequest { return .init( @@ -349,6 +394,7 @@ extension EMRServerless.ListJobRunsRequest: AWSPaginateToken { createdAtAfter: self.createdAtAfter, createdAtBefore: self.createdAtBefore, maxResults: self.maxResults, + mode: self.mode, nextToken: token, states: self.states ) diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift index 36742e9f7d..9e04510ec1 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift @@ -43,6 +43,12 @@ extension EMRServerless { public var description: String { return self.rawValue } } + public enum JobRunMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case batch = "BATCH" + case streaming = "STREAMING" + public var description: String { return self.rawValue } + } + public enum JobRunState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cancelled = "CANCELLED" case cancelling = "CANCELLING" @@ -124,6 +130,8 @@ extension EMRServerless { public let imageConfiguration: ImageConfiguration? /// The initial capacity of the application. public let initialCapacity: [String: InitialCapacityConfig]? + /// The interactive configuration object that enables the interactive use cases for an application. + public let interactiveConfiguration: InteractiveConfiguration? /// The maximum capacity of the application. This is cumulative across all workers at any given point in time during the lifespan of the application is created. No new resources will be created once any one of the defined limits is hit. public let maximumCapacity: MaximumAllowedResources? public let monitoringConfiguration: MonitoringConfiguration? @@ -148,7 +156,7 @@ extension EMRServerless { /// The specification applied to each worker type. public let workerTypeSpecifications: [String: WorkerTypeSpecification]? - public init(applicationId: String, architecture: Architecture? = nil, arn: String, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, createdAt: Date, imageConfiguration: ImageConfiguration? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration?
= nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, state: ApplicationState, stateDetails: String? = nil, tags: [String: String]? = nil, type: String, updatedAt: Date, workerTypeSpecifications: [String: WorkerTypeSpecification]? = nil) { + public init(applicationId: String, architecture: Architecture? = nil, arn: String, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, createdAt: Date, imageConfiguration: ImageConfiguration? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, state: ApplicationState, stateDetails: String? = nil, tags: [String: String]? = nil, type: String, updatedAt: Date, workerTypeSpecifications: [String: WorkerTypeSpecification]? = nil) { self.applicationId = applicationId self.architecture = architecture self.arn = arn @@ -157,6 +165,7 @@ extension EMRServerless { self.createdAt = createdAt self.imageConfiguration = imageConfiguration self.initialCapacity = initialCapacity + self.interactiveConfiguration = interactiveConfiguration self.maximumCapacity = maximumCapacity self.monitoringConfiguration = monitoringConfiguration self.name = name @@ -180,6 +189,7 @@ extension EMRServerless { case createdAt = "createdAt" case imageConfiguration = "imageConfiguration" case initialCapacity = "initialCapacity" + case interactiveConfiguration = "interactiveConfiguration" case maximumCapacity = "maximumCapacity" case monitoringConfiguration = "monitoringConfiguration" case name = "name" @@ -448,6 +458,8 @@ extension EMRServerless { public let imageConfiguration: ImageConfigurationInput? /// The capacity to initialize when the application is created. public let initialCapacity: [String: InitialCapacityConfig]? + /// The interactive configuration object that enables the interactive use cases to use when running an application. + public let interactiveConfiguration: InteractiveConfiguration? /// The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. public let maximumCapacity: MaximumAllowedResources? /// The configuration setting for monitoring. @@ -467,13 +479,14 @@ extension EMRServerless { /// The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types. public let workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? - public init(architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = CreateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? 
= nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, tags: [String: String]? = nil, type: String, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { + public init(architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = CreateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, tags: [String: String]? = nil, type: String, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { self.architecture = architecture self.autoStartConfiguration = autoStartConfiguration self.autoStopConfiguration = autoStopConfiguration self.clientToken = clientToken self.imageConfiguration = imageConfiguration self.initialCapacity = initialCapacity + self.interactiveConfiguration = interactiveConfiguration self.maximumCapacity = maximumCapacity self.monitoringConfiguration = monitoringConfiguration self.name = name @@ -535,6 +548,7 @@ extension EMRServerless { case clientToken = "clientToken" case imageConfiguration = "imageConfiguration" case initialCapacity = "initialCapacity" + case interactiveConfiguration = "interactiveConfiguration" case maximumCapacity = "maximumCapacity" case monitoringConfiguration = "monitoringConfiguration" case name = "name" @@ -634,11 +648,14 @@ extension EMRServerless { public struct GetDashboardForJobRunRequest: AWSEncodableShape { /// The ID of the application. public let applicationId: String + /// An optional parameter that indicates the attempt number of the job run. If not specified, this value defaults to the latest attempt of the job run. + public let attempt: Int? /// The ID of the job run. public let jobRunId: String - public init(applicationId: String, jobRunId: String) { + public init(applicationId: String, attempt: Int? = nil, jobRunId: String) { self.applicationId = applicationId + self.attempt = attempt self.jobRunId = jobRunId } @@ -646,6 +663,7 @@ extension EMRServerless { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.applicationId, key: "applicationId") + request.encodeQuery(self.attempt, key: "attempt") request.encodePath(self.jobRunId, key: "jobRunId") } @@ -653,6 +671,7 @@ extension EMRServerless { try self.validate(self.applicationId, name: "applicationId", parent: name, max: 64) try self.validate(self.applicationId, name: "applicationId", parent: name, min: 1) try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[0-9a-z]+$") + try self.validate(self.attempt, name: "attempt", parent: name, min: 1) try self.validate(self.jobRunId, name: "jobRunId", parent: name, max: 64) try self.validate(self.jobRunId, name: "jobRunId", parent: name, min: 1) try self.validate(self.jobRunId, name: "jobRunId", parent: name, pattern: "^[0-9a-z]+$") @@ -677,11 +696,14 @@ extension EMRServerless { public struct GetJobRunRequest: AWSEncodableShape { /// The ID of the application on which the job run is submitted.
public let applicationId: String + /// An optional parameter that indicates the attempt number of the job run. If not specified, this value defaults to the latest attempt of the job run. + public let attempt: Int? /// The ID of the job run. public let jobRunId: String - public init(applicationId: String, jobRunId: String) { + public init(applicationId: String, attempt: Int? = nil, jobRunId: String) { self.applicationId = applicationId + self.attempt = attempt self.jobRunId = jobRunId } @@ -689,6 +711,7 @@ extension EMRServerless { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.applicationId, key: "applicationId") + request.encodeQuery(self.attempt, key: "attempt") request.encodePath(self.jobRunId, key: "jobRunId") } @@ -696,6 +719,7 @@ extension EMRServerless { try self.validate(self.applicationId, name: "applicationId", parent: name, max: 64) try self.validate(self.applicationId, name: "applicationId", parent: name, min: 1) try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[0-9a-z]+$") + try self.validate(self.attempt, name: "attempt", parent: name, min: 1) try self.validate(self.jobRunId, name: "jobRunId", parent: name, max: 64) try self.validate(self.jobRunId, name: "jobRunId", parent: name, min: 1) try self.validate(self.jobRunId, name: "jobRunId", parent: name, pattern: "^[0-9a-z]+$") @@ -809,11 +833,34 @@ extension EMRServerless { } } + public struct InteractiveConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Enables an Apache Livy endpoint that you can connect to and run interactive jobs. + public let livyEndpointEnabled: Bool? + /// Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook. + public let studioEnabled: Bool? + + public init(livyEndpointEnabled: Bool? = nil, studioEnabled: Bool? = nil) { + self.livyEndpointEnabled = livyEndpointEnabled + self.studioEnabled = studioEnabled + } + + private enum CodingKeys: String, CodingKey { + case livyEndpointEnabled = "livyEndpointEnabled" + case studioEnabled = "studioEnabled" + } + } + public struct JobRun: AWSDecodableShape { /// The ID of the application the job is running on. public let applicationId: String /// The execution role ARN of the job run. public let arn: String + /// The attempt of the job run. + public let attempt: Int? + /// The date and time of when the job run attempt was created. + public let attemptCreatedAt: Date? + /// The date and time of when the job run attempt was last updated. + public let attemptUpdatedAt: Date? /// The aggregate vCPU, memory, and storage that Amazon Web Services has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker. Note that billed resources do not include usage for idle pre-initialized workers. public let billedResourceUtilization: ResourceUtilization? /// The configuration settings that are used to override default configuration. @@ -830,11 +877,15 @@ extension EMRServerless { public let jobDriver: JobDriver /// The ID of the job run. public let jobRunId: String + /// The mode of the job run. + public let mode: JobRunMode? /// The optional job run name. This doesn't have to be unique. public let name: String? public let networkConfiguration: NetworkConfiguration? /// The Amazon EMR release associated with the application your job is running on.
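// --- Illustrative usage sketch (editor's note, not part of the generated diff) ---
// Enabling the new interactive configuration when creating an EMR Serverless
// application. The release label and application type are placeholder values.
let createApplication = EMRServerless.CreateApplicationRequest(
    interactiveConfiguration: EMRServerless.InteractiveConfiguration(
        livyEndpointEnabled: true,  // expose an Apache Livy endpoint for interactive jobs
        studioEnabled: true         // allow attaching the application to EMR Studio notebooks
    ),
    releaseLabel: "emr-7.1.0",
    type: "SPARK"
)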
public let releaseLabel: String + /// The retry policy of the job run. + public let retryPolicy: RetryPolicy? /// The state of the job run. public let state: JobRunState /// The state details of the job run. @@ -848,9 +899,12 @@ extension EMRServerless { /// The date and time when the job run was updated. public let updatedAt: Date - public init(applicationId: String, arn: String, billedResourceUtilization: ResourceUtilization? = nil, configurationOverrides: ConfigurationOverrides? = nil, createdAt: Date, createdBy: String, executionRole: String, executionTimeoutMinutes: Int64? = nil, jobDriver: JobDriver, jobRunId: String, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, state: JobRunState, stateDetails: String, tags: [String: String]? = nil, totalExecutionDurationSeconds: Int? = nil, totalResourceUtilization: TotalResourceUtilization? = nil, updatedAt: Date) { + public init(applicationId: String, arn: String, attempt: Int? = nil, attemptCreatedAt: Date? = nil, attemptUpdatedAt: Date? = nil, billedResourceUtilization: ResourceUtilization? = nil, configurationOverrides: ConfigurationOverrides? = nil, createdAt: Date, createdBy: String, executionRole: String, executionTimeoutMinutes: Int64? = nil, jobDriver: JobDriver, jobRunId: String, mode: JobRunMode? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, retryPolicy: RetryPolicy? = nil, state: JobRunState, stateDetails: String, tags: [String: String]? = nil, totalExecutionDurationSeconds: Int? = nil, totalResourceUtilization: TotalResourceUtilization? = nil, updatedAt: Date) { self.applicationId = applicationId self.arn = arn + self.attempt = attempt + self.attemptCreatedAt = attemptCreatedAt + self.attemptUpdatedAt = attemptUpdatedAt self.billedResourceUtilization = billedResourceUtilization self.configurationOverrides = configurationOverrides self.createdAt = createdAt @@ -859,9 +913,11 @@ extension EMRServerless { self.executionTimeoutMinutes = executionTimeoutMinutes self.jobDriver = jobDriver self.jobRunId = jobRunId + self.mode = mode self.name = name self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel + self.retryPolicy = retryPolicy self.state = state self.stateDetails = stateDetails self.tags = tags @@ -873,6 +929,9 @@ extension EMRServerless { private enum CodingKeys: String, CodingKey { case applicationId = "applicationId" case arn = "arn" + case attempt = "attempt" + case attemptCreatedAt = "attemptCreatedAt" + case attemptUpdatedAt = "attemptUpdatedAt" case billedResourceUtilization = "billedResourceUtilization" case configurationOverrides = "configurationOverrides" case createdAt = "createdAt" @@ -881,9 +940,11 @@ extension EMRServerless { case executionTimeoutMinutes = "executionTimeoutMinutes" case jobDriver = "jobDriver" case jobRunId = "jobRunId" + case mode = "mode" case name = "name" case networkConfiguration = "networkConfiguration" case releaseLabel = "releaseLabel" + case retryPolicy = "retryPolicy" case state = "state" case stateDetails = "stateDetails" case tags = "tags" @@ -893,11 +954,86 @@ extension EMRServerless { } } + public struct JobRunAttemptSummary: AWSDecodableShape { + /// The ID of the application the job is running on. + public let applicationId: String + /// The Amazon Resource Name (ARN) of the job run. + public let arn: String + /// The attempt number of the job run execution. + public let attempt: Int? + /// The date and time when the job run attempt was created. 
+ public let createdAt: Date + /// The user who created the job run. + public let createdBy: String + /// The Amazon Resource Name (ARN) of the execution role of the job run. + public let executionRole: String + /// The ID of the job run attempt. + public let id: String + /// The date and time of when the job run was created. + public let jobCreatedAt: Date + /// The mode of the job run attempt. + public let mode: JobRunMode? + /// The name of the job run attempt. + public let name: String? + /// The Amazon EMR release label of the job run attempt. + public let releaseLabel: String + /// The state of the job run attempt. + public let state: JobRunState + /// The state details of the job run attempt. + public let stateDetails: String + /// The type of the job run, such as Spark or Hive. + public let type: String? + /// The date and time of when the job run attempt was last updated. + public let updatedAt: Date + + public init(applicationId: String, arn: String, attempt: Int? = nil, createdAt: Date, createdBy: String, executionRole: String, id: String, jobCreatedAt: Date, mode: JobRunMode? = nil, name: String? = nil, releaseLabel: String, state: JobRunState, stateDetails: String, type: String? = nil, updatedAt: Date) { + self.applicationId = applicationId + self.arn = arn + self.attempt = attempt + self.createdAt = createdAt + self.createdBy = createdBy + self.executionRole = executionRole + self.id = id + self.jobCreatedAt = jobCreatedAt + self.mode = mode + self.name = name + self.releaseLabel = releaseLabel + self.state = state + self.stateDetails = stateDetails + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "applicationId" + case arn = "arn" + case attempt = "attempt" + case createdAt = "createdAt" + case createdBy = "createdBy" + case executionRole = "executionRole" + case id = "id" + case jobCreatedAt = "jobCreatedAt" + case mode = "mode" + case name = "name" + case releaseLabel = "releaseLabel" + case state = "state" + case stateDetails = "stateDetails" + case type = "type" + case updatedAt = "updatedAt" + } + } + public struct JobRunSummary: AWSDecodableShape { /// The ID of the application the job is running on. public let applicationId: String /// The ARN of the job run. public let arn: String + /// The attempt number of the job run execution. + public let attempt: Int? + /// The date and time of when the job run attempt was created. + public let attemptCreatedAt: Date? + /// The date and time of when the job run attempt was last updated. + public let attemptUpdatedAt: Date?
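// --- Illustrative usage sketch (editor's note, not part of the generated diff) ---
// Paging through the new job run attempt listing. This assumes an `EMRServerless`
// client built in the usual Soto way, placeholder application and job run IDs,
// and an async throwing context; ListJobRunAttemptsRequest is declared a little
// further down in this file.
let attemptsRequest = EMRServerless.ListJobRunAttemptsRequest(
    applicationId: "00fexampleapp",
    jobRunId: "00fexamplejob"
)
for try await page in emrServerless.listJobRunAttemptsPaginator(attemptsRequest) {
    for summary in page.jobRunAttempts {
        print("attempt \(summary.attempt ?? 1) of \(summary.id) is \(summary.state)")
    }
}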
= nil, attemptCreatedAt: Date? = nil, attemptUpdatedAt: Date? = nil, createdAt: Date, createdBy: String, executionRole: String, id: String, mode: JobRunMode? = nil, name: String? = nil, releaseLabel: String, state: JobRunState, stateDetails: String, type: String? = nil, updatedAt: Date) { self.applicationId = applicationId self.arn = arn + self.attempt = attempt + self.attemptCreatedAt = attemptCreatedAt + self.attemptUpdatedAt = attemptUpdatedAt self.createdAt = createdAt self.createdBy = createdBy self.executionRole = executionRole self.id = id + self.mode = mode self.name = name self.releaseLabel = releaseLabel self.state = state @@ -937,10 +1079,14 @@ extension EMRServerless { private enum CodingKeys: String, CodingKey { case applicationId = "applicationId" case arn = "arn" + case attempt = "attempt" + case attemptCreatedAt = "attemptCreatedAt" + case attemptUpdatedAt = "attemptUpdatedAt" case createdAt = "createdAt" case createdBy = "createdBy" case executionRole = "executionRole" case id = "id" + case mode = "mode" case name = "name" case releaseLabel = "releaseLabel" case state = "state" @@ -1000,6 +1146,64 @@ extension EMRServerless { } } + public struct ListJobRunAttemptsRequest: AWSEncodableShape { + /// The ID of the application for which to list job runs. + public let applicationId: String + /// The ID of the job run to list. + public let jobRunId: String + /// The maximum number of job run attempts to list. + public let maxResults: Int? + /// The token for the next set of job run attempt results. + public let nextToken: String? + + public init(applicationId: String, jobRunId: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.applicationId = applicationId + self.jobRunId = jobRunId + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.applicationId, key: "applicationId") + request.encodePath(self.jobRunId, key: "jobRunId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, max: 64) + try self.validate(self.applicationId, name: "applicationId", parent: name, min: 1) + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[0-9a-z]+$") + try self.validate(self.jobRunId, name: "jobRunId", parent: name, max: 64) + try self.validate(self.jobRunId, name: "jobRunId", parent: name, min: 1) + try self.validate(self.jobRunId, name: "jobRunId", parent: name, pattern: "^[0-9a-z]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[A-Za-z0-9_=-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListJobRunAttemptsResponse: AWSDecodableShape { + /// The array of the listed job run attempt objects. + public let jobRunAttempts: [JobRunAttemptSummary] + /// The output displays the token for the next set of application results. This is required for pagination and is available as a response of the previous request. + public let nextToken: String? + + public init(jobRunAttempts: [JobRunAttemptSummary], nextToken: String? 
= nil) { + self.jobRunAttempts = jobRunAttempts + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case jobRunAttempts = "jobRunAttempts" + case nextToken = "nextToken" + } + } + public struct ListJobRunsRequest: AWSEncodableShape { /// The ID of the application for which to list the job run. public let applicationId: String @@ -1009,16 +1213,19 @@ extension EMRServerless { public let createdAtBefore: Date? /// The maximum number of job runs that can be listed. public let maxResults: Int? + /// The mode of the job runs to list. + public let mode: JobRunMode? /// The token for the next set of job run results. public let nextToken: String? /// An optional filter for job run states. Note that if this filter contains multiple states, the resulting list will be grouped by the state. public let states: [JobRunState]? - public init(applicationId: String, createdAtAfter: Date? = nil, createdAtBefore: Date? = nil, maxResults: Int? = nil, nextToken: String? = nil, states: [JobRunState]? = nil) { + public init(applicationId: String, createdAtAfter: Date? = nil, createdAtBefore: Date? = nil, maxResults: Int? = nil, mode: JobRunMode? = nil, nextToken: String? = nil, states: [JobRunState]? = nil) { self.applicationId = applicationId self.createdAtAfter = createdAtAfter self.createdAtBefore = createdAtBefore self.maxResults = maxResults + self.mode = mode self.nextToken = nextToken self.states = states } @@ -1030,6 +1237,7 @@ extension EMRServerless { request.encodePath(self.applicationId, key: "applicationId") request.encodeQuery(self.createdAtAfter, key: "createdAtAfter") request.encodeQuery(self.createdAtBefore, key: "createdAtBefore") request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.mode, key: "mode") request.encodeQuery(self.nextToken, key: "nextToken") request.encodeQuery(self.states, key: "states") } @@ -1260,6 +1468,27 @@ extension EMRServerless { } } + public struct RetryPolicy: AWSEncodableShape & AWSDecodableShape { + /// Maximum number of attempts for the job run. This parameter is only applicable for BATCH mode. + public let maxAttempts: Int? + /// Maximum number of failed attempts per hour. This parameter is only applicable for STREAMING mode. + public let maxFailedAttemptsPerHour: Int? + + public init(maxAttempts: Int? = nil, maxFailedAttemptsPerHour: Int? = nil) { + self.maxAttempts = maxAttempts + self.maxFailedAttemptsPerHour = maxFailedAttemptsPerHour + } + + public func validate(name: String) throws { + try self.validate(self.maxAttempts, name: "maxAttempts", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case maxAttempts = "maxAttempts" + case maxFailedAttemptsPerHour = "maxFailedAttemptsPerHour" + } + } + public struct S3MonitoringConfiguration: AWSEncodableShape & AWSDecodableShape { /// The KMS key ARN to encrypt the logs published to the given Amazon S3 destination. public let encryptionKeyArn: String? @@ -1361,19 +1590,25 @@ extension EMRServerless { public let executionTimeoutMinutes: Int64? /// The job driver for the job run. public let jobDriver: JobDriver? + /// The mode of the job run when it starts. + public let mode: JobRunMode? /// The optional job run name. This doesn't have to be unique. public let name: String? + /// The retry policy when the job run starts. + public let retryPolicy: RetryPolicy? /// The tags assigned to the job run. public let tags: [String: String]? - public init(applicationId: String, clientToken: String = StartJobRunRequest.idempotencyToken(), configurationOverrides: ConfigurationOverrides?
= nil, executionRoleArn: String, executionTimeoutMinutes: Int64? = nil, jobDriver: JobDriver? = nil, name: String? = nil, tags: [String: String]? = nil) { + public init(applicationId: String, clientToken: String = StartJobRunRequest.idempotencyToken(), configurationOverrides: ConfigurationOverrides? = nil, executionRoleArn: String, executionTimeoutMinutes: Int64? = nil, jobDriver: JobDriver? = nil, mode: JobRunMode? = nil, name: String? = nil, retryPolicy: RetryPolicy? = nil, tags: [String: String]? = nil) { self.applicationId = applicationId self.clientToken = clientToken self.configurationOverrides = configurationOverrides self.executionRoleArn = executionRoleArn self.executionTimeoutMinutes = executionTimeoutMinutes self.jobDriver = jobDriver + self.mode = mode self.name = name + self.retryPolicy = retryPolicy self.tags = tags } @@ -1386,7 +1621,9 @@ extension EMRServerless { try container.encode(self.executionRoleArn, forKey: .executionRoleArn) try container.encodeIfPresent(self.executionTimeoutMinutes, forKey: .executionTimeoutMinutes) try container.encodeIfPresent(self.jobDriver, forKey: .jobDriver) + try container.encodeIfPresent(self.mode, forKey: .mode) try container.encodeIfPresent(self.name, forKey: .name) + try container.encodeIfPresent(self.retryPolicy, forKey: .retryPolicy) try container.encodeIfPresent(self.tags, forKey: .tags) } @@ -1407,6 +1644,7 @@ extension EMRServerless { try self.validate(self.name, name: "name", parent: name, max: 256) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: ".*\\S.*") + try self.retryPolicy?.validate(name: "\(name).retryPolicy") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -1423,7 +1661,9 @@ extension EMRServerless { case executionRoleArn = "executionRoleArn" case executionTimeoutMinutes = "executionTimeoutMinutes" case jobDriver = "jobDriver" + case mode = "mode" case name = "name" + case retryPolicy = "retryPolicy" case tags = "tags" } } @@ -1591,6 +1831,8 @@ extension EMRServerless { public let imageConfiguration: ImageConfigurationInput? /// The capacity to initialize when the application is updated. public let initialCapacity: [String: InitialCapacityConfig]? + /// The interactive configuration object that contains new interactive use cases when the application is updated. + public let interactiveConfiguration: InteractiveConfiguration? /// The maximum capacity to allocate when the application is updated. This is cumulative across all workers at any given point in time during the lifespan of the application. No new resources will be created once any one of the defined limits is hit. public let maximumCapacity: MaximumAllowedResources? /// The configuration setting for monitoring. @@ -1603,7 +1845,7 @@ extension EMRServerless { /// The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types. public let workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? - public init(applicationId: String, architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? 
= nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = UpdateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String? = nil, runtimeConfiguration: [Configuration]? = nil, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { + public init(applicationId: String, architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = UpdateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String? = nil, runtimeConfiguration: [Configuration]? = nil, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { self.applicationId = applicationId self.architecture = architecture self.autoStartConfiguration = autoStartConfiguration @@ -1611,6 +1853,7 @@ extension EMRServerless { self.clientToken = clientToken self.imageConfiguration = imageConfiguration self.initialCapacity = initialCapacity + self.interactiveConfiguration = interactiveConfiguration self.maximumCapacity = maximumCapacity self.monitoringConfiguration = monitoringConfiguration self.networkConfiguration = networkConfiguration @@ -1629,6 +1872,7 @@ extension EMRServerless { try container.encode(self.clientToken, forKey: .clientToken) try container.encodeIfPresent(self.imageConfiguration, forKey: .imageConfiguration) try container.encodeIfPresent(self.initialCapacity, forKey: .initialCapacity) + try container.encodeIfPresent(self.interactiveConfiguration, forKey: .interactiveConfiguration) try container.encodeIfPresent(self.maximumCapacity, forKey: .maximumCapacity) try container.encodeIfPresent(self.monitoringConfiguration, forKey: .monitoringConfiguration) try container.encodeIfPresent(self.networkConfiguration, forKey: .networkConfiguration) @@ -1677,6 +1921,7 @@ extension EMRServerless { case clientToken = "clientToken" case imageConfiguration = "imageConfiguration" case initialCapacity = "initialCapacity" + case interactiveConfiguration = "interactiveConfiguration" case maximumCapacity = "maximumCapacity" case monitoringConfiguration = "monitoringConfiguration" case networkConfiguration = "networkConfiguration" diff --git a/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift b/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift index 7e2744be87..54b72bda58 100644 --- a/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift +++ b/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift @@ -1035,7 +1035,7 @@ public struct ElastiCache: AWSService { ) } - /// Represents the input of a TestFailover operation which test automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. 
Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 5 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. + /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following: A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide.
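// --- Illustrative usage (editor's note, not part of the generated patch) ---
// A minimal sketch of calling the TestFailover operation documented above from Soto.
// Assumes the SotoElastiCache product and an already-configured service object;
// "my-replication-group" and "0001" are placeholder identifiers.
import SotoElastiCache

func testShardFailover(using elastiCache: ElastiCache) async throws {
    let message = ElastiCache.TestFailoverMessage(
        nodeGroupId: "0001",                        // the shard (node group) to fail over
        replicationGroupId: "my-replication-group"  // the cluster (replication group) under test
    )
    // Up to 15 node groups can be tested this way in any rolling 24-hour period.
    let result = try await elastiCache.testFailover(message)
    print(result)
}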
@Sendable public func testFailover(_ input: TestFailoverMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> TestFailoverResult { return try await self.client.execute( diff --git a/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift b/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift index 8056251509..71a7aab807 100644 --- a/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift +++ b/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift @@ -3097,7 +3097,7 @@ extension ElastiCache { } public struct DescribeUserGroupsResult: AWSDecodableShape { - /// An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. > + /// An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.> public let marker: String? /// Returns a list of user groups. @OptionalCustomCoding> @@ -3777,7 +3777,7 @@ extension ElastiCache { public let applyImmediately: Bool? /// Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. public let authToken: String? - /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: Rotate Set For more information, see Authenticating Users with Redis AUTH + /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis AUTH public let authTokenUpdateStrategy: AuthTokenUpdateStrategyType? ///  If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? @@ -3996,7 +3996,7 @@ extension ElastiCache { public let applyImmediately: Bool? /// Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update-strategy parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. public let authToken: String? - /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: Rotate Set For more information, see Authenticating Users with Redis AUTH + /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. 
Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis AUTH public let authTokenUpdateStrategy: AuthTokenUpdateStrategyType? /// Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | false public let automaticFailoverEnabled: Bool? @@ -5957,7 +5957,7 @@ extension ElastiCache { } public struct TestFailoverMessage: AWSEncodableShape { - /// The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 5 node groups in any rolling 24-hour period. + /// The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 15 node groups in any rolling 24-hour period. public let nodeGroupId: String? /// The name of the replication group (console: cluster) whose automatic failover is being tested by this operation. public let replicationGroupId: String? diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift index 6f852c58fb..f171698b3b 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift @@ -69,6 +69,7 @@ extension ElasticLoadBalancingV2 { public enum IpAddressType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case dualstack = "dualstack" + case dualstackWithoutPublicIpv4 = "dualstack-without-public-ipv4" case ipv4 = "ipv4" public var description: String { return self.rawValue } } @@ -568,7 +569,7 @@ extension ElasticLoadBalancingV2 { public struct CreateLoadBalancerInput: AWSEncodableShape { /// [Application Load Balancers on Outposts] The ID of the customer-owned address pool (CoIP pool). public let customerOwnedIpv4Pool: String? - /// The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). + /// Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). public let ipAddressType: IpAddressType? /// The name of the load balancer. This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, must not begin or end with a hyphen, and must not begin with "internal-". public let name: String? @@ -1835,7 +1836,7 @@ extension ElasticLoadBalancingV2 { public let dnsName: String? 
/// Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink. public let enforceSecurityGroupInboundRulesOnPrivateLinkTraffic: String? - /// The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). + /// [Application Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers and Gateway Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). public let ipAddressType: IpAddressType? /// The Amazon Resource Name (ARN) of the load balancer. public let loadBalancerArn: String? @@ -2610,7 +2611,7 @@ extension ElasticLoadBalancingV2 { } public struct SetIpAddressTypeInput: AWSEncodableShape { - /// The IP address type. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. + /// Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). public let ipAddressType: IpAddressType? /// The Amazon Resource Name (ARN) of the load balancer. public let loadBalancerArn: String? @@ -2714,7 +2715,7 @@ extension ElasticLoadBalancingV2 { } public struct SetSubnetsInput: AWSEncodableShape { - /// [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). + /// [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. 
The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). public let ipAddressType: IpAddressType? /// The Amazon Resource Name (ARN) of the load balancer. public let loadBalancerArn: String? @@ -2744,7 +2745,7 @@ extension ElasticLoadBalancingV2 { /// Information about the subnets. @OptionalCustomCoding> public var availabilityZones: [AvailabilityZone]? - /// [Network Load Balancers] The IP address type. [Gateway Load Balancers] The IP address type. + /// [Application Load Balancers] The IP address type. [Network Load Balancers] The IP address type. [Gateway Load Balancers] The IP address type. public let ipAddressType: IpAddressType? public init(availabilityZones: [AvailabilityZone]? = nil, ipAddressType: IpAddressType? = nil) { diff --git a/Sources/Soto/Services/EventBridge/EventBridge_api.swift b/Sources/Soto/Services/EventBridge/EventBridge_api.swift index 526cb45caa..b12971c94a 100644 --- a/Sources/Soto/Services/EventBridge/EventBridge_api.swift +++ b/Sources/Soto/Services/EventBridge/EventBridge_api.swift @@ -126,7 +126,7 @@ public struct EventBridge: AWSService { ) } - /// Creates an archive of events with the specified settings. When you create an archive, incoming events might not immediately start being sent to the archive. Allow a short period of time for changes to take effect. If you do not specify a pattern to filter events sent to the archive, all events are sent to the archive except replayed events. Replayed events are not sent to an archive. + /// Creates an archive of events with the specified settings. When you create an archive, incoming events might not immediately start being sent to the archive. Allow a short period of time for changes to take effect. If you do not specify a pattern to filter events sent to the archive, all events are sent to the archive except replayed events. Replayed events are not sent to an archive. Archives and schema discovery are not supported for event buses encrypted using a customer managed key. EventBridge returns an error if: You call CreateArchive on an event bus set to use a customer managed key for encryption. You call CreateDiscoverer on an event bus set to use a customer managed key for encryption. You call UpdatedEventBus to set a customer managed key on an event bus with an archives or schema discovery enabled. To enable archives or schema discovery on an event bus, choose to use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide. @Sendable public func createArchive(_ input: CreateArchiveRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateArchiveResponse { return try await self.client.execute( @@ -152,7 +152,7 @@ public struct EventBridge: AWSService { ) } - /// Creates a global endpoint. Global endpoints improve your application's availability by making it regional-fault tolerant. To do this, you define a primary and secondary Region with event buses in each Region. You also create a Amazon Route 53 health check that will tell EventBridge to route events to the secondary Region when an "unhealthy" state is encountered and events will be routed back to the primary Region when the health check reports a "healthy" state. + /// Creates a global endpoint. Global endpoints improve your application's availability by making it regional-fault tolerant. To do this, you define a primary and secondary Region with event buses in each Region. 
You also create a Amazon Route 53 health check that will tell EventBridge to route events to the secondary Region when an "unhealthy" state is encountered and events will be routed back to the primary Region when the health check reports a "healthy" state. @Sendable public func createEndpoint(_ input: CreateEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateEndpointResponse { return try await self.client.execute( @@ -256,7 +256,7 @@ public struct EventBridge: AWSService { ) } - /// Delete an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide. + /// Delete an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide . @Sendable public func deleteEndpoint(_ input: DeleteEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteEndpointResponse { return try await self.client.execute( @@ -347,7 +347,7 @@ public struct EventBridge: AWSService { ) } - /// Get the information about an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide. + /// Get the information about an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide . @Sendable public func describeEndpoint(_ input: DescribeEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEndpointResponse { return try await self.client.execute( @@ -490,7 +490,7 @@ public struct EventBridge: AWSService { ) } - /// List the global endpoints associated with this account. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide. + /// List the global endpoints associated with this account. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide . @Sendable public func listEndpoints(_ input: ListEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListEndpointsResponse { return try await self.client.execute( @@ -620,7 +620,7 @@ public struct EventBridge: AWSService { ) } - /// Sends custom events to Amazon EventBridge so that they can be matched to rules. The maximum size for a PutEvents event entry is 256 KB. Entry size is calculated including the event and any necessary characters and keys of the JSON representation of the event. To learn more, see Calculating PutEvents event entry size in the Amazon EventBridge User Guide PutEvents accepts the data in JSON format. For the JSON number (integer) data type, the constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of 9,223,372,036,854,775,807. PutEvents will only process nested JSON up to 1100 levels deep. + /// Sends custom events to Amazon EventBridge so that they can be matched to rules. The maximum size for a PutEvents event entry is 256 KB. 
Entry size is calculated including the event and any necessary characters and keys of the JSON representation of the event. To learn more, see Calculating PutEvents event entry size in the Amazon EventBridge User Guide PutEvents accepts the data in JSON format. For the JSON number (integer) data type, the constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of 9,223,372,036,854,775,807. PutEvents will only process nested JSON up to 1100 levels deep. @Sendable public func putEvents(_ input: PutEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutEventsResponse { return try await self.client.execute( @@ -633,7 +633,7 @@ public struct EventBridge: AWSService { ) } - /// This is used by SaaS partners to write events to a customer's partner event bus. Amazon Web Services customers do not use this operation. For information on calculating event batch size, see Calculating EventBridge PutEvents event entry size in the EventBridge User Guide. + /// This is used by SaaS partners to write events to a customer's partner event bus. Amazon Web Services customers do not use this operation. For information on calculating event batch size, see Calculating EventBridge PutEvents event entry size in the EventBridge User Guide. @Sendable public func putPartnerEvents(_ input: PutPartnerEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutPartnerEventsResponse { return try await self.client.execute( @@ -672,7 +672,7 @@ public struct EventBridge: AWSService { ) } - /// Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule. Targets are the resources that are invoked when a rule is triggered. The maximum number of entries per request is 10. Each rule can have up to five (5) targets associated with it at one time. For a list of services you can configure as targets for events, see EventBridge targets in the Amazon EventBridge User Guide. Creating rules with built-in targets is supported only in the Amazon Web Services Management Console. The built-in targets are: Amazon EBS CreateSnapshot API call Amazon EC2 RebootInstances API call Amazon EC2 StopInstances API call Amazon EC2 TerminateInstances API call For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field. To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions: For Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API Gateway APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide. If another Amazon Web Services account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. 
Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge Pricing. Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different Amazon Web Services account. If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide. If you have an IAM role on a cross-account event bus target, a PutTargets call without a role on the same target (same Id and Arn) will not remove the role. For more information about enabling cross-account events, see PutPermission. Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event: If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target). If Input is specified in the form of valid JSON, then the matched event is overridden with this constant. If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed). If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target. When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation. When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect. This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code. + /// Adds the specified targets to the specified rule, or updates the targets if they are already associated with the rule. Targets are the resources that are invoked when a rule is triggered. The maximum number of entries per request is 10. Each rule can have up to five (5) targets associated with it at one time. For a list of services you can configure as targets for events, see EventBridge targets in the Amazon EventBridge User Guide . Creating rules with built-in targets is supported only in the Amazon Web Services Management Console. The built-in targets are: Amazon EBS CreateSnapshot API call Amazon EC2 RebootInstances API call Amazon EC2 StopInstances API call Amazon EC2 TerminateInstances API call For some target types, PutTargets provides target-specific parameters. If the target is a Kinesis data stream, you can optionally specify which shard the event goes to by using the KinesisParameters argument. To invoke a command on multiple EC2 instances with one rule, you can use the RunCommandParameters field. 
To be able to make API calls against the resources that you own, Amazon EventBridge needs the appropriate permissions: For Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. For EC2 instances, Kinesis Data Streams, Step Functions state machines and API Gateway APIs, EventBridge relies on IAM roles that you specify in the RoleARN argument in PutTargets. For more information, see Authentication and Access Control in the Amazon EventBridge User Guide . If another Amazon Web Services account is in the same region and has granted you permission (using PutPermission), you can send events to that account. Set that account's event bus as a target of the rules in your account. To send the matched events to the other account, specify that account's event bus as the Arn value when you run PutTargets. If your account sends events to another account, your account is charged for each sent event. Each event sent to another account is charged as a custom event. The account receiving the event is not charged. For more information, see Amazon EventBridge Pricing. Input, InputPath, and InputTransformer are not available with PutTarget if the target is an event bus of a different Amazon Web Services account. If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide. If you have an IAM role on a cross-account event bus target, a PutTargets call without a role on the same target (same Id and Arn) will not remove the role. For more information about enabling cross-account events, see PutPermission. Input, InputPath, and InputTransformer are mutually exclusive and optional parameters of a target. When a rule is triggered due to a matched event: If none of the following arguments are specified for a target, then the entire event is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing from the event is passed to the target). If Input is specified in the form of valid JSON, then the matched event is overridden with this constant. If InputPath is specified in the form of JSONPath (for example, $.detail), then only the part of the event specified in the path is passed to the target (for example, only the detail part of the event is passed). If InputTransformer is specified, then one or more specified JSONPaths are extracted from the event and used as values in a template that you specify as the input to the target. When you specify InputPath or InputTransformer, you must use JSON dot notation, not bracket notation. When you add targets to a rule and the associated rule triggers soon after, new or updated targets might not be immediately invoked. Allow a short period of time for changes to take effect. This action can partially fail if too many requests are made at the same time. If that happens, FailedEntryCount is non-zero in the response and each entry in FailedEntries provides the ID of the failed target and the error code. 
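// --- Illustrative usage (editor's note, not part of the generated patch) ---
// A minimal sketch of the PutTargets call described above: attach a Lambda function
// to a rule and check FailedEntryCount/FailedEntries for partial failures. Assumes
// the SotoEventBridge product; the rule name and function ARN are placeholders.
import SotoEventBridge

func addLambdaTarget(using eventBridge: EventBridge) async throws {
    let target = EventBridge.Target(
        arn: "arn:aws:lambda:us-east-1:123456789012:function:my-handler",
        id: "my-handler-target"
    )
    let request = EventBridge.PutTargetsRequest(rule: "my-rule", targets: [target])
    let response = try await eventBridge.putTargets(request)
    if let failedCount = response.failedEntryCount, failedCount > 0 {
        // Each failed entry reports the target ID and an error code, as noted above.
        response.failedEntries?.forEach { print($0) }
    }
}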
@Sendable public func putTargets(_ input: PutTargetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutTargetsResponse { return try await self.client.execute( @@ -802,7 +802,7 @@ public struct EventBridge: AWSService { ) } - /// Update an existing endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide. + /// Update an existing endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide . @Sendable public func updateEndpoint(_ input: UpdateEndpointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEndpointResponse { return try await self.client.execute( @@ -814,6 +814,19 @@ public struct EventBridge: AWSService { logger: logger ) } + + /// Updates the specified event bus. + @Sendable + public func updateEventBus(_ input: UpdateEventBusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEventBusResponse { + return try await self.client.execute( + operation: "UpdateEventBus", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } } extension EventBridge { diff --git a/Sources/Soto/Services/EventBridge/EventBridge_shapes.swift b/Sources/Soto/Services/EventBridge/EventBridge_shapes.swift index 138a049a96..00a6a50c40 100644 --- a/Sources/Soto/Services/EventBridge/EventBridge_shapes.swift +++ b/Sources/Soto/Services/EventBridge/EventBridge_shapes.swift @@ -1026,7 +1026,7 @@ extension EventBridge { public let eventBuses: [EndpointEventBus] /// The name of the global endpoint. For example, "Name":"us-east-2-custom_bus_A-endpoint". public let name: String - /// Enable or disable event replication. The default state is ENABLED which means you must supply a RoleArn. If you don't have a RoleArn or you don't want event replication enabled, set the state to DISABLED. + /// Enable or disable event replication. The default state is ENABLED which means you must supply a RoleArn. If you don't have a RoleArn or you don't want event replication enabled, set the state to DISABLED. public let replicationConfig: ReplicationConfig? /// The ARN of the role used for replication. public let roleArn: String? @@ -1107,23 +1107,34 @@ extension EventBridge { } public struct CreateEventBusRequest: AWSEncodableShape { + public let deadLetterConfig: DeadLetterConfig? + /// The event bus description. + public let description: String? /// If you are creating a partner event bus, this specifies the partner event source that the new event bus will be matched with. public let eventSourceName: String? + /// The identifier of the KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. If you do not specify a customer managed key identifier, EventBridge uses an Amazon Web Services owned key to encrypt events on the event bus. For more information, see Managing keys in the Key Management Service Developer Guide. Archives and schema discovery are not supported for event buses encrypted using a customer managed key. EventBridge returns an error if: You call CreateArchive on an event bus set to use a customer managed key for encryption. 
You call CreateDiscoverer on an event bus set to use a customer managed key for encryption. You call UpdatedEventBus to set a customer managed key on an event bus with an archives or schema discovery enabled. To enable archives or schema discovery on an event bus, choose to use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide. + public let kmsKeyIdentifier: String? /// The name of the new event bus. Custom event bus names can't contain the / character, but you can use the / character in partner event bus names. In addition, for partner event buses, the name must exactly match the name of the partner event source that this event bus is matched to. You can't use the name default for a custom event bus, as this name is already used for your account's default event bus. public let name: String /// Tags to associate with the event bus. public let tags: [Tag]? - public init(eventSourceName: String? = nil, name: String, tags: [Tag]? = nil) { + public init(deadLetterConfig: DeadLetterConfig? = nil, description: String? = nil, eventSourceName: String? = nil, kmsKeyIdentifier: String? = nil, name: String, tags: [Tag]? = nil) { + self.deadLetterConfig = deadLetterConfig + self.description = description self.eventSourceName = eventSourceName + self.kmsKeyIdentifier = kmsKeyIdentifier self.name = name self.tags = tags } public func validate(name: String) throws { + try self.deadLetterConfig?.validate(name: "\(name).deadLetterConfig") + try self.validate(self.description, name: "description", parent: name, max: 512) try self.validate(self.eventSourceName, name: "eventSourceName", parent: name, max: 256) try self.validate(self.eventSourceName, name: "eventSourceName", parent: name, min: 1) try self.validate(self.eventSourceName, name: "eventSourceName", parent: name, pattern: "^aws\\.partner(/[\\.\\-_A-Za-z0-9]+){2,}$") + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, max: 2048) try self.validate(self.name, name: "name", parent: name, max: 256) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[/\\.\\-_A-Za-z0-9]+$") @@ -1133,22 +1144,36 @@ extension EventBridge { } private enum CodingKeys: String, CodingKey { + case deadLetterConfig = "DeadLetterConfig" + case description = "Description" case eventSourceName = "EventSourceName" + case kmsKeyIdentifier = "KmsKeyIdentifier" case name = "Name" case tags = "Tags" } } public struct CreateEventBusResponse: AWSDecodableShape { + public let deadLetterConfig: DeadLetterConfig? + /// The event bus description. + public let description: String? /// The ARN of the new event bus. public let eventBusArn: String? + /// The identifier of the KMS customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide. + public let kmsKeyIdentifier: String? - public init(eventBusArn: String? = nil) { + public init(deadLetterConfig: DeadLetterConfig? = nil, description: String? = nil, eventBusArn: String? = nil, kmsKeyIdentifier: String? 
= nil) { + self.deadLetterConfig = deadLetterConfig + self.description = description self.eventBusArn = eventBusArn + self.kmsKeyIdentifier = kmsKeyIdentifier } private enum CodingKeys: String, CodingKey { + case deadLetterConfig = "DeadLetterConfig" + case description = "Description" case eventBusArn = "EventBusArn" + case kmsKeyIdentifier = "KmsKeyIdentifier" } } @@ -1789,19 +1814,38 @@ extension EventBridge { public struct DescribeEventBusResponse: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the account permitted to write events to the current account. public let arn: String? + /// The time the event bus was created. + public let creationTime: Date? + public let deadLetterConfig: DeadLetterConfig? + /// The event bus description. + public let description: String? + /// The identifier of the KMS customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide. + public let kmsKeyIdentifier: String? + /// The time the event bus was last modified. + public let lastModifiedTime: Date? /// The name of the event bus. Currently, this is always default. public let name: String? /// The policy that enables the external account to send events to your account. public let policy: String? - public init(arn: String? = nil, name: String? = nil, policy: String? = nil) { + public init(arn: String? = nil, creationTime: Date? = nil, deadLetterConfig: DeadLetterConfig? = nil, description: String? = nil, kmsKeyIdentifier: String? = nil, lastModifiedTime: Date? = nil, name: String? = nil, policy: String? = nil) { self.arn = arn + self.creationTime = creationTime + self.deadLetterConfig = deadLetterConfig + self.description = description + self.kmsKeyIdentifier = kmsKeyIdentifier + self.lastModifiedTime = lastModifiedTime self.name = name self.policy = policy } private enum CodingKeys: String, CodingKey { case arn = "Arn" + case creationTime = "CreationTime" + case deadLetterConfig = "DeadLetterConfig" + case description = "Description" + case kmsKeyIdentifier = "KmsKeyIdentifier" + case lastModifiedTime = "LastModifiedTime" case name = "Name" case policy = "Policy" } @@ -2006,7 +2050,7 @@ extension EventBridge { public let description: String? /// The name of the event bus associated with the rule. public let eventBusName: String? - /// The event pattern. For more information, see Events and Event Patterns in the Amazon EventBridge User Guide. + /// The event pattern. For more information, see Events and Event Patterns in the Amazon EventBridge User Guide . public let eventPattern: String? /// If this is a managed rule, created by an Amazon Web Services service on your behalf, this field displays the principal name of the Amazon Web Services service that created the rule. public let managedBy: String? @@ -2202,7 +2246,7 @@ extension EventBridge { public let lastModifiedTime: Date? /// The name of the endpoint. public let name: String? - /// Whether event replication was enabled or disabled for this endpoint. The default state is ENABLED which means you must supply a RoleArn. If you don't have a RoleArn or you don't want event replication enabled, set the state to DISABLED. + /// Whether event replication was enabled or disabled for this endpoint. The default state is ENABLED which means you must supply a RoleArn. If you don't have a RoleArn or you don't want event replication enabled, set the state to DISABLED. public let replicationConfig: ReplicationConfig? 
/// The ARN of the role used by event replication for the endpoint. public let roleArn: String? @@ -2268,19 +2312,31 @@ extension EventBridge { public struct EventBus: AWSDecodableShape { /// The ARN of the event bus. public let arn: String? + /// The time the event bus was created. + public let creationTime: Date? + /// The event bus description. + public let description: String? + /// The time the event bus was last modified. + public let lastModifiedTime: Date? /// The name of the event bus. public let name: String? /// The permissions policy of the event bus, describing which other Amazon Web Services accounts can write events to this event bus. public let policy: String? - public init(arn: String? = nil, name: String? = nil, policy: String? = nil) { + public init(arn: String? = nil, creationTime: Date? = nil, description: String? = nil, lastModifiedTime: Date? = nil, name: String? = nil, policy: String? = nil) { self.arn = arn + self.creationTime = creationTime + self.description = description + self.lastModifiedTime = lastModifiedTime self.name = name self.policy = policy } private enum CodingKeys: String, CodingKey { case arn = "Arn" + case creationTime = "CreationTime" + case description = "Description" + case lastModifiedTime = "LastModifiedTime" case name = "Name" case policy = "Policy" } @@ -2346,7 +2402,7 @@ extension EventBridge { public let headerParameters: [String: String]? /// The path parameter values to be used to populate API Gateway API or EventBridge ApiDestination path wildcards ("*"). public let pathParameterValues: [String]? - /// The query string keys/values that need to be sent as part of request invoking the API Gateway API or EventBridge ApiDestination. + /// The query string keys/values that need to be sent as part of request invoking the API Gateway API or EventBridge ApiDestination. public let queryStringParameters: [String: String]? public init(headerParameters: [String: String]? = nil, pathParameterValues: [String]? = nil, queryStringParameters: [String: String]? = nil) { @@ -2598,7 +2654,7 @@ extension EventBridge { public let maxResults: Int? /// A value that will return a subset of the endpoints associated with this account. For example, "NamePrefix": "ABC" will return all endpoints with "ABC" in the name. public let namePrefix: String? - /// If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + /// If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. public let nextToken: String? public init(homeRegion: String? = nil, maxResults: Int? = nil, namePrefix: String? = nil, nextToken: String? = nil) { @@ -2632,7 +2688,7 @@ extension EventBridge { public struct ListEndpointsResponse: AWSDecodableShape { /// The endpoints returned by the call. public let endpoints: [Endpoint]? - /// If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. 
Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + /// If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. public let nextToken: String? public init(endpoints: [Endpoint]? = nil, nextToken: String? = nil) { @@ -3240,7 +3296,7 @@ extension EventBridge { public let detail: String? /// Free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail. Detail, DetailType, and Source are required for EventBridge to successfully send an event to an event bus. If you include event entries in a request that do not include each of those properties, EventBridge fails that entry. If you submit a request in which none of the entries have each of these properties, EventBridge fails the entire request. public let detailType: String? - /// The name or ARN of the event bus to receive the event. Only the rules that are associated with this event bus are used to match the event. If you omit this, the default event bus is used. If you're using a global endpoint with a custom bus, you can enter either the name or Amazon Resource Name (ARN) of the event bus in either the primary or secondary Region here. EventBridge then determines the corresponding event bus in the other Region based on the endpoint referenced by the EndpointId. Specifying the event bus ARN is preferred. + /// The name or ARN of the event bus to receive the event. Only the rules that are associated with this event bus are used to match the event. If you omit this, the default event bus is used. If you're using a global endpoint with a custom bus, you can enter either the name or Amazon Resource Name (ARN) of the event bus in either the primary or secondary Region here. EventBridge then determines the corresponding event bus in the other Region based on the endpoint referenced by the EndpointId. Specifying the event bus ARN is preferred. public let eventBusName: String? /// Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present. public let resources: [String]? @@ -3381,7 +3437,7 @@ extension EventBridge { } public struct PutPartnerEventsResponse: AWSDecodableShape { - /// The results for each event entry the partner submitted in this request. If the event was successfully submitted, the entry has the event ID in it. Otherwise, you can use the error code and error message to identify the problem with the entry. For each record, the index of the response element is the same as the index in the request array. + /// The results for each event entry the partner submitted in this request. If the event was successfully submitted, the entry has the event ID in it. Otherwise, you can use the error code and error message to identify the problem with the entry. For each record, the index of the response element is the same as the index in the request array. public let entries: [PutPartnerEventsResultEntry]? /// The number of events from this operation that could not be written to the partner event bus. public let failedEntryCount: Int? 
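// --- Illustrative usage (editor's note, not part of the generated patch) ---
// A minimal sketch of publishing a custom event to a named event bus with PutEvents
// and inspecting the per-entry results, as the surrounding documentation describes.
// Assumes the SotoEventBridge product; the bus name, source, and detail payload are placeholders.
import SotoEventBridge

func sendOrderPlacedEvent(using eventBridge: EventBridge) async throws {
    let entry = EventBridge.PutEventsRequestEntry(
        detail: #"{"orderId": "1234"}"#,        // event payload as a JSON string
        detailType: "OrderPlaced",
        eventBusName: "my-application-bus",     // omit to use the default event bus
        source: "com.example.orders"
    )
    let response = try await eventBridge.putEvents(.init(entries: [entry]))
    if let failedCount = response.failedEntryCount, failedCount > 0 {
        // Failed entries carry an error code and message for diagnosis.
        response.entries?.forEach { print($0) }
    }
}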
@@ -3421,7 +3477,7 @@ extension EventBridge { public struct PutPermissionRequest: AWSEncodableShape { /// The action that you are enabling the other account to perform. public let action: String? - /// This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain Amazon Web Services organization. For more information about Amazon Web Services Organizations, see What Is Amazon Web Services Organizations in the Amazon Web Services Organizations User Guide. If you specify Condition with an Amazon Web Services organization ID, and specify "*" as the value for Principal, you grant permission to all the accounts in the named organization. The Condition is a JSON string which must contain Type, Key, and Value fields. + /// This parameter enables you to limit the permission to accounts that fulfill a certain condition, such as being a member of a certain Amazon Web Services organization. For more information about Amazon Web Services Organizations, see What Is Amazon Web Services Organizations in the Amazon Web Services Organizations User Guide. If you specify Condition with an Amazon Web Services organization ID, and specify "*" as the value for Principal, you grant permission to all the accounts in the named organization. The Condition is a JSON string which must contain Type, Key, and Value fields. public let condition: Condition? /// The name of the event bus associated with the rule. If you omit this, the default event bus is used. public let eventBusName: String? @@ -3471,7 +3527,7 @@ extension EventBridge { public let description: String? /// The name or ARN of the event bus to associate with this rule. If you omit this, the default event bus is used. public let eventBusName: String? - /// The event pattern. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide. + /// The event pattern. For more information, see Amazon EventBridge event patterns in the Amazon EventBridge User Guide . public let eventPattern: String? /// The name of the rule that you are creating or updating. public let name: String @@ -3479,7 +3535,7 @@ extension EventBridge { public let roleArn: String? /// The scheduling expression. For example, "cron(0 20 * * ? *)" or "rate(5 minutes)". public let scheduleExpression: String? - /// Indicates whether the rule is enabled or disabled. + /// The state of the rule. Valid values include: DISABLED: The rule is disabled. EventBridge does not match any events against the rule. ENABLED: The rule is enabled. EventBridge matches events against the rule, except for Amazon Web Services management events delivered through CloudTrail. ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS: The rule is enabled for all events, including Amazon Web Services management events delivered through CloudTrail. Management events provide visibility into management operations that are performed on resources in your Amazon Web Services account. These are also known as control plane operations. For more information, see Logging management events in the CloudTrail User Guide, and Filtering management events from Amazon Web Services services in the Amazon EventBridge User Guide . This value is only valid for rules on the default event bus or custom event buses. It does not apply to partner event buses. public let state: RuleState? /// The list of key-value pairs to associate with the rule. public let tags: [Tag]? @@ -3619,7 +3675,7 @@ extension EventBridge { public let secretManagerArn: String? 
/// The SQL statement text to run. public let sql: String? - /// One or more SQL statements to run. The SQL statements are run as a single transaction. They run serially in the order of the array. Subsequent SQL statements don't start until the previous statement in the array completes. If any SQL statement fails, then because they are run as one transaction, all work is rolled back. + /// One or more SQL statements to run. The SQL statements are run as a single transaction. They run serially in the order of the array. Subsequent SQL statements don't start until the previous statement in the array completes. If any SQL statement fails, then because they are run as one transaction, all work is rolled back. public let sqls: [String]? /// The name of the SQL statement. You can name the SQL statement when you create it to identify the query. public let statementName: String? @@ -3907,7 +3963,7 @@ extension EventBridge { public let description: String? /// The name or ARN of the event bus associated with the rule. If you omit this, the default event bus is used. public let eventBusName: String? - /// The event pattern of the rule. For more information, see Events and Event Patterns in the Amazon EventBridge User Guide. + /// The event pattern of the rule. For more information, see Events and Event Patterns in the Amazon EventBridge User Guide . public let eventPattern: String? /// If the rule was created on behalf of your account by an Amazon Web Services service, this field displays the principal name of the service that created the rule. public let managedBy: String? @@ -3917,7 +3973,7 @@ extension EventBridge { public let roleArn: String? /// The scheduling expression. For example, "cron(0 20 * * ? *)", "rate(5 minutes)". For more information, see Creating an Amazon EventBridge rule that runs on a schedule. public let scheduleExpression: String? - /// The state of the rule. + /// The state of the rule. Valid values include: DISABLED: The rule is disabled. EventBridge does not match any events against the rule. ENABLED: The rule is enabled. EventBridge matches events against the rule, except for Amazon Web Services management events delivered through CloudTrail. ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS: The rule is enabled for all events, including Amazon Web Services management events delivered through CloudTrail. Management events provide visibility into management operations that are performed on resources in your Amazon Web Services account. These are also known as control plane operations. For more information, see Logging management events in the CloudTrail User Guide, and Filtering management events from Amazon Web Services services in the Amazon EventBridge User Guide . This value is only valid for rules on the default event bus or custom event buses. It does not apply to partner event buses. public let state: RuleState? public init(arn: String? = nil, description: String? = nil, eventBusName: String? = nil, eventPattern: String? = nil, managedBy: String? = nil, name: String? = nil, roleArn: String? = nil, scheduleExpression: String? = nil, state: RuleState? = nil) { @@ -4299,7 +4355,7 @@ extension EventBridge { public struct TestEventPatternRequest: AWSEncodableShape { /// The event, in JSON format, to test against the event pattern. The JSON must follow the format specified in Amazon Web Services Events, and the following fields are mandatory: id account source time region resources detail-type public let event: String - /// The event pattern. 
For more information, see Events and Event Patterns in the Amazon EventBridge User Guide. + /// The event pattern. For more information, see Events and Event Patterns in the Amazon EventBridge User Guide . public let eventPattern: String public init(event: String, eventPattern: String) { @@ -4793,6 +4849,67 @@ extension EventBridge { case state = "State" } } + + public struct UpdateEventBusRequest: AWSEncodableShape { + public let deadLetterConfig: DeadLetterConfig? + /// The event bus description. + public let description: String? + /// The identifier of the KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN. If you do not specify a customer managed key identifier, EventBridge uses an Amazon Web Services owned key to encrypt events on the event bus. For more information, see Managing keys in the Key Management Service Developer Guide. Archives and schema discovery are not supported for event buses encrypted using a customer managed key. EventBridge returns an error if: You call CreateArchive on an event bus set to use a customer managed key for encryption. You call CreateDiscoverer on an event bus set to use a customer managed key for encryption. You call UpdatedEventBus to set a customer managed key on an event bus with an archives or schema discovery enabled. To enable archives or schema discovery on an event bus, choose to use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide. + public let kmsKeyIdentifier: String? + /// The name of the event bus. + public let name: String? + + public init(deadLetterConfig: DeadLetterConfig? = nil, description: String? = nil, kmsKeyIdentifier: String? = nil, name: String? = nil) { + self.deadLetterConfig = deadLetterConfig + self.description = description + self.kmsKeyIdentifier = kmsKeyIdentifier + self.name = name + } + + public func validate(name: String) throws { + try self.deadLetterConfig?.validate(name: "\(name).deadLetterConfig") + try self.validate(self.description, name: "description", parent: name, max: 512) + try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, max: 2048) + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[/\\.\\-_A-Za-z0-9]+$") + } + + private enum CodingKeys: String, CodingKey { + case deadLetterConfig = "DeadLetterConfig" + case description = "Description" + case kmsKeyIdentifier = "KmsKeyIdentifier" + case name = "Name" + } + } + + public struct UpdateEventBusResponse: AWSDecodableShape { + /// The event bus Amazon Resource Name (ARN). + public let arn: String? + public let deadLetterConfig: DeadLetterConfig? + /// The event bus description. + public let description: String? + /// The identifier of the KMS customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide. + public let kmsKeyIdentifier: String? + /// The event bus name. + public let name: String? + + public init(arn: String? = nil, deadLetterConfig: DeadLetterConfig? = nil, description: String? = nil, kmsKeyIdentifier: String? = nil, name: String? 
= nil) { + self.arn = arn + self.deadLetterConfig = deadLetterConfig + self.description = description + self.kmsKeyIdentifier = kmsKeyIdentifier + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case deadLetterConfig = "DeadLetterConfig" + case description = "Description" + case kmsKeyIdentifier = "KmsKeyIdentifier" + case name = "Name" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/FMS/FMS_api.swift b/Sources/Soto/Services/FMS/FMS_api.swift index 32dae2d3b4..9566e6d81e 100644 --- a/Sources/Soto/Services/FMS/FMS_api.swift +++ b/Sources/Soto/Services/FMS/FMS_api.swift @@ -83,6 +83,7 @@ public struct FMS: AWSService { "ap-southeast-1": "fms-fips.ap-southeast-1.amazonaws.com", "ap-southeast-2": "fms-fips.ap-southeast-2.amazonaws.com", "ca-central-1": "fms-fips.ca-central-1.amazonaws.com", + "ca-west-1": "fms-fips.ca-west-1.amazonaws.com", "eu-central-1": "fms-fips.eu-central-1.amazonaws.com", "eu-south-1": "fms-fips.eu-south-1.amazonaws.com", "eu-west-1": "fms-fips.eu-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/FMS/FMS_shapes.swift b/Sources/Soto/Services/FMS/FMS_shapes.swift index 796b689219..bbbdf734d5 100644 --- a/Sources/Soto/Services/FMS/FMS_shapes.swift +++ b/Sources/Soto/Services/FMS/FMS_shapes.swift @@ -2380,10 +2380,10 @@ extension FMS { /// The rules that you want to run first in the Firewall Manager managed network ACLs. Provide these in the order in which you want them to run. Firewall Manager will assign the specific rule numbers for you, in the network ACLs that it creates. You must specify at least one first entry or one last entry in any network ACL policy. public let firstEntries: [NetworkAclEntry]? /// Applies only when remediation is enabled for the policy as a whole. Firewall Manager uses this setting when it finds policy violations that involve conflicts between the custom entries and the policy entries. If forced remediation is disabled, Firewall Manager marks the network ACL as noncompliant and does not try to remediate. For more information about the remediation behavior, see - /// Network access control list (ACL) policies in the Firewall Manager Developer Guide. + /// Remediation for managed network ACLs in the Firewall Manager Developer Guide. public let forceRemediateForFirstEntries: Bool /// Applies only when remediation is enabled for the policy as a whole. Firewall Manager uses this setting when it finds policy violations that involve conflicts between the custom entries and the policy entries. If forced remediation is disabled, Firewall Manager marks the network ACL as noncompliant and does not try to remediate. For more information about the remediation behavior, see - /// Network access control list (ACL) policies in the Firewall Manager Developer Guide. + /// Remediation for managed network ACLs in the Firewall Manager Developer Guide. public let forceRemediateForLastEntries: Bool /// The rules that you want to run last in the Firewall Manager managed network ACLs. Provide these in the order in which you want them to run. Firewall Manager will assign the specific rule numbers for you, in the network ACLs that it creates. You must specify at least one first entry or one last entry in any network ACL policy. public let lastEntries: [NetworkAclEntry]? @@ -2924,7 +2924,7 @@ extension FMS { public let excludeResourceTags: Bool /// Specifies the Amazon Web Services account IDs and Organizations organizational units (OUs) to include in the policy. 
Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time. You can specify inclusions or exclusions, but not both. If you specify an IncludeMap, Firewall Manager applies the policy to all accounts specified by the IncludeMap, and does not evaluate any ExcludeMap specifications. If you do not specify an IncludeMap, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap. You can specify account IDs, OUs, or a combination: Specify account IDs by setting the key to ACCOUNT. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. Specify OUs by setting the key to ORG_UNIT. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. public let includeMap: [CustomerPolicyScopeIdType: [String]]? - /// The definition of the Network Firewall firewall policy. + /// Your description of the Firewall Manager policy. public let policyDescription: String? /// The ID of the Firewall Manager policy. public let policyId: String? @@ -3751,7 +3751,7 @@ extension FMS { public struct ResourceTag: AWSEncodableShape & AWSDecodableShape { /// The resource tag key. public let key: String - /// The resource tag value. + /// The resource tag value. To specify an empty string value, either don't provide this or specify it as "". public let value: String? public init(key: String, value: String? = nil) { diff --git a/Sources/Soto/Services/FSx/FSx_api.swift b/Sources/Soto/Services/FSx/FSx_api.swift index 666a35a369..e48fc444aa 100644 --- a/Sources/Soto/Services/FSx/FSx_api.swift +++ b/Sources/Soto/Services/FSx/FSx_api.swift @@ -193,7 +193,7 @@ public struct FSx: AWSService { ) } - /// Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation: Amazon FSx for Lustre Amazon FSx for NetApp ONTAP Amazon FSx for OpenZFS Amazon FSx for Windows File Server This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same. If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following: Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING. Returns the description of the file system in JSON format. 
The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information. + /// Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation: Amazon FSx for Lustre Amazon FSx for NetApp ONTAP Amazon FSx for OpenZFS Amazon FSx for Windows File Server This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same. If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following: Creates a new, empty Amazon FSx file system with an assigned ID, and an initial lifecycle state of CREATING. Returns the description of the file system in JSON format. The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information. @Sendable public func createFileSystem(_ input: CreateFileSystemRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFileSystemResponse { return try await self.client.execute( @@ -310,7 +310,7 @@ public struct FSx: AWSService { ) } - /// Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted. To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleFileSystem operation. By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted. To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. 
To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task. The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error. If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request). The data in a deleted file system is also deleted and can't be recovered by any means. + /// Deletes a file system. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups and snapshots are also deleted. To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and storage virtual machines (SVMs) on the file system. Then provide a FileSystemId value to the DeleteFileSystem operation. By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup isn't subject to the file system's retention policy, and must be manually deleted. To delete an Amazon FSx for Lustre file system, first unmount it from every connected Amazon EC2 instance, then provide a FileSystemId value to the DeleteFileSystem operation. By default, Amazon FSx will not take a final backup when the DeleteFileSystem operation is invoked. On file systems not linked to an Amazon S3 bucket, set SkipFinalBackup to false to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked file systems. To ensure all of your data is written back to S3 before deleting your file system, you can either monitor for the AgeOfOldestQueuedMessage metric to be zero (if using automatic export) or you can run an export data repository task. If you have automatic export enabled and want to use an export data repository task, you have to disable automatic export before executing the export data repository task. The DeleteFileSystem operation returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems operation, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems operation returns a FileSystemNotFound error. If a data repository task is in a PENDING or EXECUTING state, deleting an Amazon FSx for Lustre file system will fail with an HTTP status code 400 (Bad Request). The data in a deleted file system is also deleted and can't be recovered by any means. @Sendable public func deleteFileSystem(_ input: DeleteFileSystemRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFileSystemResponse { return try await self.client.execute( @@ -609,7 +609,7 @@ public struct FSx: AWSService { ) } - /// Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. 
For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity StorageType ThroughputCapacity DiskIopsConfiguration WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType LogConfiguration LustreRootSquashConfiguration PerUnitStorageThroughput StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword HAPairs RemoveRouteTableIds StorageCapacity ThroughputCapacity ThroughputCapacityPerHAPair WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime + /// Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration AutomaticBackupRetentionDays DailyAutomaticBackupStartTime SelfManagedActiveDirectoryConfiguration StorageCapacity StorageType ThroughputCapacity DiskIopsConfiguration WeeklyMaintenanceStartTime For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DataCompressionType LogConfiguration LustreRootSquashConfiguration MetadataConfiguration PerUnitStorageThroughput StorageCapacity WeeklyMaintenanceStartTime For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays DailyAutomaticBackupStartTime DiskIopsConfiguration FsxAdminPassword HAPairs RemoveRouteTableIds StorageCapacity ThroughputCapacity ThroughputCapacityPerHAPair WeeklyMaintenanceStartTime For FSx for OpenZFS file systems, you can update the following properties: AddRouteTableIds AutomaticBackupRetentionDays CopyTagsToBackups CopyTagsToVolumes DailyAutomaticBackupStartTime DiskIopsConfiguration RemoveRouteTableIds StorageCapacity ThroughputCapacity WeeklyMaintenanceStartTime @Sendable public func updateFileSystem(_ input: UpdateFileSystemRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateFileSystemResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/FSx/FSx_shapes.swift b/Sources/Soto/Services/FSx/FSx_shapes.swift index bdc448887e..78934f6f95 100644 --- a/Sources/Soto/Services/FSx/FSx_shapes.swift +++ b/Sources/Soto/Services/FSx/FSx_shapes.swift @@ -244,6 +244,12 @@ extension FSx { public var description: String { return self.rawValue } } + public enum MetadataConfigurationMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case automatic = "AUTOMATIC" + case userProvisioned = "USER_PROVISIONED" + public var description: String { return self.rawValue } + } + public enum NfsVersion: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case nfs3 = "NFS3" public var description: String { return self.rawValue } @@ -1434,7 +1440,7 @@ extension FSx { public let 
dailyAutomaticBackupStartTime: String? /// Sets the data compression configuration for the file system. DataCompressionType can have the following values: NONE - (Default) Data compression is turned off when the file system is created. LZ4 - Data compression is turned on with the LZ4 algorithm. For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide. public let dataCompressionType: DataCompressionType? - /// (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available. Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide. If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails. Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide. (Default = SCRATCH_1) + /// (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available. Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). You can optionally specify a metadata configuration mode for PERSISTENT_2 which supports increasing metadata performance. PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide. If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails. Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. 
For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide. (Default = SCRATCH_1) public let deploymentType: LustreDeploymentType? /// The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices. This parameter is required when storage type is HDD. Set this property to READ to improve the performance for frequently accessed files by caching up to 20% of the total storage capacity of the file system. This parameter is required when StorageType is set to HDD. public let driveCacheType: DriveCacheType? @@ -1446,6 +1452,8 @@ extension FSx { public let importPath: String? /// The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. public let logConfiguration: LustreLogCreateConfiguration? + /// The Lustre metadata performance configuration for the creation of an FSx for Lustre file system using a PERSISTENT_2 deployment type. + public let metadataConfiguration: CreateFileSystemLustreMetadataConfiguration? /// Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision. Valid values: For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB. For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB. For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB. public let perUnitStorageThroughput: Int? /// The Lustre root squash configuration used when creating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. @@ -1453,7 +1461,7 @@ extension FSx { /// (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday. public let weeklyMaintenanceStartTime: String? - public init(autoImportPolicy: AutoImportPolicyType? = nil, automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, deploymentType: LustreDeploymentType? = nil, driveCacheType: DriveCacheType? = nil, exportPath: String? = nil, importedFileChunkSize: Int? = nil, importPath: String? = nil, logConfiguration: LustreLogCreateConfiguration? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, weeklyMaintenanceStartTime: String? = nil) { + public init(autoImportPolicy: AutoImportPolicyType? = nil, automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, deploymentType: LustreDeploymentType? = nil, driveCacheType: DriveCacheType? = nil, exportPath: String? = nil, importedFileChunkSize: Int? = nil, importPath: String? = nil, logConfiguration: LustreLogCreateConfiguration? 
= nil, metadataConfiguration: CreateFileSystemLustreMetadataConfiguration? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, weeklyMaintenanceStartTime: String? = nil) { self.autoImportPolicy = autoImportPolicy self.automaticBackupRetentionDays = automaticBackupRetentionDays self.copyTagsToBackups = copyTagsToBackups @@ -1465,6 +1473,7 @@ extension FSx { self.importedFileChunkSize = importedFileChunkSize self.importPath = importPath self.logConfiguration = logConfiguration + self.metadataConfiguration = metadataConfiguration self.perUnitStorageThroughput = perUnitStorageThroughput self.rootSquashConfiguration = rootSquashConfiguration self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime @@ -1485,6 +1494,7 @@ extension FSx { try self.validate(self.importPath, name: "importPath", parent: name, min: 3) try self.validate(self.importPath, name: "importPath", parent: name, pattern: "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4357}$") try self.logConfiguration?.validate(name: "\(name).logConfiguration") + try self.metadataConfiguration?.validate(name: "\(name).metadataConfiguration") try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, max: 1000) try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, min: 12) try self.rootSquashConfiguration?.validate(name: "\(name).rootSquashConfiguration") @@ -1505,12 +1515,35 @@ extension FSx { case importedFileChunkSize = "ImportedFileChunkSize" case importPath = "ImportPath" case logConfiguration = "LogConfiguration" + case metadataConfiguration = "MetadataConfiguration" case perUnitStorageThroughput = "PerUnitStorageThroughput" case rootSquashConfiguration = "RootSquashConfiguration" case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime" } } + public struct CreateFileSystemLustreMetadataConfiguration: AWSEncodableShape { + /// (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for the file system. This parameter sets the maximum rate of metadata disk IOPS supported by the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000. Iops doesn’t have a default value. If you're using USER_PROVISIONED mode, you can choose to specify a valid value. If you're using AUTOMATIC mode, you cannot specify a value because FSx for Lustre automatically sets the value based on your file system storage capacity. + public let iops: Int? + /// The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type. In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS for your file system based on your file system storage capacity. In USER_PROVISIONED mode, you specify the number of Metadata IOPS to provision for your file system. + public let mode: MetadataConfigurationMode? + + public init(iops: Int? = nil, mode: MetadataConfigurationMode? = nil) { + self.iops = iops + self.mode = mode + } + + public func validate(name: String) throws { + try self.validate(self.iops, name: "iops", parent: name, max: 192000) + try self.validate(self.iops, name: "iops", parent: name, min: 1500) + } + + private enum CodingKeys: String, CodingKey { + case iops = "Iops" + case mode = "Mode" + } + } + public struct CreateFileSystemOntapConfiguration: AWSEncodableShape { public let automaticBackupRetentionDays: Int? public let dailyAutomaticBackupStartTime: String? 
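Since the new CreateFileSystemLustreMetadataConfiguration shape and the MetadataConfigurationMode enum are both fully visible above, a short usage sketch may help; the .persistent2 deployment-type case name is assumed from the Soto naming convention:

// USER_PROVISIONED mode: provision an explicit Metadata IOPS value.
// Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to 192000.
// In AUTOMATIC mode, omit iops and FSx scales Metadata IOPS with storage capacity.
let metadataConfiguration = FSx.CreateFileSystemLustreMetadataConfiguration(
    iops: 12000,
    mode: .userProvisioned
)
// Metadata configuration only applies to PERSISTENT_2 deployments.
let lustreConfiguration = FSx.CreateFileSystemLustreConfiguration(
    deploymentType: .persistent2,
    metadataConfiguration: metadataConfiguration,
    perUnitStorageThroughput: 250
)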
@@ -1683,7 +1716,7 @@ extension FSx { public let clientRequestToken: String? /// The type of Amazon FSx file system to create. Valid values are WINDOWS, LUSTRE, ONTAP, and OPENZFS. public let fileSystemType: FileSystemType? - /// (Optional) For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15: 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types. 2.12 and 2.15 are supported by all Lustre deployment types. 2.12 or 2.15 is required when setting FSx for Lustre DeploymentType to PERSISTENT_2. Default value = 2.10, except when DeploymentType is set to PERSISTENT_2, then the default is 2.12. If you set FileSystemTypeVersion to 2.10 for a PERSISTENT_2 Lustre deployment type, the CreateFileSystem operation fails. + /// For FSx for Lustre file systems, sets the Lustre version for the file system that you're creating. Valid values are 2.10, 2.12, and 2.15: 2.10 is supported by the Scratch and Persistent_1 Lustre deployment types. 2.12 is supported by all Lustre deployment types, except for PERSISTENT_2 with a metadata configuration mode. 2.15 is supported by all Lustre deployment types and is recommended for all new file systems. Default value is 2.10, except for the following deployments: Default value is 2.12 when DeploymentType is set to PERSISTENT_2 without a metadata configuration mode. Default value is 2.15 when DeploymentType is set to PERSISTENT_2 with a metadata configuration mode. public let fileSystemTypeVersion: String? public let kmsKeyId: String? public let lustreConfiguration: CreateFileSystemLustreConfiguration? @@ -1692,9 +1725,9 @@ extension FSx { public let openZFSConfiguration: CreateFileSystemOpenZFSConfiguration? /// A list of IDs specifying the security groups to apply to all network interfaces created for file system access. This list isn't returned in later requests to describe the file system. You must specify a security group if you are creating a Multi-AZ FSx for ONTAP file system in a VPC subnet that has been shared with you. public let securityGroupIds: [String]? - /// Sets the storage capacity of the file system that you're creating, in gibibytes (GiB). FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, as follows: For SCRATCH_2, PERSISTENT_2 and PERSISTENT_1 deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB. For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems. For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB. FSx for ONTAP file systems - The amount of storage capacity that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs. FSx for OpenZFS file systems - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType as follows: For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB). For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB). + /// Sets the storage capacity of the file system that you're creating, in gibibytes (GiB). 
FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, as follows: For SCRATCH_2, PERSISTENT_2, and PERSISTENT_1 deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB. For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems. For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB. FSx for ONTAP file systems - The amount of storage capacity that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs. FSx for OpenZFS file systems - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB). FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType as follows: For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB). For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB). public let storageCapacity: Int? - /// Sets the storage type for the file system that you're creating. Valid values are SSD and HDD. Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types. Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types. Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide. + /// Sets the storage type for the file system that you're creating. Valid values are SSD and HDD. Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types. Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT_1 Lustre file system deployment types. Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide. public let storageType: StorageType? /// Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP MULTI_AZ_1 deployment types,provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID or OntapConfiguration > PreferredSubnetID properties. For more information about Multi-AZ file system configuration, see Availability and durability: Single-AZ and Multi-AZ file systems in the Amazon FSx for Windows User Guide and Availability and durability in the Amazon FSx for ONTAP User Guide. For Windows SINGLE_AZ_1 and SINGLE_AZ_2 and all Lustre deployment types, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone. public let subnetIds: [String]? @@ -1877,7 +1910,7 @@ extension FSx { public let junctionPath: String? /// Specifies the type of volume you are creating. Valid values are the following: RW specifies a read/write volume. RW is the default. 
DP specifies a data-protection volume. A DP volume is read-only and can be used as the destination of a NetApp SnapMirror relationship. For more information, see Volume types in the Amazon FSx for NetApp ONTAP User Guide. public let ontapVolumeType: InputOntapVolumeType? - /// Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see Volume security style in the Amazon FSx for NetApp ONTAP User Guide. Specify one of the following values: UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account. NTFS if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account. MIXED This is an advanced setting. For more information, see the topic What the security styles and their effects are in the NetApp Documentation Center. For more information, see Volume security style in the FSx for ONTAP User Guide. + /// Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. Specify one of the following values: UNIX if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account. NTFS if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account. MIXED This is an advanced setting. For more information, see the topic What the security styles and their effects are in the NetApp Documentation Center. For more information, see Volume security style in the FSx for ONTAP User Guide. public let securityStyle: SecurityStyle? /// Specifies the configured size of the volume, in bytes. public let sizeInBytes: Int64? @@ -3887,7 +3920,7 @@ extension FSx { } public struct FileCacheDataRepositoryAssociation: AWSEncodableShape { - /// The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths: The path can be an NFS data repository that links to the cache. The path can be in one of two formats: If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association. If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. + /// The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths: The path can be an NFS data repository that links to the cache. 
The path can be in one of two formats: If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nfs://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association. If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. public let dataRepositoryPath: String? /// A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories. public let dataRepositorySubdirectories: [String]? @@ -4153,6 +4186,23 @@ extension FSx { } } + public struct FileSystemLustreMetadataConfiguration: AWSDecodableShape { + /// The number of Metadata IOPS provisioned for the file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000. + public let iops: Int? + /// The metadata configuration mode for provisioning Metadata IOPS for the file system. In AUTOMATIC mode, FSx for Lustre automatically provisions and scales the number of Metadata IOPS on your file system based on your file system storage capacity. In USER_PROVISIONED mode, you can choose to specify the number of Metadata IOPS to provision for your file system. + public let mode: MetadataConfigurationMode? + + public init(iops: Int? = nil, mode: MetadataConfigurationMode? = nil) { + self.iops = iops + self.mode = mode + } + + private enum CodingKeys: String, CodingKey { + case iops = "Iops" + case mode = "Mode" + } + } + public struct Filter: AWSEncodableShape { /// The name for this filter. public let name: FilterName? @@ -4254,6 +4304,8 @@ extension FSx { public let driveCacheType: DriveCacheType? /// The Lustre logging configuration. Lustre logging writes the enabled log events for your file system to Amazon CloudWatch Logs. public let logConfiguration: LustreLogConfiguration? + /// The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. + public let metadataConfiguration: FileSystemLustreMetadataConfiguration? /// You use the MountName value when mounting the file system. For the SCRATCH_1 deployment type, this value is always "fsx". For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 deployment types, this value is a string that is unique within an Amazon Web Services Region. public let mountName: String? /// Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. File system throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is only valid for PERSISTENT_1 and PERSISTENT_2 deployment types. Valid values: For PERSISTENT_1 SSD storage: 50, 100, 200. For PERSISTENT_1 HDD storage: 12, 40. For PERSISTENT_2 SSD storage: 125, 250, 500, 1000. @@ -4263,7 +4315,7 @@ extension FSx { /// The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. 
Here, d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday. public let weeklyMaintenanceStartTime: String? - public init(automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, dataRepositoryConfiguration: DataRepositoryConfiguration? = nil, deploymentType: LustreDeploymentType? = nil, driveCacheType: DriveCacheType? = nil, logConfiguration: LustreLogConfiguration? = nil, mountName: String? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, weeklyMaintenanceStartTime: String? = nil) { + public init(automaticBackupRetentionDays: Int? = nil, copyTagsToBackups: Bool? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, dataRepositoryConfiguration: DataRepositoryConfiguration? = nil, deploymentType: LustreDeploymentType? = nil, driveCacheType: DriveCacheType? = nil, logConfiguration: LustreLogConfiguration? = nil, metadataConfiguration: FileSystemLustreMetadataConfiguration? = nil, mountName: String? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, weeklyMaintenanceStartTime: String? = nil) { self.automaticBackupRetentionDays = automaticBackupRetentionDays self.copyTagsToBackups = copyTagsToBackups self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime @@ -4272,6 +4324,7 @@ extension FSx { self.deploymentType = deploymentType self.driveCacheType = driveCacheType self.logConfiguration = logConfiguration + self.metadataConfiguration = metadataConfiguration self.mountName = mountName self.perUnitStorageThroughput = perUnitStorageThroughput self.rootSquashConfiguration = rootSquashConfiguration @@ -4287,6 +4340,7 @@ extension FSx { case deploymentType = "DeploymentType" case driveCacheType = "DriveCacheType" case logConfiguration = "LogConfiguration" + case metadataConfiguration = "MetadataConfiguration" case mountName = "MountName" case perUnitStorageThroughput = "PerUnitStorageThroughput" case rootSquashConfiguration = "RootSquashConfiguration" @@ -5033,17 +5087,17 @@ extension FSx { } public struct SelfManagedActiveDirectoryConfigurationUpdates: AWSEncodableShape { - /// A list of up to three DNS server or domain controller IP addresses in your self-managed AD domain. + /// A list of up to three DNS server or domain controller IP addresses in your self-managed Active Directory domain. public let dnsIps: [String]? - /// Specifies an updated fully qualified domain name of your self-managed AD configuration. + /// Specifies an updated fully qualified domain name of your self-managed Active Directory configuration. public let domainName: String? - /// Specifies the updated name of the self-managed AD domain group whose members are granted administrative privileges for the Amazon FSx resource. + /// For FSx for ONTAP file systems only - Specifies the updated name of the self-managed Active Directory domain group whose members are granted administrative privileges for the Amazon FSx resource. public let fileSystemAdministratorsGroup: String? - /// Specifies an updated fully qualified distinguished name of the organization unit within your self-managed AD. + /// Specifies an updated fully qualified distinguished name of the organization unit within your self-managed Active Directory. public let organizationalUnitDistinguishedName: String? 
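On the read side, the decodable FileSystemLustreMetadataConfiguration added above is surfaced through the Lustre file-system configuration. A hedged sketch, assuming a configured fsx client, the generated describeFileSystems operation, and a hypothetical file system ID:

// Inspect the provisioned Metadata IOPS and mode (AUTOMATIC or USER_PROVISIONED).
let described = try await fsx.describeFileSystems(.init(fileSystemIds: ["fs-0123456789abcdef0"]))
if let metadata = described.fileSystems?.first?.lustreConfiguration?.metadataConfiguration {
    print("mode: \(metadata.mode?.description ?? "unknown"), iops: \(metadata.iops.map { String($0) } ?? "n/a")")
}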
- /// Specifies the updated password for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain. + /// Specifies the updated password for the service account on your self-managed Active Directory domain. Amazon FSx uses this account to join to your self-managed Active Directory domain. public let password: String? - /// Specifies the updated user name for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain. This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName. + /// Specifies the updated user name for the service account on your self-managed Active Directory domain. Amazon FSx uses this account to join to your self-managed Active Directory domain. This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName. public let userName: String? public init(dnsIps: [String]? = nil, domainName: String? = nil, fileSystemAdministratorsGroup: String? = nil, organizationalUnitDistinguishedName: String? = nil, password: String? = nil, userName: String? = nil) { @@ -5626,6 +5680,8 @@ extension FSx { public let dataCompressionType: DataCompressionType? /// The Lustre logging configuration used when updating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs. public let logConfiguration: LustreLogCreateConfiguration? + /// The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. When this configuration is enabled, the file system supports increasing metadata performance. + public let metadataConfiguration: UpdateFileSystemLustreMetadataConfiguration? /// The throughput of an Amazon FSx for Lustre Persistent SSD-based file system, measured in megabytes per second per tebibyte (MB/s/TiB). You can increase or decrease your file system's throughput. Valid values depend on the deployment type of the file system, as follows: For PERSISTENT_1 SSD-based deployment types, valid values are 50, 100, and 200 MB/s/TiB. For PERSISTENT_2 SSD-based deployment types, valid values are 125, 250, 500, and 1000 MB/s/TiB. For more information, see Managing throughput capacity. public let perUnitStorageThroughput: Int? /// The Lustre root squash configuration used when updating an Amazon FSx for Lustre file system. When enabled, root squash restricts root-level access from clients that try to access your file system as a root user. @@ -5633,12 +5689,13 @@ extension FSx { /// (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday. public let weeklyMaintenanceStartTime: String? - public init(autoImportPolicy: AutoImportPolicyType? = nil, automaticBackupRetentionDays: Int? = nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, logConfiguration: LustreLogCreateConfiguration? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, weeklyMaintenanceStartTime: String? = nil) { + public init(autoImportPolicy: AutoImportPolicyType? = nil, automaticBackupRetentionDays: Int? 
= nil, dailyAutomaticBackupStartTime: String? = nil, dataCompressionType: DataCompressionType? = nil, logConfiguration: LustreLogCreateConfiguration? = nil, metadataConfiguration: UpdateFileSystemLustreMetadataConfiguration? = nil, perUnitStorageThroughput: Int? = nil, rootSquashConfiguration: LustreRootSquashConfiguration? = nil, weeklyMaintenanceStartTime: String? = nil) { self.autoImportPolicy = autoImportPolicy self.automaticBackupRetentionDays = automaticBackupRetentionDays self.dailyAutomaticBackupStartTime = dailyAutomaticBackupStartTime self.dataCompressionType = dataCompressionType self.logConfiguration = logConfiguration + self.metadataConfiguration = metadataConfiguration self.perUnitStorageThroughput = perUnitStorageThroughput self.rootSquashConfiguration = rootSquashConfiguration self.weeklyMaintenanceStartTime = weeklyMaintenanceStartTime @@ -5651,6 +5708,7 @@ extension FSx { try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, min: 5) try self.validate(self.dailyAutomaticBackupStartTime, name: "dailyAutomaticBackupStartTime", parent: name, pattern: "^([01]\\d|2[0-3]):?([0-5]\\d)$") try self.logConfiguration?.validate(name: "\(name).logConfiguration") + try self.metadataConfiguration?.validate(name: "\(name).metadataConfiguration") try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, max: 1000) try self.validate(self.perUnitStorageThroughput, name: "perUnitStorageThroughput", parent: name, min: 12) try self.rootSquashConfiguration?.validate(name: "\(name).rootSquashConfiguration") @@ -5665,12 +5723,35 @@ extension FSx { case dailyAutomaticBackupStartTime = "DailyAutomaticBackupStartTime" case dataCompressionType = "DataCompressionType" case logConfiguration = "LogConfiguration" + case metadataConfiguration = "MetadataConfiguration" case perUnitStorageThroughput = "PerUnitStorageThroughput" case rootSquashConfiguration = "RootSquashConfiguration" case weeklyMaintenanceStartTime = "WeeklyMaintenanceStartTime" } } + public struct UpdateFileSystemLustreMetadataConfiguration: AWSEncodableShape { + /// (USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000. The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system. + public let iops: Int? + /// The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type. To increase the Metadata IOPS or to switch from AUTOMATIC mode, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system. To switch from USER_PROVISIONED mode, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops. If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported. + public let mode: MetadataConfigurationMode? + + public init(iops: Int? = nil, mode: MetadataConfigurationMode? 
= nil) { + self.iops = iops + self.mode = mode + } + + public func validate(name: String) throws { + try self.validate(self.iops, name: "iops", parent: name, max: 192000) + try self.validate(self.iops, name: "iops", parent: name, min: 1500) + } + + private enum CodingKeys: String, CodingKey { + case iops = "Iops" + case mode = "Mode" + } + } + public struct UpdateFileSystemOntapConfiguration: AWSEncodableShape { /// (Multi-AZ only) A list of IDs of new virtual private cloud (VPC) route tables to associate (add) with your Amazon FSx for NetApp ONTAP file system. public let addRouteTableIds: [String]? diff --git a/Sources/Soto/Services/Firehose/Firehose_api.swift b/Sources/Soto/Services/Firehose/Firehose_api.swift index 5e4b0c0004..f73377c140 100644 --- a/Sources/Soto/Services/Firehose/Firehose_api.swift +++ b/Sources/Soto/Services/Firehose/Firehose_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS Firehose service. /// -/// Amazon Data Firehose Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose. Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations. +/// Amazon Data Firehose Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose. Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations. public struct Firehose: AWSService { // MARK: Member variables @@ -182,7 +182,7 @@ public struct Firehose: AWSService { ) } - /// Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. 
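A minimal usage sketch for the new Lustre metadata configuration above (the enclosing UpdateFileSystemLustreConfiguration shape, the updateFileSystem operation and its request, and the MetadataConfigurationMode cases are not shown in this hunk and are assumed to follow the usual Soto naming):

import SotoFSx

// Hypothetical sketch: raise Metadata IOPS on a PERSISTENT_2 file system by switching
// to USER_PROVISIONED mode. Names outside this hunk (UpdateFileSystemLustreConfiguration,
// UpdateFileSystemRequest/updateFileSystem, .userProvisioned) are assumptions, and the
// file system ID is a placeholder.
func increaseLustreMetadataIops(fsx: FSx) async throws {
    let metadata = FSx.UpdateFileSystemLustreMetadataConfiguration(
        iops: 12000,            // must be >= the currently provisioned Metadata IOPS
        mode: .userProvisioned  // to return to automatic management, pass .automatic and omit iops
    )
    let lustre = FSx.UpdateFileSystemLustreConfiguration(metadataConfiguration: metadata)
    _ = try await fsx.updateFileSystem(.init(
        fileSystemId: "fs-0123456789abcdef0",
        lustreConfiguration: lustre
    ))
}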
If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period. + /// Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. 
For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period. @Sendable public func startDeliveryStreamEncryption(_ input: StartDeliveryStreamEncryptionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartDeliveryStreamEncryptionOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/Firehose/Firehose_shapes.swift b/Sources/Soto/Services/Firehose/Firehose_shapes.swift index 912704f78d..d5c8e0f27c 100644 --- a/Sources/Soto/Services/Firehose/Firehose_shapes.swift +++ b/Sources/Soto/Services/Firehose/Firehose_shapes.swift @@ -1942,7 +1942,7 @@ extension Firehose { /// The configuration of the HTTP endpoint selected as the destination. public let endpointConfiguration: HttpEndpointConfiguration public let processingConfiguration: ProcessingConfiguration? - /// The configuration of the requeste sent to the HTTP endpoint specified as the destination. + /// The configuration of the request sent to the HTTP endpoint that is specified as the destination. public let requestConfiguration: HttpEndpointRequestConfiguration? /// Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination. public let retryOptions: HttpEndpointRetryOptions? @@ -1951,8 +1951,10 @@ extension Firehose { /// Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly). public let s3BackupMode: HttpEndpointS3BackupMode? public let s3Configuration: S3DestinationConfiguration + /// The configuration that defines how you access secrets for HTTP Endpoint destination. + public let secretsManagerConfiguration: SecretsManagerConfiguration? - public init(bufferingHints: HttpEndpointBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, endpointConfiguration: HttpEndpointConfiguration, processingConfiguration: ProcessingConfiguration? = nil, requestConfiguration: HttpEndpointRequestConfiguration? = nil, retryOptions: HttpEndpointRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: HttpEndpointS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration) { + public init(bufferingHints: HttpEndpointBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, endpointConfiguration: HttpEndpointConfiguration, processingConfiguration: ProcessingConfiguration? = nil, requestConfiguration: HttpEndpointRequestConfiguration? = nil, retryOptions: HttpEndpointRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: HttpEndpointS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, secretsManagerConfiguration: SecretsManagerConfiguration? 
= nil) { self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.endpointConfiguration = endpointConfiguration @@ -1962,6 +1964,7 @@ extension Firehose { self.roleARN = roleARN self.s3BackupMode = s3BackupMode self.s3Configuration = s3Configuration + self.secretsManagerConfiguration = secretsManagerConfiguration } public func validate(name: String) throws { @@ -1975,6 +1978,7 @@ extension Firehose { try self.validate(self.roleARN, name: "roleARN", parent: name, min: 1) try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") try self.s3Configuration.validate(name: "\(name).s3Configuration") + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") } private enum CodingKeys: String, CodingKey { @@ -1987,6 +1991,7 @@ extension Firehose { case roleARN = "RoleARN" case s3BackupMode = "S3BackupMode" case s3Configuration = "S3Configuration" + case secretsManagerConfiguration = "SecretsManagerConfiguration" } } @@ -2006,8 +2011,10 @@ extension Firehose { /// Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly). public let s3BackupMode: HttpEndpointS3BackupMode? public let s3DestinationDescription: S3DestinationDescription? + /// The configuration that defines how you access secrets for HTTP Endpoint destination. + public let secretsManagerConfiguration: SecretsManagerConfiguration? - public init(bufferingHints: HttpEndpointBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, endpointConfiguration: HttpEndpointDescription? = nil, processingConfiguration: ProcessingConfiguration? = nil, requestConfiguration: HttpEndpointRequestConfiguration? = nil, retryOptions: HttpEndpointRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: HttpEndpointS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil) { + public init(bufferingHints: HttpEndpointBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, endpointConfiguration: HttpEndpointDescription? = nil, processingConfiguration: ProcessingConfiguration? = nil, requestConfiguration: HttpEndpointRequestConfiguration? = nil, retryOptions: HttpEndpointRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: HttpEndpointS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil) { self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.endpointConfiguration = endpointConfiguration @@ -2017,6 +2024,7 @@ extension Firehose { self.roleARN = roleARN self.s3BackupMode = s3BackupMode self.s3DestinationDescription = s3DestinationDescription + self.secretsManagerConfiguration = secretsManagerConfiguration } private enum CodingKeys: String, CodingKey { @@ -2029,6 +2037,7 @@ extension Firehose { case roleARN = "RoleARN" case s3BackupMode = "S3BackupMode" case s3DestinationDescription = "S3DestinationDescription" + case secretsManagerConfiguration = "SecretsManagerConfiguration" } } @@ -2048,8 +2057,10 @@ extension Firehose { /// Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. 
You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly). public let s3BackupMode: HttpEndpointS3BackupMode? public let s3Update: S3DestinationUpdate? + /// The configuration that defines how you access secrets for HTTP Endpoint destination. + public let secretsManagerConfiguration: SecretsManagerConfiguration? - public init(bufferingHints: HttpEndpointBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, endpointConfiguration: HttpEndpointConfiguration? = nil, processingConfiguration: ProcessingConfiguration? = nil, requestConfiguration: HttpEndpointRequestConfiguration? = nil, retryOptions: HttpEndpointRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: HttpEndpointS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil) { + public init(bufferingHints: HttpEndpointBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, endpointConfiguration: HttpEndpointConfiguration? = nil, processingConfiguration: ProcessingConfiguration? = nil, requestConfiguration: HttpEndpointRequestConfiguration? = nil, retryOptions: HttpEndpointRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: HttpEndpointS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil) { self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.endpointConfiguration = endpointConfiguration @@ -2059,6 +2070,7 @@ extension Firehose { self.roleARN = roleARN self.s3BackupMode = s3BackupMode self.s3Update = s3Update + self.secretsManagerConfiguration = secretsManagerConfiguration } public func validate(name: String) throws { @@ -2072,6 +2084,7 @@ extension Firehose { try self.validate(self.roleARN, name: "roleARN", parent: name, min: 1) try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") try self.s3Update?.validate(name: "\(name).s3Update") + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") } private enum CodingKeys: String, CodingKey { @@ -2084,6 +2097,7 @@ extension Firehose { case roleARN = "RoleARN" case s3BackupMode = "S3BackupMode" case s3Update = "S3Update" + case secretsManagerConfiguration = "SecretsManagerConfiguration" } } @@ -2722,7 +2736,7 @@ extension Firehose { /// The COPY command. public let copyCommand: CopyCommand /// The user password. - public let password: String + public let password: String? /// The data processing configuration. public let processingConfiguration: ProcessingConfiguration? /// The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes). @@ -2735,10 +2749,12 @@ extension Firehose { public let s3BackupMode: RedshiftS3BackupMode? /// The configuration for the intermediate Amazon S3 location from which Amazon Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. public let s3Configuration: S3DestinationConfiguration + /// The configuration that defines how you access secrets for Amazon Redshift. + public let secretsManagerConfiguration: SecretsManagerConfiguration? /// The name of the user. 
- public let username: String + public let username: String? - public init(cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, clusterJDBCURL: String, copyCommand: CopyCommand, password: String, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RedshiftRetryOptions? = nil, roleARN: String, s3BackupConfiguration: S3DestinationConfiguration? = nil, s3BackupMode: RedshiftS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, username: String) { + public init(cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, clusterJDBCURL: String, copyCommand: CopyCommand, password: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RedshiftRetryOptions? = nil, roleARN: String, s3BackupConfiguration: S3DestinationConfiguration? = nil, s3BackupMode: RedshiftS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, username: String? = nil) { self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.clusterJDBCURL = clusterJDBCURL self.copyCommand = copyCommand @@ -2749,6 +2765,7 @@ extension Firehose { self.s3BackupConfiguration = s3BackupConfiguration self.s3BackupMode = s3BackupMode self.s3Configuration = s3Configuration + self.secretsManagerConfiguration = secretsManagerConfiguration self.username = username } @@ -2768,6 +2785,7 @@ extension Firehose { try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") try self.s3BackupConfiguration?.validate(name: "\(name).s3BackupConfiguration") try self.s3Configuration.validate(name: "\(name).s3Configuration") + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") try self.validate(self.username, name: "username", parent: name, max: 512) try self.validate(self.username, name: "username", parent: name, min: 1) try self.validate(self.username, name: "username", parent: name, pattern: ".*") @@ -2784,6 +2802,7 @@ extension Firehose { case s3BackupConfiguration = "S3BackupConfiguration" case s3BackupMode = "S3BackupMode" case s3Configuration = "S3Configuration" + case secretsManagerConfiguration = "SecretsManagerConfiguration" case username = "Username" } } @@ -2807,10 +2826,12 @@ extension Firehose { public let s3BackupMode: RedshiftS3BackupMode? /// The Amazon S3 destination. public let s3DestinationDescription: S3DestinationDescription + /// The configuration that defines how you access secrets for Amazon Redshift. + public let secretsManagerConfiguration: SecretsManagerConfiguration? /// The name of the user. - public let username: String + public let username: String? - public init(cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, clusterJDBCURL: String, copyCommand: CopyCommand, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RedshiftRetryOptions? = nil, roleARN: String, s3BackupDescription: S3DestinationDescription? = nil, s3BackupMode: RedshiftS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription, username: String) { + public init(cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, clusterJDBCURL: String, copyCommand: CopyCommand, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RedshiftRetryOptions? = nil, roleARN: String, s3BackupDescription: S3DestinationDescription? = nil, s3BackupMode: RedshiftS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, username: String? 
= nil) { self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.clusterJDBCURL = clusterJDBCURL self.copyCommand = copyCommand @@ -2820,6 +2841,7 @@ extension Firehose { self.s3BackupDescription = s3BackupDescription self.s3BackupMode = s3BackupMode self.s3DestinationDescription = s3DestinationDescription + self.secretsManagerConfiguration = secretsManagerConfiguration self.username = username } @@ -2833,6 +2855,7 @@ extension Firehose { case s3BackupDescription = "S3BackupDescription" case s3BackupMode = "S3BackupMode" case s3DestinationDescription = "S3DestinationDescription" + case secretsManagerConfiguration = "SecretsManagerConfiguration" case username = "Username" } } @@ -2858,10 +2881,12 @@ extension Firehose { public let s3BackupUpdate: S3DestinationUpdate? /// The Amazon S3 destination. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. public let s3Update: S3DestinationUpdate? + /// The configuration that defines how you access secrets for Amazon Redshift. + public let secretsManagerConfiguration: SecretsManagerConfiguration? /// The name of the user. public let username: String? - public init(cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, clusterJDBCURL: String? = nil, copyCommand: CopyCommand? = nil, password: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RedshiftRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: RedshiftS3BackupMode? = nil, s3BackupUpdate: S3DestinationUpdate? = nil, s3Update: S3DestinationUpdate? = nil, username: String? = nil) { + public init(cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, clusterJDBCURL: String? = nil, copyCommand: CopyCommand? = nil, password: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: RedshiftRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: RedshiftS3BackupMode? = nil, s3BackupUpdate: S3DestinationUpdate? = nil, s3Update: S3DestinationUpdate? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, username: String? 
= nil) { self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.clusterJDBCURL = clusterJDBCURL self.copyCommand = copyCommand @@ -2872,6 +2897,7 @@ extension Firehose { self.s3BackupMode = s3BackupMode self.s3BackupUpdate = s3BackupUpdate self.s3Update = s3Update + self.secretsManagerConfiguration = secretsManagerConfiguration self.username = username } @@ -2891,6 +2917,7 @@ extension Firehose { try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") try self.s3BackupUpdate?.validate(name: "\(name).s3BackupUpdate") try self.s3Update?.validate(name: "\(name).s3Update") + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") try self.validate(self.username, name: "username", parent: name, max: 512) try self.validate(self.username, name: "username", parent: name, min: 1) try self.validate(self.username, name: "username", parent: name, pattern: ".*") @@ -2907,6 +2934,7 @@ extension Firehose { case s3BackupMode = "S3BackupMode" case s3BackupUpdate = "S3BackupUpdate" case s3Update = "S3Update" + case secretsManagerConfiguration = "SecretsManagerConfiguration" case username = "Username" } } @@ -3156,6 +3184,36 @@ extension Firehose { } } + public struct SecretsManagerConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether you want to use the secrets manager feature. When set to True, the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to False, Firehose falls back to the credentials in the destination configuration. + public let enabled: Bool + /// Specifies the role that Firehose assumes when calling the Secrets Manager API operation. When you provide the role, it overrides any destination specific role defined in the destination configuration. If you do not provide the role, then we use the destination specific role. This parameter is required for Splunk. + public let roleARN: String? + /// The ARN of the secret that stores your credentials. It must be in the same region as the Firehose stream and the role. The secret ARN can reside in a different account than the delivery stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True. + public let secretARN: String? + + public init(enabled: Bool, roleARN: String? = nil, secretARN: String? = nil) { + self.enabled = enabled + self.roleARN = roleARN + self.secretARN = secretARN + } + + public func validate(name: String) throws { + try self.validate(self.roleARN, name: "roleARN", parent: name, max: 512) + try self.validate(self.roleARN, name: "roleARN", parent: name, min: 1) + try self.validate(self.roleARN, name: "roleARN", parent: name, pattern: "^arn:") + try self.validate(self.secretARN, name: "secretARN", parent: name, max: 2048) + try self.validate(self.secretARN, name: "secretARN", parent: name, min: 1) + try self.validate(self.secretARN, name: "secretARN", parent: name, pattern: "^arn:") + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + case roleARN = "RoleARN" + case secretARN = "SecretARN" + } + } + public struct Serializer: AWSEncodableShape & AWSDecodableShape { /// A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC. public let orcSerDe: OrcSerDe? @@ -3193,7 +3251,7 @@ extension Firehose { /// The name of the record metadata column public let metaDataColumnName: String?
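As a rough illustration of the new SecretsManagerConfiguration above, the sketch below moves an existing Redshift destination from inline credentials to a Secrets Manager secret (the updateDestination operation and its identifier fields already exist in the Firehose API and are assumed here; the ARNs and IDs are placeholders):

import SotoFirehose

// Hypothetical sketch: let Firehose resolve Redshift credentials from Secrets Manager.
// Only SecretsManagerConfiguration and the secretsManagerConfiguration parameter come
// from this diff; updateDestination, its required fields, and all ARNs/IDs are assumed.
func switchRedshiftToSecretsManager(firehose: Firehose) async throws {
    let secrets = Firehose.SecretsManagerConfiguration(
        enabled: true,
        roleARN: "arn:aws:iam::123456789012:role/firehose-secrets-access",
        secretARN: "arn:aws:secretsmanager:us-east-1:123456789012:secret:redshift-creds"
    )
    let redshiftUpdate = Firehose.RedshiftDestinationUpdate(secretsManagerConfiguration: secrets)
    _ = try await firehose.updateDestination(.init(
        currentDeliveryStreamVersionId: "1",
        deliveryStreamName: "my-delivery-stream",
        destinationId: "destinationId-000000000001",
        redshiftDestinationUpdate: redshiftUpdate
    ))
}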
/// The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation. - public let privateKey: String + public let privateKey: String? public let processingConfiguration: ProcessingConfiguration? /// The time period where Firehose will retry sending data to the chosen HTTP endpoint. public let retryOptions: SnowflakeRetryOptions? @@ -3204,6 +3262,8 @@ extension Firehose { public let s3Configuration: S3DestinationConfiguration /// Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views public let schema: String + /// The configuration that defines how you access secrets for Snowflake. + public let secretsManagerConfiguration: SecretsManagerConfiguration? /// Optionally configure a Snowflake role. Otherwise the default user role will be used. public let snowflakeRoleConfiguration: SnowflakeRoleConfiguration? /// The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-. For more information, see Amazon PrivateLink & Snowflake @@ -3211,9 +3271,9 @@ extension Firehose { /// All data in Snowflake is stored in database tables, logically structured as collections of columns and rows. public let table: String /// User login name for the Snowflake account. - public let user: String + public let user: String? - public init(accountUrl: String, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, schema: String, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String, user: String) { + public init(accountUrl: String, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, schema: String, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String, user: String? 
= nil) { self.accountUrl = accountUrl self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.contentColumnName = contentColumnName @@ -3228,6 +3288,7 @@ extension Firehose { self.s3BackupMode = s3BackupMode self.s3Configuration = s3Configuration self.schema = schema + self.secretsManagerConfiguration = secretsManagerConfiguration self.snowflakeRoleConfiguration = snowflakeRoleConfiguration self.snowflakeVpcConfiguration = snowflakeVpcConfiguration self.table = table @@ -3258,6 +3319,7 @@ extension Firehose { try self.s3Configuration.validate(name: "\(name).s3Configuration") try self.validate(self.schema, name: "schema", parent: name, max: 255) try self.validate(self.schema, name: "schema", parent: name, min: 1) + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") try self.snowflakeRoleConfiguration?.validate(name: "\(name).snowflakeRoleConfiguration") try self.snowflakeVpcConfiguration?.validate(name: "\(name).snowflakeVpcConfiguration") try self.validate(self.table, name: "table", parent: name, max: 255) @@ -3281,6 +3343,7 @@ extension Firehose { case s3BackupMode = "S3BackupMode" case s3Configuration = "S3Configuration" case schema = "Schema" + case secretsManagerConfiguration = "SecretsManagerConfiguration" case snowflakeRoleConfiguration = "SnowflakeRoleConfiguration" case snowflakeVpcConfiguration = "SnowflakeVpcConfiguration" case table = "Table" @@ -3310,6 +3373,8 @@ extension Firehose { public let s3DestinationDescription: S3DestinationDescription? /// Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views public let schema: String? + /// The configuration that defines how you access secrets for Snowflake. + public let secretsManagerConfiguration: SecretsManagerConfiguration? /// Optionally configure a Snowflake role. Otherwise the default user role will be used. public let snowflakeRoleConfiguration: SnowflakeRoleConfiguration? /// The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-. For more information, see Amazon PrivateLink & Snowflake @@ -3319,7 +3384,7 @@ extension Firehose { /// User login name for the Snowflake account. public let user: String? - public init(accountUrl: String? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, metaDataColumnName: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil, schema: String? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String? = nil, user: String? = nil) { + public init(accountUrl: String? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, metaDataColumnName: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil, schema: String? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? 
= nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, snowflakeVpcConfiguration: SnowflakeVpcConfiguration? = nil, table: String? = nil, user: String? = nil) { self.accountUrl = accountUrl self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.contentColumnName = contentColumnName @@ -3332,6 +3397,7 @@ extension Firehose { self.s3BackupMode = s3BackupMode self.s3DestinationDescription = s3DestinationDescription self.schema = schema + self.secretsManagerConfiguration = secretsManagerConfiguration self.snowflakeRoleConfiguration = snowflakeRoleConfiguration self.snowflakeVpcConfiguration = snowflakeVpcConfiguration self.table = table @@ -3351,6 +3417,7 @@ extension Firehose { case s3BackupMode = "S3BackupMode" case s3DestinationDescription = "S3DestinationDescription" case schema = "Schema" + case secretsManagerConfiguration = "SecretsManagerConfiguration" case snowflakeRoleConfiguration = "SnowflakeRoleConfiguration" case snowflakeVpcConfiguration = "SnowflakeVpcConfiguration" case table = "Table" @@ -3384,6 +3451,8 @@ extension Firehose { public let s3Update: S3DestinationUpdate? /// Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views public let schema: String? + /// Describes the Secrets Manager configuration in Snowflake. + public let secretsManagerConfiguration: SecretsManagerConfiguration? /// Optionally configure a Snowflake role. Otherwise the default user role will be used. public let snowflakeRoleConfiguration: SnowflakeRoleConfiguration? /// All data in Snowflake is stored in database tables, logically structured as collections of columns and rows. @@ -3391,7 +3460,7 @@ extension Firehose { /// User login name for the Snowflake account. public let user: String? - public init(accountUrl: String? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil, schema: String? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, table: String? = nil, user: String? = nil) { + public init(accountUrl: String? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, contentColumnName: String? = nil, database: String? = nil, dataLoadingOption: SnowflakeDataLoadingOption? = nil, keyPassphrase: String? = nil, metaDataColumnName: String? = nil, privateKey: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SnowflakeRetryOptions? = nil, roleARN: String? = nil, s3BackupMode: SnowflakeS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil, schema: String? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil, snowflakeRoleConfiguration: SnowflakeRoleConfiguration? = nil, table: String? = nil, user: String? 
= nil) { self.accountUrl = accountUrl self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.contentColumnName = contentColumnName @@ -3406,6 +3475,7 @@ extension Firehose { self.s3BackupMode = s3BackupMode self.s3Update = s3Update self.schema = schema + self.secretsManagerConfiguration = secretsManagerConfiguration self.snowflakeRoleConfiguration = snowflakeRoleConfiguration self.table = table self.user = user @@ -3435,6 +3505,7 @@ extension Firehose { try self.s3Update?.validate(name: "\(name).s3Update") try self.validate(self.schema, name: "schema", parent: name, max: 255) try self.validate(self.schema, name: "schema", parent: name, min: 1) + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") try self.snowflakeRoleConfiguration?.validate(name: "\(name).snowflakeRoleConfiguration") try self.validate(self.table, name: "table", parent: name, max: 255) try self.validate(self.table, name: "table", parent: name, min: 1) @@ -3457,6 +3528,7 @@ extension Firehose { case s3BackupMode = "S3BackupMode" case s3Update = "S3Update" case schema = "Schema" + case secretsManagerConfiguration = "SecretsManagerConfiguration" case snowflakeRoleConfiguration = "SnowflakeRoleConfiguration" case table = "Table" case user = "User" @@ -3575,7 +3647,7 @@ extension Firehose { /// This type can be either "Raw" or "Event." public let hecEndpointType: HECEndpointType /// This is a GUID that you obtain from your Splunk cluster when you create a new HEC endpoint. - public let hecToken: String + public let hecToken: String? /// The data processing configuration. public let processingConfiguration: ProcessingConfiguration? /// The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk. @@ -3584,8 +3656,10 @@ extension Firehose { public let s3BackupMode: SplunkS3BackupMode? /// The configuration for the backup Amazon S3 location. public let s3Configuration: S3DestinationConfiguration + /// The configuration that defines how you access secrets for Splunk. + public let secretsManagerConfiguration: SecretsManagerConfiguration? - public init(bufferingHints: SplunkBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, hecAcknowledgmentTimeoutInSeconds: Int? = nil, hecEndpoint: String, hecEndpointType: HECEndpointType, hecToken: String, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SplunkRetryOptions? = nil, s3BackupMode: SplunkS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration) { + public init(bufferingHints: SplunkBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, hecAcknowledgmentTimeoutInSeconds: Int? = nil, hecEndpoint: String, hecEndpointType: HECEndpointType, hecToken: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SplunkRetryOptions? = nil, s3BackupMode: SplunkS3BackupMode? = nil, s3Configuration: S3DestinationConfiguration, secretsManagerConfiguration: SecretsManagerConfiguration? 
= nil) { self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.hecAcknowledgmentTimeoutInSeconds = hecAcknowledgmentTimeoutInSeconds @@ -3596,6 +3670,7 @@ extension Firehose { self.retryOptions = retryOptions self.s3BackupMode = s3BackupMode self.s3Configuration = s3Configuration + self.secretsManagerConfiguration = secretsManagerConfiguration } public func validate(name: String) throws { @@ -3610,6 +3685,7 @@ extension Firehose { try self.processingConfiguration?.validate(name: "\(name).processingConfiguration") try self.retryOptions?.validate(name: "\(name).retryOptions") try self.s3Configuration.validate(name: "\(name).s3Configuration") + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") } private enum CodingKeys: String, CodingKey { @@ -3623,6 +3699,7 @@ extension Firehose { case retryOptions = "RetryOptions" case s3BackupMode = "S3BackupMode" case s3Configuration = "S3Configuration" + case secretsManagerConfiguration = "SecretsManagerConfiguration" } } @@ -3647,8 +3724,10 @@ extension Firehose { public let s3BackupMode: SplunkS3BackupMode? /// The Amazon S3 destination.> public let s3DestinationDescription: S3DestinationDescription? + /// The configuration that defines how you access secrets for Splunk. + public let secretsManagerConfiguration: SecretsManagerConfiguration? - public init(bufferingHints: SplunkBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, hecAcknowledgmentTimeoutInSeconds: Int? = nil, hecEndpoint: String? = nil, hecEndpointType: HECEndpointType? = nil, hecToken: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SplunkRetryOptions? = nil, s3BackupMode: SplunkS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil) { + public init(bufferingHints: SplunkBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, hecAcknowledgmentTimeoutInSeconds: Int? = nil, hecEndpoint: String? = nil, hecEndpointType: HECEndpointType? = nil, hecToken: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SplunkRetryOptions? = nil, s3BackupMode: SplunkS3BackupMode? = nil, s3DestinationDescription: S3DestinationDescription? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil) { self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.hecAcknowledgmentTimeoutInSeconds = hecAcknowledgmentTimeoutInSeconds @@ -3659,6 +3738,7 @@ extension Firehose { self.retryOptions = retryOptions self.s3BackupMode = s3BackupMode self.s3DestinationDescription = s3DestinationDescription + self.secretsManagerConfiguration = secretsManagerConfiguration } private enum CodingKeys: String, CodingKey { @@ -3672,6 +3752,7 @@ extension Firehose { case retryOptions = "RetryOptions" case s3BackupMode = "S3BackupMode" case s3DestinationDescription = "S3DestinationDescription" + case secretsManagerConfiguration = "SecretsManagerConfiguration" } } @@ -3696,8 +3777,10 @@ extension Firehose { public let s3BackupMode: SplunkS3BackupMode? /// Your update to the configuration of the backup Amazon S3 location. public let s3Update: S3DestinationUpdate? + /// The configuration that defines how you access secrets for Splunk. + public let secretsManagerConfiguration: SecretsManagerConfiguration? - public init(bufferingHints: SplunkBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? 
= nil, hecAcknowledgmentTimeoutInSeconds: Int? = nil, hecEndpoint: String? = nil, hecEndpointType: HECEndpointType? = nil, hecToken: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SplunkRetryOptions? = nil, s3BackupMode: SplunkS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil) { + public init(bufferingHints: SplunkBufferingHints? = nil, cloudWatchLoggingOptions: CloudWatchLoggingOptions? = nil, hecAcknowledgmentTimeoutInSeconds: Int? = nil, hecEndpoint: String? = nil, hecEndpointType: HECEndpointType? = nil, hecToken: String? = nil, processingConfiguration: ProcessingConfiguration? = nil, retryOptions: SplunkRetryOptions? = nil, s3BackupMode: SplunkS3BackupMode? = nil, s3Update: S3DestinationUpdate? = nil, secretsManagerConfiguration: SecretsManagerConfiguration? = nil) { self.bufferingHints = bufferingHints self.cloudWatchLoggingOptions = cloudWatchLoggingOptions self.hecAcknowledgmentTimeoutInSeconds = hecAcknowledgmentTimeoutInSeconds @@ -3708,6 +3791,7 @@ extension Firehose { self.retryOptions = retryOptions self.s3BackupMode = s3BackupMode self.s3Update = s3Update + self.secretsManagerConfiguration = secretsManagerConfiguration } public func validate(name: String) throws { @@ -3722,6 +3806,7 @@ extension Firehose { try self.processingConfiguration?.validate(name: "\(name).processingConfiguration") try self.retryOptions?.validate(name: "\(name).retryOptions") try self.s3Update?.validate(name: "\(name).s3Update") + try self.secretsManagerConfiguration?.validate(name: "\(name).secretsManagerConfiguration") } private enum CodingKeys: String, CodingKey { @@ -3735,6 +3820,7 @@ extension Firehose { case retryOptions = "RetryOptions" case s3BackupMode = "S3BackupMode" case s3Update = "S3Update" + case secretsManagerConfiguration = "SecretsManagerConfiguration" } } @@ -3919,7 +4005,7 @@ extension Firehose { public let redshiftDestinationUpdate: RedshiftDestinationUpdate? /// [Deprecated] Describes an update for a destination in Amazon S3. public let s3DestinationUpdate: S3DestinationUpdate? - /// Update to the Snowflake destination condiguration settings + /// Update to the Snowflake destination configuration settings. public let snowflakeDestinationUpdate: SnowflakeDestinationUpdate? /// Describes an update for a destination in Splunk. public let splunkDestinationUpdate: SplunkDestinationUpdate? diff --git a/Sources/Soto/Services/GlobalAccelerator/GlobalAccelerator_shapes.swift b/Sources/Soto/Services/GlobalAccelerator/GlobalAccelerator_shapes.swift index 60d1879dc0..4d9eff6403 100644 --- a/Sources/Soto/Services/GlobalAccelerator/GlobalAccelerator_shapes.swift +++ b/Sources/Soto/Services/GlobalAccelerator/GlobalAccelerator_shapes.swift @@ -2824,27 +2824,35 @@ extension GlobalAccelerator { public let acceleratorArn: String /// Indicates whether an accelerator is enabled. The value is true or false. The default value is true. If the value is set to true, the accelerator cannot be deleted. If set to false, the accelerator can be deleted. public let enabled: Bool? + /// The IP addresses for an accelerator. + public let ipAddresses: [String]? /// The IP address type that an accelerator supports. For a standard accelerator, the value can be IPV4 or DUAL_STACK. public let ipAddressType: IpAddressType? /// The name of the accelerator. The name can have a maximum of 64 characters, must contain only alphanumeric characters, /// periods (.), or hyphens (-), and must not begin or end with a hyphen or period. public let name: String? 
- public init(acceleratorArn: String, enabled: Bool? = nil, ipAddressType: IpAddressType? = nil, name: String? = nil) { + public init(acceleratorArn: String, enabled: Bool? = nil, ipAddresses: [String]? = nil, ipAddressType: IpAddressType? = nil, name: String? = nil) { self.acceleratorArn = acceleratorArn self.enabled = enabled + self.ipAddresses = ipAddresses self.ipAddressType = ipAddressType self.name = name } public func validate(name: String) throws { try self.validate(self.acceleratorArn, name: "acceleratorArn", parent: name, max: 255) + try self.ipAddresses?.forEach { + try validate($0, name: "ipAddresses[]", parent: name, max: 45) + } + try self.validate(self.ipAddresses, name: "ipAddresses", parent: name, max: 2) try self.validate(self.name, name: "name", parent: name, max: 255) } private enum CodingKeys: String, CodingKey { case acceleratorArn = "AcceleratorArn" case enabled = "Enabled" + case ipAddresses = "IpAddresses" case ipAddressType = "IpAddressType" case name = "Name" } @@ -2990,6 +2998,35 @@ extension GlobalAccelerator { public let acceleratorArn: String /// Indicates whether an accelerator is enabled. The value is true or false. The default value is true. If the value is set to true, the accelerator cannot be deleted. If set to false, the accelerator can be deleted. public let enabled: Bool? + /// The IP addresses for an accelerator. + public let ipAddresses: [String]? /// The IP address type that an accelerator supports. For a custom routing accelerator, the value must be IPV4. public let ipAddressType: IpAddressType? /// The name of the accelerator. The name can have a maximum of 64 characters, must contain only alphanumeric characters, /// periods (.), or hyphens (-), and must not begin or end with a hyphen or period. public let name: String? - public init(acceleratorArn: String, enabled: Bool? = nil, ipAddressType: IpAddressType? = nil, name: String? = nil) { + public init(acceleratorArn: String, enabled: Bool? = nil, ipAddresses: [String]? = nil, ipAddressType: IpAddressType? = nil, name: String? = nil) { self.acceleratorArn = acceleratorArn self.enabled = enabled + self.ipAddresses = ipAddresses self.ipAddressType = ipAddressType self.name = name } public func validate(name: String) throws { try self.validate(self.acceleratorArn, name: "acceleratorArn", parent: name, max: 255) + try self.ipAddresses?.forEach { + try validate($0, name: "ipAddresses[]", parent: name, max: 45) + } + try self.validate(self.ipAddresses, name: "ipAddresses", parent: name, max: 2) try self.validate(self.name, name: "name", parent: name, max: 255) } private enum CodingKeys: String, CodingKey { case acceleratorArn = "AcceleratorArn" case enabled = "Enabled" + case ipAddresses = "IpAddresses" case ipAddressType = "IpAddressType" case name = "Name" } diff --git a/Sources/Soto/Services/Glue/Glue_api.swift b/Sources/Soto/Services/Glue/Glue_api.swift index 6e2c15729f..c92e2be25c 100644 --- a/Sources/Soto/Services/Glue/Glue_api.swift +++ b/Sources/Soto/Services/Glue/Glue_api.swift @@ -640,6 +640,19 @@ public struct Glue: AWSService { ) } + /// Creates a Glue usage profile. + @Sendable + public func createUsageProfile(_ input: CreateUsageProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUsageProfileResponse { + return try await self.client.execute( + operation: "CreateUsageProfile", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a new function definition in the Data Catalog.
@Sendable public func createUserDefinedFunction(_ input: CreateUserDefinedFunctionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserDefinedFunctionResponse { @@ -978,6 +991,19 @@ public struct Glue: AWSService { ) } + /// Deletes the specified Glue usage profile. + @Sendable + public func deleteUsageProfile(_ input: DeleteUsageProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUsageProfileResponse { + return try await self.client.execute( + operation: "DeleteUsageProfile", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an existing function definition from the Data Catalog. @Sendable public func deleteUserDefinedFunction(_ input: DeleteUserDefinedFunctionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUserDefinedFunctionResponse { @@ -1368,7 +1394,7 @@ public struct Glue: AWSService { ) } - /// Retrieves the metadata for a given job run. + /// Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run. @Sendable public func getJobRun(_ input: GetJobRunRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetJobRunResponse { return try await self.client.execute( @@ -1810,6 +1836,19 @@ public struct Glue: AWSService { ) } + /// Retrieves information about the specified Glue usage profile. + @Sendable + public func getUsageProfile(_ input: GetUsageProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetUsageProfileResponse { + return try await self.client.execute( + operation: "GetUsageProfile", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves a specified function definition from the Data Catalog. @Sendable public func getUserDefinedFunction(_ input: GetUserDefinedFunctionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetUserDefinedFunctionResponse { @@ -1849,7 +1888,7 @@ public struct Glue: AWSService { ) } - /// Retrieves the metadata for a given workflow run. + /// Retrieves the metadata for a given workflow run. Job run history is accessible for 90 days for your workflow and job run. @Sendable public func getWorkflowRun(_ input: GetWorkflowRunRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetWorkflowRunResponse { return try await self.client.execute( @@ -2148,6 +2187,19 @@ public struct Glue: AWSService { ) } + /// Lists all the Glue usage profiles. + @Sendable + public func listUsageProfiles(_ input: ListUsageProfilesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListUsageProfilesResponse { + return try await self.client.execute( + operation: "ListUsageProfiles", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists names of workflows created in the account. @Sendable public func listWorkflows(_ input: ListWorkflowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorkflowsResponse { @@ -2837,6 +2889,19 @@ public struct Glue: AWSService { ) } + /// Updates a Glue usage profile.
+ @Sendable + public func updateUsageProfile(_ input: UpdateUsageProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateUsageProfileResponse { + return try await self.client.execute( + operation: "UpdateUsageProfile", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates an existing function definition in the Data Catalog. @Sendable public func updateUserDefinedFunction(_ input: UpdateUserDefinedFunctionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateUserDefinedFunctionResponse { @@ -3618,6 +3683,25 @@ extension Glue { ) } + /// Lists all the Glue usage profiles. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listUsageProfilesPaginator( + _ input: ListUsageProfilesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listUsageProfiles, + inputKey: \ListUsageProfilesRequest.nextToken, + outputKey: \ListUsageProfilesResponse.nextToken, + logger: logger + ) + } + /// Lists names of workflows created in the account. /// Return PaginatorSequence for operation. /// @@ -4079,6 +4163,15 @@ extension Glue.ListTriggersRequest: AWSPaginateToken { } } +extension Glue.ListUsageProfilesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Glue.ListUsageProfilesRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Glue.ListWorkflowsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Glue.ListWorkflowsRequest { return .init( diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 5797c3a8be..f5500381c4 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -51,6 +51,13 @@ extension Glue { public var description: String { return self.rawValue } } + public enum AuthenticationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case basic = "BASIC" + case custom = "CUSTOM" + case oauth2 = "OAUTH2" + public var description: String { return self.rawValue } + } + public enum BackfillErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case encryptedPartitionError = "ENCRYPTED_PARTITION_ERROR" case internalError = "INTERNAL_ERROR" @@ -176,12 +183,20 @@ extension Glue { case kafkaSslEnabled = "KAFKA_SSL_ENABLED" case password = "PASSWORD" case port = "PORT" + case roleArn = "ROLE_ARN" case secretId = "SECRET_ID" case skipCustomJdbcCertValidation = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" case userName = "USERNAME" public var description: String { return self.rawValue } } + public enum ConnectionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case ready = "READY" + public var description: String { return self.rawValue } + } + public enum ConnectionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case custom = "CUSTOM" case jdbc = "JDBC" @@ -189,6 +204,7 @@ extension Glue { case marketplace = "MARKETPLACE" case mongodb = "MONGODB" case network = "NETWORK" + case salesforce = "SALESFORCE" case sftp = "SFTP" public var description: String { return self.rawValue } } @@ -238,6 +254,12 @@
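A brief sketch of the new usage-profile APIs above, using the paginator added in this update (the profiles collection on ListUsageProfilesResponse and the name field on its elements come from the AWS model and are not shown in this hunk, so treat them as assumptions):

import SotoGlue

// Hypothetical sketch: page through every Glue usage profile and print its name.
// ListUsageProfilesResponse.profiles and the element `name` property are assumed;
// only listUsageProfilesPaginator and ListUsageProfilesRequest appear in this diff.
func printUsageProfileNames(glue: Glue) async throws {
    for try await page in glue.listUsageProfilesPaginator(.init(maxResults: 50)) {
        for profile in page.profiles ?? [] {
            print(profile.name ?? "<unnamed>")
        }
    }
}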
public var description: String { return self.rawValue } } + public enum DQCompositeRuleEvaluationMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case column = "COLUMN" + case row = "ROW" + public var description: String { return self.rawValue } + } + public enum DQStopJobOnFailureTiming: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case afterDataLoad = "AfterDataLoad" case immediate = "Immediate" @@ -424,8 +446,16 @@ extension Glue { public var description: String { return self.rawValue } } + public enum JobMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case notebook = "NOTEBOOK" + case script = "SCRIPT" + case visual = "VISUAL" + public var description: String { return self.rawValue } + } + public enum JobRunState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case error = "ERROR" + case expired = "EXPIRED" case failed = "FAILED" case running = "RUNNING" case starting = "STARTING" @@ -489,6 +519,13 @@ extension Glue { public var description: String { return self.rawValue } } + public enum OAuth2GrantType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case authorizationCode = "AUTHORIZATION_CODE" + case clientCredentials = "CLIENT_CREDENTIALS" + case jwtBearer = "JWT_BEARER" + public var description: String { return self.rawValue } + } + public enum ParamType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case bool = "bool" case complex = "complex" @@ -802,6 +839,14 @@ extension Glue { public var description: String { return self.rawValue } } + public enum ViewUpdateAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case add = "ADD" + case addOrReplace = "ADD_OR_REPLACE" + case drop = "DROP" + case replace = "REPLACE" + public var description: String { return self.rawValue } + } + public enum WorkerType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case g025X = "G.025X" case g1X = "G.1X" @@ -1250,6 +1295,78 @@ extension Glue { } } + public struct AuthenticationConfiguration: AWSDecodableShape { + /// A structure containing the authentication configuration. + public let authenticationType: AuthenticationType? + /// The properties for OAuth2 authentication. + public let oAuth2Properties: OAuth2Properties? + /// The secret manager ARN to store credentials. + public let secretArn: String? + + public init(authenticationType: AuthenticationType? = nil, oAuth2Properties: OAuth2Properties? = nil, secretArn: String? = nil) { + self.authenticationType = authenticationType + self.oAuth2Properties = oAuth2Properties + self.secretArn = secretArn + } + + private enum CodingKeys: String, CodingKey { + case authenticationType = "AuthenticationType" + case oAuth2Properties = "OAuth2Properties" + case secretArn = "SecretArn" + } + } + + public struct AuthenticationConfigurationInput: AWSEncodableShape { + /// A structure containing the authentication configuration in the CreateConnection request. + public let authenticationType: AuthenticationType? + /// The properties for OAuth2 authentication in the CreateConnection request. + public let oAuth2Properties: OAuth2PropertiesInput? + /// The secret manager ARN to store credentials in the CreateConnection request. + public let secretArn: String? + + public init(authenticationType: AuthenticationType? = nil, oAuth2Properties: OAuth2PropertiesInput? = nil, secretArn: String? 
= nil) { + self.authenticationType = authenticationType + self.oAuth2Properties = oAuth2Properties + self.secretArn = secretArn + } + + public func validate(name: String) throws { + try self.oAuth2Properties?.validate(name: "\(name).oAuth2Properties") + try self.validate(self.secretArn, name: "secretArn", parent: name, pattern: "^arn:aws(-(cn|us-gov|iso(-[bef])?))?:secretsmanager:.*$") + } + + private enum CodingKeys: String, CodingKey { + case authenticationType = "AuthenticationType" + case oAuth2Properties = "OAuth2Properties" + case secretArn = "SecretArn" + } + } + + public struct AuthorizationCodeProperties: AWSEncodableShape { + /// An authorization code to be used in the third leg of the AUTHORIZATION_CODE grant workflow. This is a single-use code which becomes invalid once exchanged for an access token, thus it is acceptable to have this value as a request parameter. + public let authorizationCode: String? + /// The redirect URI where the user gets redirected to by authorization server when issuing an authorization code. The URI is subsequently used when the authorization code is exchanged for an access token. + public let redirectUri: String? + + public init(authorizationCode: String? = nil, redirectUri: String? = nil) { + self.authorizationCode = authorizationCode + self.redirectUri = redirectUri + } + + public func validate(name: String) throws { + try self.validate(self.authorizationCode, name: "authorizationCode", parent: name, max: 4096) + try self.validate(self.authorizationCode, name: "authorizationCode", parent: name, min: 1) + try self.validate(self.authorizationCode, name: "authorizationCode", parent: name, pattern: "^\\S+$") + try self.validate(self.redirectUri, name: "redirectUri", parent: name, max: 512) + try self.validate(self.redirectUri, name: "redirectUri", parent: name, pattern: "^(https?):\\/\\/[^\\s/$.?#].[^\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case authorizationCode = "AuthorizationCode" + case redirectUri = "RedirectUri" + } + } + public struct BackfillError: AWSDecodableShape { /// The error code for an error that occurred when registering partition indexes for an existing table. public let code: BackfillErrorCode? @@ -3706,6 +3823,48 @@ extension Glue { } } + public struct ConfigurationObject: AWSEncodableShape & AWSDecodableShape { + /// A list of allowed values for the parameter. + public let allowedValues: [String]? + /// A default value for the parameter. + public let defaultValue: String? + /// A maximum allowed value for the parameter. + public let maxValue: String? + /// A minimum allowed value for the parameter. + public let minValue: String? + + public init(allowedValues: [String]? = nil, defaultValue: String? = nil, maxValue: String? = nil, minValue: String? 
= nil) { + self.allowedValues = allowedValues + self.defaultValue = defaultValue + self.maxValue = maxValue + self.minValue = minValue + } + + public func validate(name: String) throws { + try self.allowedValues?.forEach { + try validate($0, name: "allowedValues[]", parent: name, max: 128) + try validate($0, name: "allowedValues[]", parent: name, min: 1) + try validate($0, name: "allowedValues[]", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + } + try self.validate(self.defaultValue, name: "defaultValue", parent: name, max: 128) + try self.validate(self.defaultValue, name: "defaultValue", parent: name, min: 1) + try self.validate(self.defaultValue, name: "defaultValue", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.validate(self.maxValue, name: "maxValue", parent: name, max: 128) + try self.validate(self.maxValue, name: "maxValue", parent: name, min: 1) + try self.validate(self.maxValue, name: "maxValue", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.validate(self.minValue, name: "minValue", parent: name, max: 128) + try self.validate(self.minValue, name: "minValue", parent: name, min: 1) + try self.validate(self.minValue, name: "minValue", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + } + + private enum CodingKeys: String, CodingKey { + case allowedValues = "AllowedValues" + case defaultValue = "DefaultValue" + case maxValue = "MaxValue" + case minValue = "MinValue" + } + } + public struct ConfusionMatrix: AWSDecodableShape { /// The number of matches in the data that the transform didn't find, in the confusion matrix for your transform. public let numFalseNegatives: Int64? @@ -3732,74 +3891,97 @@ extension Glue { } public struct Connection: AWSDecodableShape { + /// The authentication properties of the connection. + public let authenticationConfiguration: AuthenticationConfiguration? /// These key-value pairs define parameters for the connection: HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host. PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections. USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME". PASSWORD - A password, if one is used, for the user name. ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password. JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use. JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. JDBC_ENGINE - The name of the JDBC engine to use. JDBC_ENGINE_VERSION - The version of the JDBC engine to use. CONFIG_FILES - (Reserved for future use.) INSTANCE_ID - The instance ID to use. JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source. JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false. CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format. SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. 
Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate. CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate. CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source. SECRET_ID - The secret ID used for the secret manager of credentials. CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection. CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection. CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection. KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself. KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is "true". KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string. KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is "false". KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional). KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional). KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional). ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected). ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_MECHANISM - "SCRAM-SHA-512", "GSSAPI", "AWS_MSK_IAM", or "PLAIN". These are the supported SASL Mechanisms. KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the "PLAIN" mechanism. KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the "PLAIN" mechanism. ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the "SCRAM-SHA-512" mechanism. KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the "SCRAM-SHA-512" mechanism. ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager. KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab. 
KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf. KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration. KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers. public let connectionProperties: [ConnectionPropertyKey: String]? /// The type of the connection. Currently, SFTP is not supported. public let connectionType: ConnectionType? - /// The time that this connection definition was created. + /// The timestamp of the time that this connection definition was created. public let creationTime: Date? /// The description of the connection. public let description: String? + /// A timestamp of the time this connection was last validated. + public let lastConnectionValidationTime: Date? /// The user, group, or role that last updated this connection definition. public let lastUpdatedBy: String? - /// The last time that this connection definition was updated. + /// The timestamp of the last time the connection definition was updated. public let lastUpdatedTime: Date? /// A list of criteria that can be used in selecting this connection. public let matchCriteria: [String]? /// The name of the connection definition. public let name: String? - /// A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully. + /// The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully. public let physicalConnectionRequirements: PhysicalConnectionRequirements? + /// The status of the connection. Can be one of: READY, IN_PROGRESS, or FAILED. + public let status: ConnectionStatus? + /// The reason for the connection status. + public let statusReason: String? - public init(connectionProperties: [ConnectionPropertyKey: String]? = nil, connectionType: ConnectionType? = nil, creationTime: Date? = nil, description: String? = nil, lastUpdatedBy: String? = nil, lastUpdatedTime: Date? = nil, matchCriteria: [String]? = nil, name: String? = nil, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil) { + public init(authenticationConfiguration: AuthenticationConfiguration? = nil, connectionProperties: [ConnectionPropertyKey: String]? = nil, connectionType: ConnectionType? = nil, creationTime: Date? = nil, description: String? = nil, lastConnectionValidationTime: Date? = nil, lastUpdatedBy: String? = nil, lastUpdatedTime: Date? = nil, matchCriteria: [String]? = nil, name: String? = nil, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil, status: ConnectionStatus? = nil, statusReason: String? 
= nil) { + self.authenticationConfiguration = authenticationConfiguration self.connectionProperties = connectionProperties self.connectionType = connectionType self.creationTime = creationTime self.description = description + self.lastConnectionValidationTime = lastConnectionValidationTime self.lastUpdatedBy = lastUpdatedBy self.lastUpdatedTime = lastUpdatedTime self.matchCriteria = matchCriteria self.name = name self.physicalConnectionRequirements = physicalConnectionRequirements + self.status = status + self.statusReason = statusReason } private enum CodingKeys: String, CodingKey { + case authenticationConfiguration = "AuthenticationConfiguration" case connectionProperties = "ConnectionProperties" case connectionType = "ConnectionType" case creationTime = "CreationTime" case description = "Description" + case lastConnectionValidationTime = "LastConnectionValidationTime" case lastUpdatedBy = "LastUpdatedBy" case lastUpdatedTime = "LastUpdatedTime" case matchCriteria = "MatchCriteria" case name = "Name" case physicalConnectionRequirements = "PhysicalConnectionRequirements" + case status = "Status" + case statusReason = "StatusReason" } } public struct ConnectionInput: AWSEncodableShape { + /// The authentication properties of the connection. Used for a Salesforce connection. + public let authenticationConfiguration: AuthenticationConfigurationInput? /// These key-value pairs define parameters for the connection. public let connectionProperties: [ConnectionPropertyKey: String] - /// The type of the connection. Currently, these types are supported: JDBC - Designates a connection to a database through Java Database Connectivity (JDBC). JDBC Connections use the following ConnectionParameters. Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC. KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA Connections use the following ConnectionParameters. Required: KAFKA_BOOTSTRAP_SERVERS. Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA. Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA. Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM. Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA. Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA. MONGODB - Designates a connection to a MongoDB document database. MONGODB Connections use the following ConnectionParameters. Required: CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements. 
MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue. MARKETPLACE Connections use the following ConnectionParameters. Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL. Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID. CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue. SFTP is not supported. For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties. For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections. + /// The type of the connection. Currently, these types are supported: JDBC - Designates a connection to a database through Java Database Connectivity (JDBC). JDBC Connections use the following ConnectionParameters. Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC. KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA Connections use the following ConnectionParameters. Required: KAFKA_BOOTSTRAP_SERVERS. Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA. Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA. Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM. Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA. Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA. MONGODB - Designates a connection to a MongoDB document database. MONGODB Connections use the following ConnectionParameters. Required: CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. SALESFORCE - Designates a connection to Salesforce using OAuth authentication. Requires the AuthenticationConfiguration member to be configured. NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements. MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue. MARKETPLACE Connections use the following ConnectionParameters. Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL. Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.
CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue. SFTP is not supported. For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties. For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections. public let connectionType: ConnectionType /// The description of the connection. public let description: String? /// A list of criteria that can be used in selecting this connection. public let matchCriteria: [String]? - /// The name of the connection. Connection will not function as expected without a name. + /// The name of the connection. public let name: String - /// A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection. + /// The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection. public let physicalConnectionRequirements: PhysicalConnectionRequirements? + /// A flag to validate the credentials during create connection. Used for a Salesforce connection. Default is true. + public let validateCredentials: Bool? - public init(connectionProperties: [ConnectionPropertyKey: String], connectionType: ConnectionType, description: String? = nil, matchCriteria: [String]? = nil, name: String, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil) { + public init(authenticationConfiguration: AuthenticationConfigurationInput? = nil, connectionProperties: [ConnectionPropertyKey: String], connectionType: ConnectionType, description: String? = nil, matchCriteria: [String]? = nil, name: String, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil, validateCredentials: Bool? = nil) { + self.authenticationConfiguration = authenticationConfiguration self.connectionProperties = connectionProperties self.connectionType = connectionType self.description = description self.matchCriteria = matchCriteria self.name = name self.physicalConnectionRequirements = physicalConnectionRequirements + self.validateCredentials = validateCredentials } public func validate(name: String) throws { + try self.authenticationConfiguration?.validate(name: "\(name).authenticationConfiguration") try self.connectionProperties.forEach { try validate($0.value, name: "connectionProperties[\"\($0.key)\"]", parent: name, max: 1024) } @@ -3819,12 +4001,14 @@ extension Glue { } private enum CodingKeys: String, CodingKey { + case authenticationConfiguration = "AuthenticationConfiguration" case connectionProperties = "ConnectionProperties" case connectionType = "ConnectionType" case description = "Description" case matchCriteria = "MatchCriteria" case name = "Name" case physicalConnectionRequirements = "PhysicalConnectionRequirements" + case validateCredentials = "ValidateCredentials" } } @@ -4353,7 +4537,16 @@ extension Glue { } public struct CreateConnectionResponse: AWSDecodableShape { - public init() {} + /// The status of the connection creation request. The request can take some time for certain authentication types, for example when creating an OAuth connection with token exchange over VPC. + public let createConnectionStatus: ConnectionStatus? + + public init(createConnectionStatus: ConnectionStatus? 
= nil) { + self.createConnectionStatus = createConnectionStatus + } + + private enum CodingKeys: String, CodingKey { + case createConnectionStatus = "CreateConnectionStatus" + } } public struct CreateCrawlerRequest: AWSEncodableShape { @@ -4914,8 +5107,12 @@ extension Glue { public let executionProperty: ExecutionProperty? /// In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. public let glueVersion: String? + /// A mode that describes how a job was created. Valid values are: SCRIPT - The job was created using the Glue Studio script editor. VISUAL - The job was created using the Glue Studio visual editor. NOTEBOOK - The job was created using an interactive sessions notebook. When the JobMode field is missing or null, SCRIPT is assigned as the default value. + public let jobMode: JobMode? /// This field is reserved for future use. public let logUri: String? + /// This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. + public let maintenanceWindow: String? /// For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. public let maxCapacity: Double? /// The maximum number of times to retry this job if it fails. @@ -4936,12 +5133,12 @@ extension Glue { public let sourceControlDetails: SourceControlDetails? /// The tags to use with this job. You may use tags to limit access to the job. For more information about tags in Glue, see Amazon Web Services Tags in Glue in the developer guide. public let tags: [String: String]? - /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. 
The default is 2,880 minutes (48 hours). + /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? - public init(codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, logUri: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String, securityConfiguration: String? 
= nil, sourceControlDetails: SourceControlDetails? = nil, tags: [String: String]? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { + public init(codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, jobMode: JobMode? = nil, logUri: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, tags: [String: String]? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { self.allocatedCapacity = nil self.codeGenConfigurationNodes = codeGenConfigurationNodes self.command = command @@ -4951,7 +5148,9 @@ extension Glue { self.executionClass = executionClass self.executionProperty = executionProperty self.glueVersion = glueVersion + self.jobMode = jobMode self.logUri = logUri + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.maxRetries = maxRetries self.name = name @@ -4967,7 +5166,7 @@ extension Glue { } @available(*, deprecated, message: "Members allocatedCapacity have been deprecated") - public init(allocatedCapacity: Int? = nil, codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, logUri: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, tags: [String: String]? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { + public init(allocatedCapacity: Int? = nil, codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, jobMode: JobMode? = nil, logUri: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, tags: [String: String]? = nil, timeout: Int? = nil, workerType: WorkerType? 
= nil) { self.allocatedCapacity = allocatedCapacity self.codeGenConfigurationNodes = codeGenConfigurationNodes self.command = command @@ -4977,7 +5176,9 @@ extension Glue { self.executionClass = executionClass self.executionProperty = executionProperty self.glueVersion = glueVersion + self.jobMode = jobMode self.logUri = logUri + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.maxRetries = maxRetries self.name = name @@ -5003,6 +5204,7 @@ extension Glue { try self.validate(self.glueVersion, name: "glueVersion", parent: name, max: 255) try self.validate(self.glueVersion, name: "glueVersion", parent: name, min: 1) try self.validate(self.glueVersion, name: "glueVersion", parent: name, pattern: "^\\w+\\.\\w+$") + try self.validate(self.maintenanceWindow, name: "maintenanceWindow", parent: name, pattern: "^(Sun|Mon|Tue|Wed|Thu|Fri|Sat):([01]?[0-9]|2[0-3])$") try self.validate(self.name, name: "name", parent: name, max: 255) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") @@ -5030,7 +5232,9 @@ extension Glue { case executionClass = "ExecutionClass" case executionProperty = "ExecutionProperty" case glueVersion = "GlueVersion" + case jobMode = "JobMode" case logUri = "LogUri" + case maintenanceWindow = "MaintenanceWindow" case maxCapacity = "MaxCapacity" case maxRetries = "MaxRetries" case name = "Name" @@ -5836,6 +6040,59 @@ extension Glue { } } + public struct CreateUsageProfileRequest: AWSEncodableShape { + /// A ProfileConfiguration object specifying the job and session values for the profile. + public let configuration: ProfileConfiguration + /// A description of the usage profile. + public let description: String? + /// The name of the usage profile. + public let name: String + /// A list of tags applied to the usage profile. + public let tags: [String: String]? + + public init(configuration: ProfileConfiguration, description: String? = nil, name: String, tags: [String: String]? = nil) { + self.configuration = configuration + self.description = description + self.name = name + self.tags = tags + } + + public func validate(name: String) throws { + try self.configuration.validate(name: "\(name).configuration") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case configuration = "Configuration" + case description = "Description" + case name = "Name" + case tags = "Tags" + } + } + + public struct CreateUsageProfileResponse: AWSDecodableShape { + /// The name of the usage profile that was created. + public let name: String? + + public init(name: String? 
= nil) { + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + } + } + public struct CreateUserDefinedFunctionRequest: AWSEncodableShape { /// The ID of the Data Catalog in which to create the function. If none is provided, the Amazon Web Services account ID is used by default. public let catalogId: String? @@ -6192,16 +6449,20 @@ extension Glue { public struct DataQualityEvaluationRunAdditionalRunOptions: AWSEncodableShape & AWSDecodableShape { /// Whether or not to enable CloudWatch metrics. public let cloudWatchMetricsEnabled: Bool? + /// Set the evaluation method for composite rules in the ruleset to ROW/COLUMN + public let compositeRuleEvaluationMethod: DQCompositeRuleEvaluationMethod? /// Prefix for Amazon S3 to store results. public let resultsS3Prefix: String? - public init(cloudWatchMetricsEnabled: Bool? = nil, resultsS3Prefix: String? = nil) { + public init(cloudWatchMetricsEnabled: Bool? = nil, compositeRuleEvaluationMethod: DQCompositeRuleEvaluationMethod? = nil, resultsS3Prefix: String? = nil) { self.cloudWatchMetricsEnabled = cloudWatchMetricsEnabled + self.compositeRuleEvaluationMethod = compositeRuleEvaluationMethod self.resultsS3Prefix = resultsS3Prefix } private enum CodingKeys: String, CodingKey { case cloudWatchMetricsEnabled = "CloudWatchMetricsEnabled" + case compositeRuleEvaluationMethod = "CompositeRuleEvaluationMethod" case resultsS3Prefix = "ResultsS3Prefix" } } @@ -7698,6 +7959,29 @@ extension Glue { } } + public struct DeleteUsageProfileRequest: AWSEncodableShape { + /// The name of the usage profile to delete. + public let name: String + + public init(name: String) { + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + } + } + + public struct DeleteUsageProfileResponse: AWSDecodableShape { + public init() {} + } + public struct DeleteUserDefinedFunctionRequest: AWSEncodableShape { /// The ID of the Data Catalog where the function to be deleted is located. If none is supplied, the Amazon Web Services account ID is used by default. public let catalogId: String? @@ -9783,7 +10067,7 @@ extension Glue { public let resultIds: [String]? /// An IAM role supplied to encrypt the results of the run. public let role: String? - /// A list of ruleset names for the run. + /// A list of ruleset names for the run. Currently, this parameter takes only one Ruleset name. public let rulesetNames: [String]? /// The unique run identifier associated with this run. public let runId: String? @@ -12072,6 +12356,54 @@ extension Glue { } } + public struct GetUsageProfileRequest: AWSEncodableShape { + /// The name of the usage profile to retrieve. 
+ public let name: String + + public init(name: String) { + self.name = name + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + } + } + + public struct GetUsageProfileResponse: AWSDecodableShape { + /// A ProfileConfiguration object specifying the job and session values for the profile. + public let configuration: ProfileConfiguration? + /// The date and time when the usage profile was created. + public let createdOn: Date? + /// A description of the usage profile. + public let description: String? + /// The date and time when the usage profile was last modified. + public let lastModifiedOn: Date? + /// The name of the usage profile. + public let name: String? + + public init(configuration: ProfileConfiguration? = nil, createdOn: Date? = nil, description: String? = nil, lastModifiedOn: Date? = nil, name: String? = nil) { + self.configuration = configuration + self.createdOn = createdOn + self.description = description + self.lastModifiedOn = lastModifiedOn + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case configuration = "Configuration" + case createdOn = "CreatedOn" + case description = "Description" + case lastModifiedOn = "LastModifiedOn" + case name = "Name" + } + } + public struct GetUserDefinedFunctionRequest: AWSEncodableShape { /// The ID of the Data Catalog where the function to be retrieved is located. If none is provided, the Amazon Web Services account ID is used by default. public let catalogId: String? @@ -12911,10 +13243,14 @@ extension Glue { public let executionProperty: ExecutionProperty? /// In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. public let glueVersion: String? + /// A mode that describes how a job was created. Valid values are: SCRIPT - The job was created using the Glue Studio script editor. VISUAL - The job was created using the Glue Studio visual editor. NOTEBOOK - The job was created using an interactive sessions notebook. When the JobMode field is missing or null, SCRIPT is assigned as the default value. + public let jobMode: JobMode? /// The last point in time when this job definition was modified. public let lastModifiedOn: Date? /// This field is reserved for future use. public let logUri: String? + /// This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. 
+ public let maintenanceWindow: String? /// For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0 or later jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. public let maxCapacity: Double? /// The maximum number of times to retry this job after a JobRun fails. @@ -12927,18 +13263,20 @@ extension Glue { public let notificationProperty: NotificationProperty? /// The number of workers of a defined workerType that are allocated when a job runs. public let numberOfWorkers: Int? + /// The name of a Glue usage profile associated with the job. + public let profileName: String? /// The name or Amazon Resource Name (ARN) of the IAM role associated with this job. public let role: String? /// The name of the SecurityConfiguration structure to be used with this job. public let securityConfiguration: String? /// The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. public let sourceControlDetails: SourceControlDetails? - /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). + /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? - public init(codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, createdOn: Date? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, lastModifiedOn: Date? = nil, logUri: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { + public init(codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, createdOn: Date? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, jobMode: JobMode? = nil, lastModifiedOn: Date? = nil, logUri: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, profileName: String? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? 
= nil) { self.allocatedCapacity = nil self.codeGenConfigurationNodes = codeGenConfigurationNodes self.command = command @@ -12949,14 +13287,17 @@ extension Glue { self.executionClass = executionClass self.executionProperty = executionProperty self.glueVersion = glueVersion + self.jobMode = jobMode self.lastModifiedOn = lastModifiedOn self.logUri = logUri + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.maxRetries = maxRetries self.name = name self.nonOverridableArguments = nonOverridableArguments self.notificationProperty = notificationProperty self.numberOfWorkers = numberOfWorkers + self.profileName = profileName self.role = role self.securityConfiguration = securityConfiguration self.sourceControlDetails = sourceControlDetails @@ -12965,7 +13306,7 @@ extension Glue { } @available(*, deprecated, message: "Members allocatedCapacity have been deprecated") - public init(allocatedCapacity: Int? = nil, codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, createdOn: Date? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, lastModifiedOn: Date? = nil, logUri: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { + public init(allocatedCapacity: Int? = nil, codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, createdOn: Date? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, jobMode: JobMode? = nil, lastModifiedOn: Date? = nil, logUri: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, name: String? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, profileName: String? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? 
= nil) { self.allocatedCapacity = allocatedCapacity self.codeGenConfigurationNodes = codeGenConfigurationNodes self.command = command @@ -12976,14 +13317,17 @@ extension Glue { self.executionClass = executionClass self.executionProperty = executionProperty self.glueVersion = glueVersion + self.jobMode = jobMode self.lastModifiedOn = lastModifiedOn self.logUri = logUri + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.maxRetries = maxRetries self.name = name self.nonOverridableArguments = nonOverridableArguments self.notificationProperty = notificationProperty self.numberOfWorkers = numberOfWorkers + self.profileName = profileName self.role = role self.securityConfiguration = securityConfiguration self.sourceControlDetails = sourceControlDetails @@ -13002,14 +13346,17 @@ extension Glue { case executionClass = "ExecutionClass" case executionProperty = "ExecutionProperty" case glueVersion = "GlueVersion" + case jobMode = "JobMode" case lastModifiedOn = "LastModifiedOn" case logUri = "LogUri" + case maintenanceWindow = "MaintenanceWindow" case maxCapacity = "MaxCapacity" case maxRetries = "MaxRetries" case name = "Name" case nonOverridableArguments = "NonOverridableArguments" case notificationProperty = "NotificationProperty" case numberOfWorkers = "NumberOfWorkers" + case profileName = "ProfileName" case role = "Role" case securityConfiguration = "SecurityConfiguration" case sourceControlDetails = "SourceControlDetails" @@ -13130,7 +13477,7 @@ extension Glue { public let attempt: Int? /// The date and time that this job run completed. public let completedOn: Date? - /// This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity. + /// This field can be set for either job runs with execution class FLEX or when Auto Scaling is enabled, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity. public let dpuSeconds: Double? /// An error message associated with this job run. public let errorMessage: String? @@ -13142,6 +13489,8 @@ extension Glue { public let glueVersion: String? /// The ID of this job run. public let id: String? + /// A mode that describes how a job was created. Valid values are: SCRIPT - The job was created using the Glue Studio script editor. VISUAL - The job was created using the Glue Studio visual editor. NOTEBOOK - The job was created using an interactive sessions notebook. When the JobMode field is missing or null, SCRIPT is assigned as the default value. + public let jobMode: JobMode? /// The name of the job definition being used in this run. public let jobName: String? /// The current state of the job run. 
For more information about the statuses of jobs that have terminated abnormally, see Glue Job Run Statuses. @@ -13150,6 +13499,8 @@ public let lastModifiedOn: Date? /// The name of the log group for secure logging that can be server-side encrypted in Amazon CloudWatch using KMS. This name can be /aws-glue/jobs/, in which case the default encryption is NONE. If you add a role name and SecurityConfiguration name (in other words, /aws-glue/jobs-yourRoleName-yourSecurityConfigurationName/), then that security configuration is used to encrypt the log group. public let logGroupName: String? + /// This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. + public let maintenanceWindow: String? /// For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. public let maxCapacity: Double? /// Specifies configuration properties of a job run notification. @@ -13160,18 +13511,20 @@ public let predecessorRuns: [Predecessor]? /// The ID of the previous run of this job. For example, the JobRunId specified in the StartJobRun action. public let previousRunId: String? + /// The name of a Glue usage profile associated with the job run. + public let profileName: String? /// The name of the SecurityConfiguration structure to be used with this job run. public let securityConfiguration: String? /// The date and time at which this job run was started. public let startedOn: Date? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours). + /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window.
If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The name of the trigger that started this job run. public let triggerName: String? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? - public init(arguments: [String: String]? = nil, attempt: Int? = nil, completedOn: Date? = nil, dpuSeconds: Double? = nil, errorMessage: String? = nil, executionClass: ExecutionClass? = nil, executionTime: Int? = nil, glueVersion: String? = nil, id: String? = nil, jobName: String? = nil, jobRunState: JobRunState? = nil, lastModifiedOn: Date? = nil, logGroupName: String? = nil, maxCapacity: Double? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, predecessorRuns: [Predecessor]? = nil, previousRunId: String? = nil, securityConfiguration: String? = nil, startedOn: Date? = nil, timeout: Int? = nil, triggerName: String? = nil, workerType: WorkerType? = nil) { + public init(arguments: [String: String]? = nil, attempt: Int? = nil, completedOn: Date? = nil, dpuSeconds: Double? = nil, errorMessage: String? = nil, executionClass: ExecutionClass? = nil, executionTime: Int? = nil, glueVersion: String? = nil, id: String? 
= nil, jobMode: JobMode? = nil, jobName: String? = nil, jobRunState: JobRunState? = nil, lastModifiedOn: Date? = nil, logGroupName: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, predecessorRuns: [Predecessor]? = nil, previousRunId: String? = nil, profileName: String? = nil, securityConfiguration: String? = nil, startedOn: Date? = nil, timeout: Int? = nil, triggerName: String? = nil, workerType: WorkerType? = nil) { self.allocatedCapacity = nil self.arguments = arguments self.attempt = attempt @@ -13182,15 +13535,18 @@ extension Glue { self.executionTime = executionTime self.glueVersion = glueVersion self.id = id + self.jobMode = jobMode self.jobName = jobName self.jobRunState = jobRunState self.lastModifiedOn = lastModifiedOn self.logGroupName = logGroupName + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.notificationProperty = notificationProperty self.numberOfWorkers = numberOfWorkers self.predecessorRuns = predecessorRuns self.previousRunId = previousRunId + self.profileName = profileName self.securityConfiguration = securityConfiguration self.startedOn = startedOn self.timeout = timeout @@ -13199,7 +13555,7 @@ extension Glue { } @available(*, deprecated, message: "Members allocatedCapacity have been deprecated") - public init(allocatedCapacity: Int? = nil, arguments: [String: String]? = nil, attempt: Int? = nil, completedOn: Date? = nil, dpuSeconds: Double? = nil, errorMessage: String? = nil, executionClass: ExecutionClass? = nil, executionTime: Int? = nil, glueVersion: String? = nil, id: String? = nil, jobName: String? = nil, jobRunState: JobRunState? = nil, lastModifiedOn: Date? = nil, logGroupName: String? = nil, maxCapacity: Double? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, predecessorRuns: [Predecessor]? = nil, previousRunId: String? = nil, securityConfiguration: String? = nil, startedOn: Date? = nil, timeout: Int? = nil, triggerName: String? = nil, workerType: WorkerType? = nil) { + public init(allocatedCapacity: Int? = nil, arguments: [String: String]? = nil, attempt: Int? = nil, completedOn: Date? = nil, dpuSeconds: Double? = nil, errorMessage: String? = nil, executionClass: ExecutionClass? = nil, executionTime: Int? = nil, glueVersion: String? = nil, id: String? = nil, jobMode: JobMode? = nil, jobName: String? = nil, jobRunState: JobRunState? = nil, lastModifiedOn: Date? = nil, logGroupName: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, predecessorRuns: [Predecessor]? = nil, previousRunId: String? = nil, profileName: String? = nil, securityConfiguration: String? = nil, startedOn: Date? = nil, timeout: Int? = nil, triggerName: String? = nil, workerType: WorkerType? 
= nil) { self.allocatedCapacity = allocatedCapacity self.arguments = arguments self.attempt = attempt @@ -13210,15 +13566,18 @@ extension Glue { self.executionTime = executionTime self.glueVersion = glueVersion self.id = id + self.jobMode = jobMode self.jobName = jobName self.jobRunState = jobRunState self.lastModifiedOn = lastModifiedOn self.logGroupName = logGroupName + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.notificationProperty = notificationProperty self.numberOfWorkers = numberOfWorkers self.predecessorRuns = predecessorRuns self.previousRunId = previousRunId + self.profileName = profileName self.securityConfiguration = securityConfiguration self.startedOn = startedOn self.timeout = timeout @@ -13237,15 +13596,18 @@ extension Glue { case executionTime = "ExecutionTime" case glueVersion = "GlueVersion" case id = "Id" + case jobMode = "JobMode" case jobName = "JobName" case jobRunState = "JobRunState" case lastModifiedOn = "LastModifiedOn" case logGroupName = "LogGroupName" + case maintenanceWindow = "MaintenanceWindow" case maxCapacity = "MaxCapacity" case notificationProperty = "NotificationProperty" case numberOfWorkers = "NumberOfWorkers" case predecessorRuns = "PredecessorRuns" case previousRunId = "PreviousRunId" + case profileName = "ProfileName" case securityConfiguration = "SecurityConfiguration" case startedOn = "StartedOn" case timeout = "Timeout" @@ -13273,8 +13635,12 @@ extension Glue { public let executionProperty: ExecutionProperty? /// In Spark jobs, GlueVersion determines the versions of Apache Spark and Python that Glue available in a job. The Python version indicates the version supported for jobs of type Spark. Ray jobs should set GlueVersion to 4.0 or greater. However, the versions of Ray, Python and additional libraries available in your Ray job are determined by the Runtime parameter of the Job command. For more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. public let glueVersion: String? + /// A mode that describes how a job was created. Valid values are: SCRIPT - The job was created using the Glue Studio script editor. VISUAL - The job was created using the Glue Studio visual editor. NOTEBOOK - The job was created using an interactive sessions notebook. When the JobMode field is missing or null, SCRIPT is assigned as the default value. + public let jobMode: JobMode? /// This field is reserved for future use. public let logUri: String? + /// This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT. + public let maintenanceWindow: String? /// For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page. For Glue version 2.0+ jobs, you cannot specify a Maximum capacity. 
Instead, you should specify a Worker type and the Number of workers. Do not set MaxCapacity if using WorkerType and NumberOfWorkers. The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job: When you specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. public let maxCapacity: Double? /// The maximum number of times to retry this job if it fails. @@ -13291,12 +13657,12 @@ extension Glue { public let securityConfiguration: String? /// The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository. public let sourceControlDetails: SourceControlDetails? - /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). + /// The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours) for batch jobs. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. 
This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? - public init(codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, logUri: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { + public init(codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, jobMode: JobMode? = nil, logUri: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { self.allocatedCapacity = nil self.codeGenConfigurationNodes = codeGenConfigurationNodes self.command = command @@ -13306,7 +13672,9 @@ extension Glue { self.executionClass = executionClass self.executionProperty = executionProperty self.glueVersion = glueVersion + self.jobMode = jobMode self.logUri = logUri + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.maxRetries = maxRetries self.nonOverridableArguments = nonOverridableArguments @@ -13320,7 +13688,7 @@ extension Glue { } @available(*, deprecated, message: "Members allocatedCapacity have been deprecated") - public init(allocatedCapacity: Int? = nil, codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, logUri: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { + public init(allocatedCapacity: Int? 
= nil, codeGenConfigurationNodes: [String: CodeGenConfigurationNode]? = nil, command: JobCommand? = nil, connections: ConnectionsList? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, executionClass: ExecutionClass? = nil, executionProperty: ExecutionProperty? = nil, glueVersion: String? = nil, jobMode: JobMode? = nil, logUri: String? = nil, maintenanceWindow: String? = nil, maxCapacity: Double? = nil, maxRetries: Int? = nil, nonOverridableArguments: [String: String]? = nil, notificationProperty: NotificationProperty? = nil, numberOfWorkers: Int? = nil, role: String? = nil, securityConfiguration: String? = nil, sourceControlDetails: SourceControlDetails? = nil, timeout: Int? = nil, workerType: WorkerType? = nil) { self.allocatedCapacity = allocatedCapacity self.codeGenConfigurationNodes = codeGenConfigurationNodes self.command = command @@ -13330,7 +13698,9 @@ extension Glue { self.executionClass = executionClass self.executionProperty = executionProperty self.glueVersion = glueVersion + self.jobMode = jobMode self.logUri = logUri + self.maintenanceWindow = maintenanceWindow self.maxCapacity = maxCapacity self.maxRetries = maxRetries self.nonOverridableArguments = nonOverridableArguments @@ -13354,6 +13724,7 @@ extension Glue { try self.validate(self.glueVersion, name: "glueVersion", parent: name, max: 255) try self.validate(self.glueVersion, name: "glueVersion", parent: name, min: 1) try self.validate(self.glueVersion, name: "glueVersion", parent: name, pattern: "^\\w+\\.\\w+$") + try self.validate(self.maintenanceWindow, name: "maintenanceWindow", parent: name, pattern: "^(Sun|Mon|Tue|Wed|Thu|Fri|Sat):([01]?[0-9]|2[0-3])$") try self.notificationProperty?.validate(name: "\(name).notificationProperty") try self.validate(self.securityConfiguration, name: "securityConfiguration", parent: name, max: 255) try self.validate(self.securityConfiguration, name: "securityConfiguration", parent: name, min: 1) @@ -13372,7 +13743,9 @@ extension Glue { case executionClass = "ExecutionClass" case executionProperty = "ExecutionProperty" case glueVersion = "GlueVersion" + case jobMode = "JobMode" case logUri = "LogUri" + case maintenanceWindow = "MaintenanceWindow" case maxCapacity = "MaxCapacity" case maxRetries = "MaxRetries" case nonOverridableArguments = "NonOverridableArguments" @@ -14765,6 +15138,46 @@ extension Glue { } } + public struct ListUsageProfilesRequest: AWSEncodableShape { + /// The maximum number of usage profiles to return in a single response. + public let maxResults: Int? + /// A continuation token, included if this is a continuation call. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 200) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 400000) + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct ListUsageProfilesResponse: AWSDecodableShape { + /// A continuation token, present if the current list segment is not the last. + public let nextToken: String? + /// A list of usage profile (UsageProfileDefinition) objects. + public let profiles: [UsageProfileDefinition]? + + public init(nextToken: String? 
= nil, profiles: [UsageProfileDefinition]? = nil) { + self.nextToken = nextToken + self.profiles = profiles + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case profiles = "Profiles" + } + } + public struct ListWorkflowsRequest: AWSEncodableShape { /// The maximum size of a list to return. public let maxResults: Int? @@ -15392,6 +15805,97 @@ extension Glue { } } + public struct OAuth2ClientApplication: AWSEncodableShape & AWSDecodableShape { + /// The reference to the SaaS-side client app that is Amazon Web Services managed. + public let awsManagedClientApplicationReference: String? + /// The client application clientID if the ClientAppType is USER_MANAGED. + public let userManagedClientApplicationClientId: String? + + public init(awsManagedClientApplicationReference: String? = nil, userManagedClientApplicationClientId: String? = nil) { + self.awsManagedClientApplicationReference = awsManagedClientApplicationReference + self.userManagedClientApplicationClientId = userManagedClientApplicationClientId + } + + public func validate(name: String) throws { + try self.validate(self.awsManagedClientApplicationReference, name: "awsManagedClientApplicationReference", parent: name, max: 2048) + try self.validate(self.awsManagedClientApplicationReference, name: "awsManagedClientApplicationReference", parent: name, pattern: "^\\S+$") + try self.validate(self.userManagedClientApplicationClientId, name: "userManagedClientApplicationClientId", parent: name, max: 2048) + try self.validate(self.userManagedClientApplicationClientId, name: "userManagedClientApplicationClientId", parent: name, pattern: "^\\S+$") + } + + private enum CodingKeys: String, CodingKey { + case awsManagedClientApplicationReference = "AWSManagedClientApplicationReference" + case userManagedClientApplicationClientId = "UserManagedClientApplicationClientId" + } + } + + public struct OAuth2Properties: AWSDecodableShape { + /// The client application type. For example, AWS_MANAGED or USER_MANAGED. + public let oAuth2ClientApplication: OAuth2ClientApplication? + /// The OAuth2 grant type. For example, AUTHORIZATION_CODE, JWT_BEARER, or CLIENT_CREDENTIALS. + public let oAuth2GrantType: OAuth2GrantType? + /// The URL of the provider's authentication server, to exchange an authorization code for an access token. + public let tokenUrl: String? + /// A map of parameters that are added to the token GET request. + public let tokenUrlParametersMap: [String: String]? + + public init(oAuth2ClientApplication: OAuth2ClientApplication? = nil, oAuth2GrantType: OAuth2GrantType? = nil, tokenUrl: String? = nil, tokenUrlParametersMap: [String: String]? = nil) { + self.oAuth2ClientApplication = oAuth2ClientApplication + self.oAuth2GrantType = oAuth2GrantType + self.tokenUrl = tokenUrl + self.tokenUrlParametersMap = tokenUrlParametersMap + } + + private enum CodingKeys: String, CodingKey { + case oAuth2ClientApplication = "OAuth2ClientApplication" + case oAuth2GrantType = "OAuth2GrantType" + case tokenUrl = "TokenUrl" + case tokenUrlParametersMap = "TokenUrlParametersMap" + } + } + + public struct OAuth2PropertiesInput: AWSEncodableShape { + /// The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type. + public let authorizationCodeProperties: AuthorizationCodeProperties? + /// The client application type in the CreateConnection request. For example, AWS_MANAGED or USER_MANAGED. + public let oAuth2ClientApplication: OAuth2ClientApplication?
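
A minimal usage sketch for the ListUsageProfilesRequest/ListUsageProfilesResponse shapes defined above, added here for illustration rather than taken from the patch: it pages through usage profiles with the NextToken continuation pattern. It assumes the matching listUsageProfiles operation is generated in Glue_api.swift by this same update and that an already-configured Glue client is passed in.

import SotoGlue

/// Lists every Glue usage profile in the account, following NextToken continuations.
/// `glue` is assumed to be an already-configured `Glue` service client.
func printAllUsageProfiles(glue: Glue) async throws {
    var nextToken: String? = nil
    repeat {
        // ListUsageProfilesRequest accepts maxResults (1...200) and an optional continuation token.
        let response = try await glue.listUsageProfiles(.init(maxResults: 50, nextToken: nextToken))
        for profile in response.profiles ?? [] {
            print("\(profile.name ?? "<unnamed>"): \(profile.description ?? "no description")")
        }
        nextToken = response.nextToken
    } while nextToken != nil
}

If this update also generates a listUsageProfilesPaginator, iterating that AsyncSequence would replace the manual repeat/while loop.
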
+ /// The OAuth2 grant type in the CreateConnection request. For example, AUTHORIZATION_CODE, JWT_BEARER, or CLIENT_CREDENTIALS. + public let oAuth2GrantType: OAuth2GrantType? + /// The URL of the provider's authentication server, to exchange an authorization code for an access token. + public let tokenUrl: String? + /// A map of parameters that are added to the token GET request. + public let tokenUrlParametersMap: [String: String]? + + public init(authorizationCodeProperties: AuthorizationCodeProperties? = nil, oAuth2ClientApplication: OAuth2ClientApplication? = nil, oAuth2GrantType: OAuth2GrantType? = nil, tokenUrl: String? = nil, tokenUrlParametersMap: [String: String]? = nil) { + self.authorizationCodeProperties = authorizationCodeProperties + self.oAuth2ClientApplication = oAuth2ClientApplication + self.oAuth2GrantType = oAuth2GrantType + self.tokenUrl = tokenUrl + self.tokenUrlParametersMap = tokenUrlParametersMap + } + + public func validate(name: String) throws { + try self.authorizationCodeProperties?.validate(name: "\(name).authorizationCodeProperties") + try self.oAuth2ClientApplication?.validate(name: "\(name).oAuth2ClientApplication") + try self.validate(self.tokenUrl, name: "tokenUrl", parent: name, max: 256) + try self.validate(self.tokenUrl, name: "tokenUrl", parent: name, pattern: "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$") + try self.tokenUrlParametersMap?.forEach { + try validate($0.key, name: "tokenUrlParametersMap.key", parent: name, max: 128) + try validate($0.key, name: "tokenUrlParametersMap.key", parent: name, min: 1) + try validate($0.value, name: "tokenUrlParametersMap[\"\($0.key)\"]", parent: name, max: 512) + try validate($0.value, name: "tokenUrlParametersMap[\"\($0.key)\"]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case authorizationCodeProperties = "AuthorizationCodeProperties" + case oAuth2ClientApplication = "OAuth2ClientApplication" + case oAuth2GrantType = "OAuth2GrantType" + case tokenUrl = "TokenUrl" + case tokenUrlParametersMap = "TokenUrlParametersMap" + } + } + public struct OpenTableFormatInput: AWSEncodableShape { /// Specifies an IcebergInput structure that defines an Apache Iceberg metadata table. public let icebergInput: IcebergInput? @@ -15779,7 +16283,7 @@ extension Glue { } public struct PhysicalConnectionRequirements: AWSEncodableShape & AWSDecodableShape { - /// The connection's Availability Zone. This field is redundant because the specified subnet implies the Availability Zone to be used. Currently the field must be populated, but it will be deprecated in the future. + /// The connection's Availability Zone. public let availabilityZone: String? /// The security group ID list used by the connection. public let securityGroupIdList: [String]? @@ -15938,6 +16442,38 @@ extension Glue { } } + public struct ProfileConfiguration: AWSEncodableShape & AWSDecodableShape { + /// A key-value map of configuration parameters for Glue jobs. + public let jobConfiguration: [String: ConfigurationObject]? + /// A key-value map of configuration parameters for Glue sessions. + public let sessionConfiguration: [String: ConfigurationObject]? + + public init(jobConfiguration: [String: ConfigurationObject]? = nil, sessionConfiguration: [String: ConfigurationObject]? 
= nil) { + self.jobConfiguration = jobConfiguration + self.sessionConfiguration = sessionConfiguration + } + + public func validate(name: String) throws { + try self.jobConfiguration?.forEach { + try validate($0.key, name: "jobConfiguration.key", parent: name, max: 255) + try validate($0.key, name: "jobConfiguration.key", parent: name, min: 1) + try validate($0.key, name: "jobConfiguration.key", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try $0.value.validate(name: "\(name).jobConfiguration[\"\($0.key)\"]") + } + try self.sessionConfiguration?.forEach { + try validate($0.key, name: "sessionConfiguration.key", parent: name, max: 255) + try validate($0.key, name: "sessionConfiguration.key", parent: name, min: 1) + try validate($0.key, name: "sessionConfiguration.key", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try $0.value.validate(name: "\(name).sessionConfiguration[\"\($0.key)\"]") + } + } + + private enum CodingKeys: String, CodingKey { + case jobConfiguration = "JobConfiguration" + case sessionConfiguration = "SessionConfiguration" + } + } + public struct PropertyPredicate: AWSEncodableShape { /// The comparator used to compare this property to others. public let comparator: Comparator? @@ -18226,6 +18762,8 @@ extension Glue { public let maxCapacity: Double? /// The number of workers of a defined WorkerType to use for the session. public let numberOfWorkers: Int? + /// The name of an Glue usage profile associated with the session. + public let profileName: String? /// The code execution progress of the session. public let progress: Double? /// The name or Amazon Resource Name (ARN) of the IAM role associated with the Session. @@ -18237,7 +18775,7 @@ extension Glue { /// The type of predefined worker that is allocated when a session runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value Z.2X for Ray sessions. public let workerType: WorkerType? - public init(command: SessionCommand? = nil, completedOn: Date? = nil, connections: ConnectionsList? = nil, createdOn: Date? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, dpuSeconds: Double? = nil, errorMessage: String? = nil, executionTime: Double? = nil, glueVersion: String? = nil, id: String? = nil, idleTimeout: Int? = nil, maxCapacity: Double? = nil, numberOfWorkers: Int? = nil, progress: Double? = nil, role: String? = nil, securityConfiguration: String? = nil, status: SessionStatus? = nil, workerType: WorkerType? = nil) { + public init(command: SessionCommand? = nil, completedOn: Date? = nil, connections: ConnectionsList? = nil, createdOn: Date? = nil, defaultArguments: [String: String]? = nil, description: String? = nil, dpuSeconds: Double? = nil, errorMessage: String? = nil, executionTime: Double? = nil, glueVersion: String? = nil, id: String? = nil, idleTimeout: Int? = nil, maxCapacity: Double? = nil, numberOfWorkers: Int? = nil, profileName: String? = nil, progress: Double? = nil, role: String? = nil, securityConfiguration: String? = nil, status: SessionStatus? = nil, workerType: WorkerType? 
= nil) { self.command = command self.completedOn = completedOn self.connections = connections @@ -18252,6 +18790,7 @@ extension Glue { self.idleTimeout = idleTimeout self.maxCapacity = maxCapacity self.numberOfWorkers = numberOfWorkers + self.profileName = profileName self.progress = progress self.role = role self.securityConfiguration = securityConfiguration @@ -18274,6 +18813,7 @@ extension Glue { case idleTimeout = "IdleTimeout" case maxCapacity = "MaxCapacity" case numberOfWorkers = "NumberOfWorkers" + case profileName = "ProfileName" case progress = "Progress" case role = "Role" case securityConfiguration = "SecurityConfiguration" @@ -19221,7 +19761,7 @@ extension Glue { public let numberOfWorkers: Int? /// The name of the SecurityConfiguration structure to be used with this job run. public let securityConfiguration: String? - /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours). + /// The JobRun timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job. Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days. public let timeout: Int? /// The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. 
For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler. public let workerType: WorkerType? @@ -20067,12 +20607,14 @@ extension Glue { public let tableType: String? /// A TableIdentifier structure that describes a target table for resource linking. public let targetTable: TableIdentifier? + /// A structure that contains all the information that defines the view, including the dialect or dialects for the view, and the query. + public let viewDefinition: ViewDefinitionInput? /// Included for Apache Hive compatibility. Not used in the normal course of Glue operations. public let viewExpandedText: String? /// Included for Apache Hive compatibility. Not used in the normal course of Glue operations. If the table is a VIRTUAL_VIEW, certain Athena configuration encoded in base64. public let viewOriginalText: String? - public init(description: String? = nil, lastAccessTime: Date? = nil, lastAnalyzedTime: Date? = nil, name: String, owner: String? = nil, parameters: [String: String]? = nil, partitionKeys: [Column]? = nil, retention: Int? = nil, storageDescriptor: StorageDescriptor? = nil, tableType: String? = nil, targetTable: TableIdentifier? = nil, viewExpandedText: String? = nil, viewOriginalText: String? = nil) { + public init(description: String? = nil, lastAccessTime: Date? = nil, lastAnalyzedTime: Date? = nil, name: String, owner: String? = nil, parameters: [String: String]? = nil, partitionKeys: [Column]? = nil, retention: Int? = nil, storageDescriptor: StorageDescriptor? = nil, tableType: String? = nil, targetTable: TableIdentifier? = nil, viewDefinition: ViewDefinitionInput? = nil, viewExpandedText: String? = nil, viewOriginalText: String? = nil) { self.description = description self.lastAccessTime = lastAccessTime self.lastAnalyzedTime = lastAnalyzedTime @@ -20084,6 +20626,7 @@ extension Glue { self.storageDescriptor = storageDescriptor self.tableType = tableType self.targetTable = targetTable + self.viewDefinition = viewDefinition self.viewExpandedText = viewExpandedText self.viewOriginalText = viewOriginalText } @@ -20110,6 +20653,7 @@ extension Glue { try self.storageDescriptor?.validate(name: "\(name).storageDescriptor") try self.validate(self.tableType, name: "tableType", parent: name, max: 255) try self.targetTable?.validate(name: "\(name).targetTable") + try self.viewDefinition?.validate(name: "\(name).viewDefinition") try self.validate(self.viewExpandedText, name: "viewExpandedText", parent: name, max: 409600) try self.validate(self.viewOriginalText, name: "viewOriginalText", parent: name, max: 409600) } @@ -20126,6 +20670,7 @@ extension Glue { case storageDescriptor = "StorageDescriptor" case tableType = "TableType" case targetTable = "TargetTable" + case viewDefinition = "ViewDefinition" case viewExpandedText = "ViewExpandedText" case viewOriginalText = "ViewOriginalText" } @@ -21840,6 +22385,8 @@ extension Glue { public let catalogId: String? /// The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase. 
public let databaseName: String + /// A flag that can be set to true to ignore matching storage descriptor and subobject matching requirements. + public let force: Bool? /// By default, UpdateTable always creates an archived version of the table before updating it. However, if skipArchive is set to true, UpdateTable does not create the archived version. public let skipArchive: Bool? /// An updated TableInput object to define the metadata table in the catalog. @@ -21848,14 +22395,18 @@ extension Glue { public let transactionId: String? /// The version ID at which to update the table contents. public let versionId: String? + /// The operation to be performed when updating the view. + public let viewUpdateAction: ViewUpdateAction? - public init(catalogId: String? = nil, databaseName: String, skipArchive: Bool? = nil, tableInput: TableInput, transactionId: String? = nil, versionId: String? = nil) { + public init(catalogId: String? = nil, databaseName: String, force: Bool? = nil, skipArchive: Bool? = nil, tableInput: TableInput, transactionId: String? = nil, versionId: String? = nil, viewUpdateAction: ViewUpdateAction? = nil) { self.catalogId = catalogId self.databaseName = databaseName + self.force = force self.skipArchive = skipArchive self.tableInput = tableInput self.transactionId = transactionId self.versionId = versionId + self.viewUpdateAction = viewUpdateAction } public func validate(name: String) throws { @@ -21877,10 +22428,12 @@ extension Glue { private enum CodingKeys: String, CodingKey { case catalogId = "CatalogId" case databaseName = "DatabaseName" + case force = "Force" case skipArchive = "SkipArchive" case tableInput = "TableInput" case transactionId = "TransactionId" case versionId = "VersionId" + case viewUpdateAction = "ViewUpdateAction" } } @@ -21925,6 +22478,49 @@ extension Glue { } } + public struct UpdateUsageProfileRequest: AWSEncodableShape { + /// A ProfileConfiguration object specifying the job and session values for the profile. + public let configuration: ProfileConfiguration + /// A description of the usage profile. + public let description: String? + /// The name of the usage profile. + public let name: String + + public init(configuration: ProfileConfiguration, description: String? = nil, name: String) { + self.configuration = configuration + self.description = description + self.name = name + } + + public func validate(name: String) throws { + try self.configuration.validate(name: "\(name).configuration") + try self.validate(self.description, name: "description", parent: name, max: 2048) + try self.validate(self.description, name: "description", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + } + + private enum CodingKeys: String, CodingKey { + case configuration = "Configuration" + case description = "Description" + case name = "Name" + } + } + + public struct UpdateUsageProfileResponse: AWSDecodableShape { + /// The name of the usage profile that was updated. + public let name: String? + + public init(name: String? 
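
The UpdateTableRequest above gains Force and ViewUpdateAction to go with the new view shapes (ViewDefinitionInput and ViewRepresentationInput, defined further down). As a hedged sketch of how they might fit together, not part of the generated code: the database and table names are invented, and the .athena and .addOrReplace enum cases are assumptions, since the ViewDialect and ViewUpdateAction enums are defined elsewhere in this patch.

import SotoGlue

/// Sketch: register (or replace) an Athena SQL representation for a Glue view.
/// Assumes `glue` is a configured `Glue` client and that the `.athena` dialect and
/// `.addOrReplace` action cases exist as generated in this update.
func upsertSalesView(glue: Glue) async throws {
    // One representation per query engine dialect; only the original SQL is required here.
    let representation = Glue.ViewRepresentationInput(
        dialect: .athena,
        dialectVersion: "3",
        viewOriginalText: "SELECT region, SUM(amount) AS total FROM sales GROUP BY region"
    )
    let viewDefinition = Glue.ViewDefinitionInput(
        isProtected: true,
        representations: [representation]
    )
    let tableInput = Glue.TableInput(
        name: "sales_by_region",
        tableType: "VIRTUAL_VIEW",
        viewDefinition: viewDefinition
    )
    // ViewUpdateAction controls whether the representation is added, replaced, or dropped.
    _ = try await glue.updateTable(.init(
        databaseName: "analytics",
        tableInput: tableInput,
        viewUpdateAction: .addOrReplace
    ))
}
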
= nil) { + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + } + } + public struct UpdateUserDefinedFunctionRequest: AWSEncodableShape { /// The ID of the Data Catalog where the function to be updated is located. If none is provided, the Amazon Web Services account ID is used by default. public let catalogId: String? @@ -22072,6 +22668,31 @@ extension Glue { } } + public struct UsageProfileDefinition: AWSDecodableShape { + /// The date and time when the usage profile was created. + public let createdOn: Date? + /// A description of the usage profile. + public let description: String? + /// The date and time when the usage profile was last modified. + public let lastModifiedOn: Date? + /// The name of the usage profile. + public let name: String? + + public init(createdOn: Date? = nil, description: String? = nil, lastModifiedOn: Date? = nil, name: String? = nil) { + self.createdOn = createdOn + self.description = description + self.lastModifiedOn = lastModifiedOn + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case createdOn = "CreatedOn" + case description = "Description" + case lastModifiedOn = "LastModifiedOn" + case name = "Name" + } + } + public struct UserDefinedFunction: AWSDecodableShape { /// The ID of the Data Catalog in which the function resides. public let catalogId: String? @@ -22183,6 +22804,46 @@ extension Glue { } } + public struct ViewDefinitionInput: AWSEncodableShape { + /// The definer of a view in SQL. + public let definer: String? + /// You can set this flag as true to instruct the engine not to push user-provided operations into the logical plan of the view during query planning. However, setting this flag does not guarantee that the engine will comply. Refer to the engine's documentation to understand the guarantees provided, if any. + public let isProtected: Bool? + /// A list of structures that contains the dialect of the view, and the query that defines the view. + public let representations: [ViewRepresentationInput]? + /// A list of base table ARNs that make up the view. + public let subObjects: [String]? + + public init(definer: String? = nil, isProtected: Bool? = nil, representations: [ViewRepresentationInput]? = nil, subObjects: [String]? = nil) { + self.definer = definer + self.isProtected = isProtected + self.representations = representations + self.subObjects = subObjects + } + + public func validate(name: String) throws { + try self.validate(self.definer, name: "definer", parent: name, max: 2048) + try self.validate(self.definer, name: "definer", parent: name, min: 20) + try self.representations?.forEach { + try $0.validate(name: "\(name).representations[]") + } + try self.validate(self.representations, name: "representations", parent: name, max: 10) + try self.validate(self.representations, name: "representations", parent: name, min: 1) + try self.subObjects?.forEach { + try validate($0, name: "subObjects[]", parent: name, max: 2048) + try validate($0, name: "subObjects[]", parent: name, min: 20) + } + try self.validate(self.subObjects, name: "subObjects", parent: name, max: 10) + } + + private enum CodingKeys: String, CodingKey { + case definer = "Definer" + case isProtected = "IsProtected" + case representations = "Representations" + case subObjects = "SubObjects" + } + } + public struct ViewRepresentation: AWSDecodableShape { /// The dialect of the query engine. public let dialect: ViewDialect? @@ -22190,15 +22851,18 @@ extension Glue { public let dialectVersion: String? 
/// Dialects marked as stale are no longer valid and must be updated before they can be queried in their respective query engines. public let isStale: Bool? - /// The expanded SQL for the view. This SQL is used by engines while processing a query on a view. Engines may perform operations during view creation to transform ViewOriginalText to ViewExpandedText. For example: Fully qualify identifiers: SELECT * from table1 → SELECT * from db1.table1 + /// The name of the connection to be used to validate the specific representation of the view. + public let validationConnection: String? + /// The expanded SQL for the view. This SQL is used by engines while processing a query on a view. Engines may perform operations during view creation to transform ViewOriginalText to ViewExpandedText. For example: Fully qualified identifiers: SELECT * from table1 -> SELECT * from db1.table1 public let viewExpandedText: String? /// The SELECT query provided by the customer during CREATE VIEW DDL. This SQL is not used during a query on a view (ViewExpandedText is used instead). ViewOriginalText is used for cases like SHOW CREATE VIEW where users want to see the original DDL command that created the view. public let viewOriginalText: String? - public init(dialect: ViewDialect? = nil, dialectVersion: String? = nil, isStale: Bool? = nil, viewExpandedText: String? = nil, viewOriginalText: String? = nil) { + public init(dialect: ViewDialect? = nil, dialectVersion: String? = nil, isStale: Bool? = nil, validationConnection: String? = nil, viewExpandedText: String? = nil, viewOriginalText: String? = nil) { self.dialect = dialect self.dialectVersion = dialectVersion self.isStale = isStale + self.validationConnection = validationConnection self.viewExpandedText = viewExpandedText self.viewOriginalText = viewOriginalText } @@ -22207,6 +22871,46 @@ extension Glue { case dialect = "Dialect" case dialectVersion = "DialectVersion" case isStale = "IsStale" + case validationConnection = "ValidationConnection" + case viewExpandedText = "ViewExpandedText" + case viewOriginalText = "ViewOriginalText" + } + } + + public struct ViewRepresentationInput: AWSEncodableShape { + /// A parameter that specifies the engine type of a specific representation. + public let dialect: ViewDialect? + /// A parameter that specifies the version of the engine of a specific representation. + public let dialectVersion: String? + /// The name of the connection to be used to validate the specific representation of the view. + public let validationConnection: String? + /// A string that represents the SQL query that describes the view with expanded resource ARNs + public let viewExpandedText: String? + /// A string that represents the original SQL query that describes the view. + public let viewOriginalText: String? + + public init(dialect: ViewDialect? = nil, dialectVersion: String? = nil, validationConnection: String? = nil, viewExpandedText: String? = nil, viewOriginalText: String? 
= nil) { + self.dialect = dialect + self.dialectVersion = dialectVersion + self.validationConnection = validationConnection + self.viewExpandedText = viewExpandedText + self.viewOriginalText = viewOriginalText + } + + public func validate(name: String) throws { + try self.validate(self.dialectVersion, name: "dialectVersion", parent: name, max: 255) + try self.validate(self.dialectVersion, name: "dialectVersion", parent: name, min: 1) + try self.validate(self.validationConnection, name: "validationConnection", parent: name, max: 255) + try self.validate(self.validationConnection, name: "validationConnection", parent: name, min: 1) + try self.validate(self.validationConnection, name: "validationConnection", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.validate(self.viewExpandedText, name: "viewExpandedText", parent: name, max: 409600) + try self.validate(self.viewOriginalText, name: "viewOriginalText", parent: name, max: 409600) + } + + private enum CodingKeys: String, CodingKey { + case dialect = "Dialect" + case dialectVersion = "DialectVersion" + case validationConnection = "ValidationConnection" case viewExpandedText = "ViewExpandedText" case viewOriginalText = "ViewOriginalText" } @@ -22433,6 +23137,7 @@ public struct GlueErrorType: AWSErrorType { case invalidStateException = "InvalidStateException" case mlTransformNotReadyException = "MLTransformNotReadyException" case noScheduleException = "NoScheduleException" + case operationNotSupportedException = "OperationNotSupportedException" case operationTimeoutException = "OperationTimeoutException" case permissionTypeMismatchException = "PermissionTypeMismatchException" case resourceNotReadyException = "ResourceNotReadyException" @@ -22514,6 +23219,8 @@ public struct GlueErrorType: AWSErrorType { public static var mlTransformNotReadyException: Self { .init(.mlTransformNotReadyException) } /// There is no applicable schedule. public static var noScheduleException: Self { .init(.noScheduleException) } + /// The operation is not available in the region. + public static var operationNotSupportedException: Self { .init(.operationNotSupportedException) } /// The operation timed out. public static var operationTimeoutException: Self { .init(.operationTimeoutException) } /// The operation timed out. diff --git a/Sources/Soto/Services/Grafana/Grafana_api.swift b/Sources/Soto/Services/Grafana/Grafana_api.swift index 86000cc517..dd928efb37 100644 --- a/Sources/Soto/Services/Grafana/Grafana_api.swift +++ b/Sources/Soto/Services/Grafana/Grafana_api.swift @@ -87,7 +87,7 @@ public struct Grafana: AWSService { // MARK: API Calls - /// Assigns a Grafana Enterprise license to a workspace. Upgrading to Grafana Enterprise incurs additional fees. For more information, see Upgrade a workspace to Grafana Enterprise. + /// Assigns a Grafana Enterprise license to a workspace. To upgrade, you must use ENTERPRISE for the licenseType, and pass in a valid Grafana Labs token for the grafanaToken. Upgrading to Grafana Enterprise incurs additional fees. For more information, see Upgrade a workspace to Grafana Enterprise. @Sendable public func associateLicense(_ input: AssociateLicenseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateLicenseResponse { return try await self.client.execute( @@ -113,7 +113,7 @@ public struct Grafana: AWSService { ) } - /// Creates a Grafana API key for the workspace. This key can be used to authenticate requests sent to the workspace's HTTP API. 
See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html for available APIs and example requests. + /// Creates a Grafana API key for the workspace. This key can be used to authenticate requests sent to the workspace's HTTP API. See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html for available APIs and example requests. In workspaces compatible with Grafana version 9 or above, use workspace service accounts instead of API keys. API keys will be removed in a future release. @Sendable public func createWorkspaceApiKey(_ input: CreateWorkspaceApiKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorkspaceApiKeyResponse { return try await self.client.execute( @@ -126,6 +126,32 @@ public struct Grafana: AWSService { ) } + /// Creates a service account for the workspace. A service account can be used to call Grafana HTTP APIs, and run automated workloads. After creating the service account with the correct GrafanaRole for your use case, use CreateWorkspaceServiceAccountToken to create a token that can be used to authenticate and authorize Grafana HTTP API calls. You can only create service accounts for workspaces that are compatible with Grafana version 9 and above. For more information about service accounts, see Service accounts in the Amazon Managed Grafana User Guide. For more information about the Grafana HTTP APIs, see Using Grafana HTTP APIs in the Amazon Managed Grafana User Guide. + @Sendable + public func createWorkspaceServiceAccount(_ input: CreateWorkspaceServiceAccountRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorkspaceServiceAccountResponse { + return try await self.client.execute( + operation: "CreateWorkspaceServiceAccount", + path: "/workspaces/{workspaceId}/serviceaccounts", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a token that can be used to authenticate and authorize Grafana HTTP API operations for the given workspace service account. The service account acts as a user for the API operations, and defines the permissions that are used by the API. When you create the service account token, you will receive a key that is used when calling Grafana APIs. Do not lose this key, as it will not be retrievable again. If you do lose the key, you can delete the token and recreate it to receive a new key. This will disable the initial key. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above. + @Sendable + public func createWorkspaceServiceAccountToken(_ input: CreateWorkspaceServiceAccountTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorkspaceServiceAccountTokenResponse { + return try await self.client.execute( + operation: "CreateWorkspaceServiceAccountToken", + path: "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an Amazon Managed Grafana workspace. @Sendable public func deleteWorkspace(_ input: DeleteWorkspaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWorkspaceResponse { @@ -139,7 +165,7 @@ public struct Grafana: AWSService { ) } - /// Deletes a Grafana API key for the workspace. + /// Deletes a Grafana API key for the workspace. In workspaces compatible with Grafana version 9 or above, use workspace service accounts instead of API keys. 
API keys will be removed in a future release. @Sendable public func deleteWorkspaceApiKey(_ input: DeleteWorkspaceApiKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWorkspaceApiKeyResponse { return try await self.client.execute( operation: "DeleteWorkspaceApiKey", path: "/workspaces/{workspaceId}/apikeys/{keyName}", httpMethod: .DELETE, serviceConfig: self.config, input: input, logger: logger ) } + /// Deletes a workspace service account from the workspace. This will delete any tokens created for the service account, as well. If the tokens are currently in use, they will fail to authenticate or authorize after they are deleted. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above. + @Sendable + public func deleteWorkspaceServiceAccount(_ input: DeleteWorkspaceServiceAccountRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWorkspaceServiceAccountResponse { + return try await self.client.execute( + operation: "DeleteWorkspaceServiceAccount", + path: "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes a token for the workspace service account. This will disable the key associated with the token. If any automation is currently using the key, it will no longer be authenticated or authorized to perform actions with the Grafana HTTP APIs. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above. + @Sendable + public func deleteWorkspaceServiceAccountToken(_ input: DeleteWorkspaceServiceAccountTokenRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWorkspaceServiceAccountTokenResponse { + return try await self.client.execute( + operation: "DeleteWorkspaceServiceAccountToken", + path: "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens/{tokenId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Displays information about one Amazon Managed Grafana workspace. @Sendable public func describeWorkspace(_ input: DescribeWorkspaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWorkspaceResponse { @@ -243,6 +295,32 @@ public struct Grafana: AWSService { ) } + /// Returns a list of tokens for a workspace service account. This does not return the key for each token. You cannot access keys after they are created. To create a new key, delete the token and recreate it. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above. + @Sendable + public func listWorkspaceServiceAccountTokens(_ input: ListWorkspaceServiceAccountTokensRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorkspaceServiceAccountTokensResponse { + return try await self.client.execute( + operation: "ListWorkspaceServiceAccountTokens", + path: "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of service accounts for a workspace. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above.
+ @Sendable + public func listWorkspaceServiceAccounts(_ input: ListWorkspaceServiceAccountsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorkspaceServiceAccountsResponse { + return try await self.client.execute( + operation: "ListWorkspaceServiceAccounts", + path: "/workspaces/{workspaceId}/serviceaccounts", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of Amazon Managed Grafana workspaces in the account, with some information about each workspace. For more complete information about one workspace, use DescribeWorkspace. @Sendable public func listWorkspaces(_ input: ListWorkspacesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorkspacesResponse { @@ -386,6 +464,44 @@ extension Grafana { ) } + /// Returns a list of tokens for a workspace service account. This does not return the key for each token. You cannot access keys after they are created. To create a new key, delete the token and recreate it. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listWorkspaceServiceAccountTokensPaginator( + _ input: ListWorkspaceServiceAccountTokensRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListWorkspaceServiceAccountTokensRequest, ListWorkspaceServiceAccountTokensResponse> { + return .init( + input: input, + command: self.listWorkspaceServiceAccountTokens, + inputKey: \ListWorkspaceServiceAccountTokensRequest.nextToken, + outputKey: \ListWorkspaceServiceAccountTokensResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of service accounts for a workspace. Service accounts are only available for workspaces that are compatible with Grafana version 9 and above. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listWorkspaceServiceAccountsPaginator( + _ input: ListWorkspaceServiceAccountsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListWorkspaceServiceAccountsRequest, ListWorkspaceServiceAccountsResponse> { + return .init( + input: input, + command: self.listWorkspaceServiceAccounts, + inputKey: \ListWorkspaceServiceAccountsRequest.nextToken, + outputKey: \ListWorkspaceServiceAccountsResponse.nextToken, + logger: logger + ) + } + /// Returns a list of Amazon Managed Grafana workspaces in the account, with some information about each workspace. For more complete information about one workspace, use DescribeWorkspace. /// Return PaginatorSequence for operation.
/// @@ -429,6 +545,27 @@ extension Grafana.ListVersionsRequest: AWSPaginateToken { } } +extension Grafana.ListWorkspaceServiceAccountTokensRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Grafana.ListWorkspaceServiceAccountTokensRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + serviceAccountId: self.serviceAccountId, + workspaceId: self.workspaceId + ) + } +} + +extension Grafana.ListWorkspaceServiceAccountsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Grafana.ListWorkspaceServiceAccountsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + workspaceId: self.workspaceId + ) + } +} + extension Grafana.ListWorkspacesRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Grafana.ListWorkspacesRequest { return .init( diff --git a/Sources/Soto/Services/Grafana/Grafana_shapes.swift b/Sources/Soto/Services/Grafana/Grafana_shapes.swift index 19a9a09366..fa86328611 100644 --- a/Sources/Soto/Services/Grafana/Grafana_shapes.swift +++ b/Sources/Soto/Services/Grafana/Grafana_shapes.swift @@ -252,7 +252,7 @@ extension Grafana { } public struct AssociateLicenseRequest: AWSEncodableShape { - /// A token from Grafana Labs that ties your Amazon Web Services account with a Grafana Labs account. For more information, see Register with Grafana Labs. + /// A token from Grafana Labs that ties your Amazon Web Services account with a Grafana Labs account. For more information, see Link your account with Grafana Labs. public let grafanaToken: String? /// The type of license to associate with the workspace. Amazon Managed Grafana workspaces no longer support Grafana Enterprise free trials. public let licenseType: LicenseType @@ -349,7 +349,7 @@ extension Grafana { public struct CreateWorkspaceApiKeyRequest: AWSEncodableShape { /// Specifies the name of the key. Keynames must be unique to the workspace. public let keyName: String - /// Specifies the permission level of the key. Valid values: VIEWER|EDITOR|ADMIN + /// Specifies the permission level of the key. Valid values: ADMIN|EDITOR|VIEWER public let keyRole: String /// Specifies the time in seconds until the key expires. Keys can be valid for up to 30 days. public let secondsToLive: Int @@ -415,7 +415,7 @@ extension Grafana { public let clientToken: String? /// The configuration string for the workspace that you create. For more information about the format and configuration options available, see Working in your Grafana workspace. public let configuration: String? - /// Specifies the version of Grafana to support in the new workspace. If not specified, defaults to the latest version (for example, 9.4). To get a list of supported versions, use the ListVersions operation. + /// Specifies the version of Grafana to support in the new workspace. If not specified, defaults to the latest version (for example, 10.4). To get a list of supported versions, use the ListVersions operation. public let grafanaVersion: String? /// Configuration for network access to your workspace. When this is configured, only listed IP addresses and VPC endpoints will be able to access your workspace. Standard Grafana authentication and authorization will still be required. If this is not configured, or is removed, then all IP addresses and VPC endpoints will be allowed. Standard Grafana authentication and authorization will still be required. public let networkAccessControl: NetworkAccessConfiguration? 
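Not part of the generated diff: a minimal usage sketch of the workspace service account operations added above, included for orientation only. It assumes an already-configured AWSClient, a workspace running Grafana version 9 or later (the ID g-0123456789 below is a hypothetical placeholder), an .editor case on the existing Role enum, and the SotoGrafana product name that follows the package's per-service naming.

import SotoGrafana

// Sketch: create a service account, mint a token for it, then list accounts
// with the new paginator. The returned key is shown only once, so persist it
// somewhere safe instead of logging it.
func provisionServiceAccount(using client: AWSClient) async throws {
    let grafana = Grafana(client: client, region: .useast1)

    // 1. Create the service account with the EDITOR permission level.
    let account = try await grafana.createWorkspaceServiceAccount(
        .init(grafanaRole: .editor, name: "deploy-automation", workspaceId: "g-0123456789")
    )

    // 2. Create a token; serviceAccountToken.key authenticates Grafana HTTP API calls.
    let token = try await grafana.createWorkspaceServiceAccountToken(
        .init(
            name: "ci-token",
            secondsToLive: 3600,
            serviceAccountId: account.id,
            workspaceId: account.workspaceId
        )
    )
    _ = token.serviceAccountToken.key // store securely; it cannot be retrieved again

    // 3. Page through the workspace's service accounts.
    for try await page in grafana.listWorkspaceServiceAccountsPaginator(
        .init(workspaceId: account.workspaceId)
    ) {
        for serviceAccount in page.serviceAccounts {
            print("\(serviceAccount.id) \(serviceAccount.name) \(serviceAccount.grafanaRole)")
        }
    }
}

If the key is lost, calling deleteWorkspaceServiceAccountToken and then createWorkspaceServiceAccountToken issues a replacement, which also disables the old key.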
@@ -518,6 +518,124 @@ extension Grafana { } } + public struct CreateWorkspaceServiceAccountRequest: AWSEncodableShape { + /// The permission level to use for this service account. For more information about the roles and the permissions each has, see User roles in the Amazon Managed Grafana User Guide. + public let grafanaRole: Role + /// A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account. + public let name: String + /// The ID of the workspace within which to create the service account. + public let workspaceId: String + + public init(grafanaRole: Role, name: String, workspaceId: String) { + self.grafanaRole = grafanaRole + self.name = name + self.workspaceId = workspaceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.grafanaRole, forKey: .grafanaRole) + try container.encode(self.name, forKey: .name) + request.encodePath(self.workspaceId, key: "workspaceId") + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^g-[0-9a-f]{10}$") + } + + private enum CodingKeys: String, CodingKey { + case grafanaRole = "grafanaRole" + case name = "name" + } + } + + public struct CreateWorkspaceServiceAccountResponse: AWSDecodableShape { + /// The permission level given to the service account. + public let grafanaRole: Role + /// The ID of the service account. + public let id: String + /// The name of the service account. + public let name: String + /// The workspace with which the service account is associated. + public let workspaceId: String + + public init(grafanaRole: Role, id: String, name: String, workspaceId: String) { + self.grafanaRole = grafanaRole + self.id = id + self.name = name + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case grafanaRole = "grafanaRole" + case id = "id" + case name = "name" + case workspaceId = "workspaceId" + } + } + + public struct CreateWorkspaceServiceAccountTokenRequest: AWSEncodableShape { + /// A name for the token to create. + public let name: String + /// Sets how long the token will be valid, in seconds. You can set the time up to 30 days in the future. + public let secondsToLive: Int + /// The ID of the service account for which to create a token. + public let serviceAccountId: String + /// The ID of the workspace the service account resides within. + public let workspaceId: String + + public init(name: String, secondsToLive: Int, serviceAccountId: String, workspaceId: String) { + self.name = name + self.secondsToLive = secondsToLive + self.serviceAccountId = serviceAccountId + self.workspaceId = workspaceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.name, forKey: .name) + try container.encode(self.secondsToLive, forKey: .secondsToLive) + request.encodePath(self.serviceAccountId, key: "serviceAccountId") + request.encodePath(self.workspaceId, key: "workspaceId") + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^g-[0-9a-f]{10}$") + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case secondsToLive = "secondsToLive" + } + } + + public struct CreateWorkspaceServiceAccountTokenResponse: AWSDecodableShape { + /// The ID of the service account where the token was created. + public let serviceAccountId: String + /// Information about the created token, including the key. Be sure to store the key securely. + public let serviceAccountToken: ServiceAccountTokenSummaryWithKey + /// The ID of the workspace where the token was created. + public let workspaceId: String + + public init(serviceAccountId: String, serviceAccountToken: ServiceAccountTokenSummaryWithKey, workspaceId: String) { + self.serviceAccountId = serviceAccountId + self.serviceAccountToken = serviceAccountToken + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case serviceAccountId = "serviceAccountId" + case serviceAccountToken = "serviceAccountToken" + case workspaceId = "workspaceId" + } + } + public struct DeleteWorkspaceApiKeyRequest: AWSEncodableShape { /// The name of the API key to delete. public let keyName: String @@ -596,6 +714,98 @@ extension Grafana { } } + public struct DeleteWorkspaceServiceAccountRequest: AWSEncodableShape { + /// The ID of the service account to delete. + public let serviceAccountId: String + /// The ID of the workspace where the service account resides. + public let workspaceId: String + + public init(serviceAccountId: String, workspaceId: String) { + self.serviceAccountId = serviceAccountId + self.workspaceId = workspaceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.serviceAccountId, key: "serviceAccountId") + request.encodePath(self.workspaceId, key: "workspaceId") + } + + public func validate(name: String) throws { + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^g-[0-9a-f]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteWorkspaceServiceAccountResponse: AWSDecodableShape { + /// The ID of the service account deleted. + public let serviceAccountId: String + /// The ID of the workspace where the service account was deleted. + public let workspaceId: String + + public init(serviceAccountId: String, workspaceId: String) { + self.serviceAccountId = serviceAccountId + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case serviceAccountId = "serviceAccountId" + case workspaceId = "workspaceId" + } + } + + public struct DeleteWorkspaceServiceAccountTokenRequest: AWSEncodableShape { + /// The ID of the service account from which to delete the token. + public let serviceAccountId: String + /// The ID of the token to delete. 
+ public let tokenId: String + /// The ID of the workspace from which to delete the token. + public let workspaceId: String + + public init(serviceAccountId: String, tokenId: String, workspaceId: String) { + self.serviceAccountId = serviceAccountId + self.tokenId = tokenId + self.workspaceId = workspaceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.serviceAccountId, key: "serviceAccountId") + request.encodePath(self.tokenId, key: "tokenId") + request.encodePath(self.workspaceId, key: "workspaceId") + } + + public func validate(name: String) throws { + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^g-[0-9a-f]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteWorkspaceServiceAccountTokenResponse: AWSDecodableShape { + /// The ID of the service account where the token was deleted. + public let serviceAccountId: String + /// The ID of the token that was deleted. + public let tokenId: String + /// The ID of the workspace where the token was deleted. + public let workspaceId: String + + public init(serviceAccountId: String, tokenId: String, workspaceId: String) { + self.serviceAccountId = serviceAccountId + self.tokenId = tokenId + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case serviceAccountId = "serviceAccountId" + case tokenId = "tokenId" + case workspaceId = "workspaceId" + } + } + public struct DescribeWorkspaceAuthenticationRequest: AWSEncodableShape { /// The ID of the workspace to return authentication information about. public let workspaceId: String @@ -878,6 +1088,114 @@ extension Grafana { } } + public struct ListWorkspaceServiceAccountTokensRequest: AWSEncodableShape { + /// The maximum number of tokens to include in the results. + public let maxResults: Int? + /// The token for the next set of service accounts to return. (You receive this token from a previous ListWorkspaceServiceAccountTokens operation.) + public let nextToken: String? + /// The ID of the service account for which to return tokens. + public let serviceAccountId: String + /// The ID of the workspace for which to return tokens. + public let workspaceId: String + + public init(maxResults: Int? = nil, nextToken: String? = nil, serviceAccountId: String, workspaceId: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.serviceAccountId = serviceAccountId + self.workspaceId = workspaceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.serviceAccountId, key: "serviceAccountId") + request.encodePath(self.workspaceId, key: "workspaceId") + } + + public func validate(name: String) throws { + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^g-[0-9a-f]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListWorkspaceServiceAccountTokensResponse: AWSDecodableShape { + /// The token to use when requesting the next set of service accounts. + public let nextToken: String? + /// The ID of the service account where the tokens reside. 
+ public let serviceAccountId: String + /// An array of structures containing information about the tokens. + public let serviceAccountTokens: [ServiceAccountTokenSummary] + /// The ID of the workspace where the tokens reside. + public let workspaceId: String + + public init(nextToken: String? = nil, serviceAccountId: String, serviceAccountTokens: [ServiceAccountTokenSummary], workspaceId: String) { + self.nextToken = nextToken + self.serviceAccountId = serviceAccountId + self.serviceAccountTokens = serviceAccountTokens + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case serviceAccountId = "serviceAccountId" + case serviceAccountTokens = "serviceAccountTokens" + case workspaceId = "workspaceId" + } + } + + public struct ListWorkspaceServiceAccountsRequest: AWSEncodableShape { + /// The maximum number of service accounts to include in the results. + public let maxResults: Int? + /// The token for the next set of service accounts to return. (You receive this token from a previous ListWorkspaceServiceAccounts operation.) + public let nextToken: String? + /// The workspace for which to list service accounts. + public let workspaceId: String + + public init(maxResults: Int? = nil, nextToken: String? = nil, workspaceId: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.workspaceId = workspaceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.workspaceId, key: "workspaceId") + } + + public func validate(name: String) throws { + try self.validate(self.workspaceId, name: "workspaceId", parent: name, pattern: "^g-[0-9a-f]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListWorkspaceServiceAccountsResponse: AWSDecodableShape { + /// The token to use when requesting the next set of service accounts. + public let nextToken: String? + /// An array of structures containing information about the service accounts. + public let serviceAccounts: [ServiceAccountSummary] + /// The workspace to which the service accounts are associated. + public let workspaceId: String + + public init(nextToken: String? = nil, serviceAccounts: [ServiceAccountSummary], workspaceId: String) { + self.nextToken = nextToken + self.serviceAccounts = serviceAccounts + self.workspaceId = workspaceId + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case serviceAccounts = "serviceAccounts" + case workspaceId = "workspaceId" + } + } + public struct ListWorkspacesRequest: AWSEncodableShape { /// The maximum number of workspaces to include in the results. public let maxResults: Int? @@ -1045,6 +1363,81 @@ extension Grafana { } } + public struct ServiceAccountSummary: AWSDecodableShape { + /// The role of the service account, which sets the permission level used when calling Grafana APIs. + public let grafanaRole: Role + /// The unique ID of the service account. + public let id: String + /// Returns true if the service account is disabled. Service accounts can be disabled and enabled in the Amazon Managed Grafana console. + public let isDisabled: String + /// The name of the service account. 
+ public let name: String + + public init(grafanaRole: Role, id: String, isDisabled: String, name: String) { + self.grafanaRole = grafanaRole + self.id = id + self.isDisabled = isDisabled + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case grafanaRole = "grafanaRole" + case id = "id" + case isDisabled = "isDisabled" + case name = "name" + } + } + + public struct ServiceAccountTokenSummary: AWSDecodableShape { + /// When the service account token was created. + public let createdAt: Date + /// When the service account token will expire. + public let expiresAt: Date + /// The unique ID of the service account token. + public let id: String + /// The last time the token was used to authorize a Grafana HTTP API. + public let lastUsedAt: Date? + /// The name of the service account token. + public let name: String + + public init(createdAt: Date, expiresAt: Date, id: String, lastUsedAt: Date? = nil, name: String) { + self.createdAt = createdAt + self.expiresAt = expiresAt + self.id = id + self.lastUsedAt = lastUsedAt + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "createdAt" + case expiresAt = "expiresAt" + case id = "id" + case lastUsedAt = "lastUsedAt" + case name = "name" + } + } + + public struct ServiceAccountTokenSummaryWithKey: AWSDecodableShape { + /// The unique ID of the service account token. + public let id: String + /// The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests. + public let key: String + /// The name of the service account token. + public let name: String + + public init(id: String, key: String, name: String) { + self.id = id + self.key = key + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case id = "id" + case key = "key" + case name = "name" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// The ARN of the resource the tag is associated with. public let resourceArn: String @@ -1473,7 +1866,7 @@ extension Grafana { public let freeTrialConsumed: Bool? /// If this workspace is currently in the free trial period for Grafana Enterprise, this value specifies when that free trial ends. Amazon Managed Grafana workspaces no longer support Grafana Enterprise free trials. public let freeTrialExpiration: Date? - /// The token that ties this workspace to a Grafana Labs account. For more information, see Register with Grafana Labs. + /// The token that ties this workspace to a Grafana Labs account. For more information, see Link your account with Grafana Labs. public let grafanaToken: String? /// The version of Grafana supported in this workspace. public let grafanaVersion: String @@ -1574,7 +1967,7 @@ extension Grafana { public let description: String? /// The URL endpoint to use to access the Grafana console in the workspace. public let endpoint: String - /// The token that ties this workspace to a Grafana Labs account. For more information, see Register with Grafana Labs. + /// The token that ties this workspace to a Grafana Labs account. For more information, see Link your account with Grafana Labs. public let grafanaToken: String? /// The Grafana version that the workspace is running. 
public let grafanaVersion: String diff --git a/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift b/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift index 69d1b95dd3..7e0646967c 100644 --- a/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift +++ b/Sources/Soto/Services/GreengrassV2/GreengrassV2_api.swift @@ -143,7 +143,7 @@ public struct GreengrassV2: AWSService { ) } - /// Creates a component. Components are software that run on Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to IoT Greengrass. Then, you can deploy the component to other core devices. You can use this operation to do the following: Create components from recipes Create a component from a recipe, which is a file that defines the component's metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For more information, see IoT Greengrass component recipe reference in the IoT Greengrass V2 Developer Guide. To create a component from a recipe, specify inlineRecipe when you call this operation. Create components from Lambda functions Create a component from an Lambda function that runs on IoT Greengrass. This creates a recipe and artifacts from the Lambda function's deployment package. You can use this operation to migrate Lambda functions from IoT Greengrass V1 to IoT Greengrass V2. This function only accepts Lambda functions that use the following runtimes: Python 2.7 – python2.7 Python 3.7 – python3.7 Python 3.8 – python3.8 Python 3.9 – python3.9 Java 8 – java8 Java 11 – java11 Node.js 10 – nodejs10.x Node.js 12 – nodejs12.x Node.js 14 – nodejs14.x To create a component from a Lambda function, specify lambdaFunction when you call this operation. IoT Greengrass currently supports Lambda functions on only Linux core devices. + /// Creates a component. Components are software that run on Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to IoT Greengrass. Then, you can deploy the component to other core devices. You can use this operation to do the following: Create components from recipes Create a component from a recipe, which is a file that defines the component's metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For more information, see IoT Greengrass component recipe reference in the IoT Greengrass V2 Developer Guide. To create a component from a recipe, specify inlineRecipe when you call this operation. Create components from Lambda functions Create a component from an Lambda function that runs on IoT Greengrass. This creates a recipe and artifacts from the Lambda function's deployment package. You can use this operation to migrate Lambda functions from IoT Greengrass V1 to IoT Greengrass V2. This function accepts Lambda functions in all supported versions of Python, Node.js, and Java runtimes. IoT Greengrass doesn't apply any additional restrictions on deprecated Lambda runtime versions. To create a component from a Lambda function, specify lambdaFunction when you call this operation. IoT Greengrass currently supports Lambda functions on only Linux core devices. 
@Sendable public func createComponentVersion(_ input: CreateComponentVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateComponentVersionResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift b/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift index 3482f73902..ac2e60cbcf 100644 --- a/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift +++ b/Sources/Soto/Services/GreengrassV2/GreengrassV2_shapes.swift @@ -123,6 +123,12 @@ extension GreengrassV2 { public var description: String { return self.rawValue } } + public enum IotEndpointType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fips = "fips" + case standard = "standard" + public var description: String { return self.rawValue } + } + public enum LambdaEventSourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case iotCore = "IOT_CORE" case pubSub = "PUB_SUB" @@ -153,6 +159,12 @@ extension GreengrassV2 { public var description: String { return self.rawValue } } + public enum S3EndpointType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case global = "GLOBAL" + case regional = "REGIONAL" + public var description: String { return self.rawValue } + } + public enum VendorGuidance: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case deleted = "DELETED" @@ -500,13 +512,13 @@ extension GreengrassV2 { public struct ComponentDeploymentSpecification: AWSEncodableShape & AWSDecodableShape { /// The version of the component. - public let componentVersion: String? + public let componentVersion: String /// The configuration updates to deploy for the component. You can define reset updates and merge updates. A reset updates the keys that you specify to the default configuration for the component. A merge updates the core device's component configuration with the keys and values that you specify. The IoT Greengrass Core software applies reset updates before it applies merge updates. For more information, see Update component configurations in the IoT Greengrass V2 Developer Guide. public let configurationUpdate: ComponentConfigurationUpdate? /// The system user and group that the IoT Greengrass Core software uses to run component processes on the core device. If you omit this parameter, the IoT Greengrass Core software uses the system user and group that you configure for the core device. For more information, see Configure the user and group that run components in the IoT Greengrass V2 Developer Guide. public let runWith: ComponentRunWith? - public init(componentVersion: String? = nil, configurationUpdate: ComponentConfigurationUpdate? = nil, runWith: ComponentRunWith? = nil) { + public init(componentVersion: String, configurationUpdate: ComponentConfigurationUpdate? = nil, runWith: ComponentRunWith? = nil) { self.componentVersion = componentVersion self.configurationUpdate = configurationUpdate self.runWith = runWith @@ -1260,10 +1272,16 @@ extension GreengrassV2 { public let arn: String /// The name of the artifact. You can use the GetComponent operation to download the component recipe, which includes the URI of the artifact. The artifact name is the section of the URI after the scheme. For example, in the artifact URI greengrass:SomeArtifact.zip, the artifact name is SomeArtifact.zip. 
public let artifactName: String + /// Determines if the Amazon S3 URL returned is a FIPS pre-signed URL endpoint. Specify fips if you want the returned Amazon S3 pre-signed URL to point to an Amazon S3 FIPS endpoint. If you don't specify a value, the default is standard. + public let iotEndpointType: IotEndpointType? + /// Specifies the endpoint to use when getting Amazon S3 pre-signed URLs. All Amazon Web Services Regions except US East (N. Virginia) use REGIONAL in all cases. In the US East (N. Virginia) Region the default is GLOBAL, but you can change it to REGIONAL with this parameter. + public let s3EndpointType: S3EndpointType? - public init(arn: String, artifactName: String) { + public init(arn: String, artifactName: String, iotEndpointType: IotEndpointType? = nil, s3EndpointType: S3EndpointType? = nil) { self.arn = arn self.artifactName = artifactName + self.iotEndpointType = iotEndpointType + self.s3EndpointType = s3EndpointType } public func encode(to encoder: Encoder) throws { @@ -1271,6 +1289,8 @@ extension GreengrassV2 { _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.arn, key: "arn") request.encodePath(self.artifactName, key: "artifactName") + request.encodeHeader(self.iotEndpointType, key: "x-amz-iot-endpoint-type") + request.encodeQuery(self.s3EndpointType, key: "s3EndpointType") } public func validate(name: String) throws { @@ -1506,7 +1526,7 @@ extension GreengrassV2 { public let componentVersion: String? /// Whether or not the component is a root component. public let isRoot: Bool? - /// The most recent deployment source that brought the component to the Greengrass core device. For a thing group deployment or thing deployment, the source will be the The ID of the deployment. and for local deployments it will be LOCAL. Any deployment will attempt to reinstall currently broken components on the device, which will update the last installation source. + /// The most recent deployment source that brought the component to the Greengrass core device. For a thing group deployment or thing deployment, the source will be the ID of the last deployment that contained the component. For local deployments it will be LOCAL. Any deployment will attempt to reinstall currently broken components on the device, which will update the last installation source. public let lastInstallationSource: String? /// The last time the Greengrass core device sent a message containing a component's state to the Amazon Web Services Cloud. A component does not need to see a state change for this field to update. public let lastReportedTimestamp: Date? @@ -2093,7 +2113,7 @@ extension GreengrassV2 { public struct ListDeploymentsRequest: AWSEncodableShape { /// The filter for the list of deployments. Choose one of the following options: ALL – The list includes all deployments. LATEST_ONLY – The list includes only the latest revision of each deployment. Default: LATEST_ONLY public let historyFilter: DeploymentHistoryFilter? - /// The maximum number of results to be returned per paginated request. + /// The maximum number of results to be returned per paginated request. Default: 50 public let maxResults: Int? /// The token to be used for the next set of paginated results. public let nextToken: String? 
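Not part of the generated diff: a brief sketch of the new endpoint-type options on GetComponentVersionArtifact, included for orientation. It assumes an already-configured AWSClient and the existing getComponentVersionArtifact operation whose response exposes preSignedUrl, as in the current GreengrassV2 module; the component ARN and artifact name below are hypothetical placeholders.

import SotoGreengrassV2

// Sketch: request a pre-signed artifact URL, opting into a FIPS IoT endpoint and
// the Regional S3 endpoint (relevant in us-east-1, where GLOBAL is the default).
func artifactDownloadURL(using client: AWSClient) async throws -> String {
    let greengrass = GreengrassV2(client: client, region: .useast1)
    let response = try await greengrass.getComponentVersionArtifact(
        .init(
            arn: "arn:aws:greengrass:us-east-1:123456789012:components:com.example.HelloWorld:versions:1.0.0",
            artifactName: "artifacts/1.0.0/hello_world.py",
            iotEndpointType: .fips,
            s3EndpointType: .regional
        )
    )
    return response.preSignedUrl
}

Both parameters are optional; omitting them keeps the previous behavior (standard IoT endpoint, and the GLOBAL S3 endpoint in us-east-1).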
diff --git a/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift b/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift index 876c52ef36..6dfb2e1564 100644 --- a/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift +++ b/Sources/Soto/Services/GuardDuty/GuardDuty_api.swift @@ -164,6 +164,19 @@ public struct GuardDuty: AWSService { ) } + /// Creates a new Malware Protection plan for the protected resource. When you create a Malware Protection plan, the Amazon Web Services service terms for GuardDuty Malware Protection apply. For more information, see Amazon Web Services service terms for GuardDuty Malware Protection. + @Sendable + public func createMalwareProtectionPlan(_ input: CreateMalwareProtectionPlanRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMalwareProtectionPlanResponse { + return try await self.client.execute( + operation: "CreateMalwareProtectionPlan", + path: "/malware-protection-plan", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates member accounts of the current Amazon Web Services account by specifying a list of Amazon Web Services account IDs. This step is a prerequisite for managing the associated member accounts either by invitation or through an organization. As a delegated administrator, using CreateMembers will enable GuardDuty in the added member accounts, with the exception of the organization delegated administrator account. A delegated administrator must enable GuardDuty prior to being added as a member. When you use CreateMembers as an Organizations delegated administrator, GuardDuty applies your organization's auto-enable settings to the member accounts in this request, irrespective of the accounts being new or existing members. For more information about the existing auto-enable settings for your organization, see DescribeOrganizationConfiguration. If you disassociate a member account that was added by invitation, the member account details obtained from this API, including the associated email addresses, will be retained. This is done so that the delegated administrator can invoke the InviteMembers API without the need to invoke the CreateMembers API again. To remove the details associated with a member account, the delegated administrator must invoke the DeleteMembers API. When the member accounts added through Organizations are later disassociated, you (administrator) can't invite them by calling the InviteMembers API. You can create an association with these member accounts again only by calling the CreateMembers API. @Sendable public func createMembers(_ input: CreateMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMembersResponse { @@ -281,6 +294,19 @@ public struct GuardDuty: AWSService { ) } + /// Deletes the Malware Protection plan ID associated with the Malware Protection plan resource. Use this API only when you no longer want to protect the resource associated with this Malware Protection plan ID. + @Sendable + public func deleteMalwareProtectionPlan(_ input: DeleteMalwareProtectionPlanRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteMalwareProtectionPlan", + path: "/malware-protection-plan/{MalwareProtectionPlanId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes GuardDuty member accounts (to the current GuardDuty administrator account) specified by the account IDs. 
With autoEnableOrganizationMembers configuration for your organization set to ALL, you'll receive an error if you attempt to disable GuardDuty for a member account in your organization. @Sendable public func deleteMembers(_ input: DeleteMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteMembersResponse { @@ -529,6 +555,19 @@ public struct GuardDuty: AWSService { ) } + /// Retrieves the Malware Protection plan details associated with a Malware Protection plan ID. + @Sendable + public func getMalwareProtectionPlan(_ input: GetMalwareProtectionPlanRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMalwareProtectionPlanResponse { + return try await self.client.execute( + operation: "GetMalwareProtectionPlan", + path: "/malware-protection-plan/{MalwareProtectionPlanId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the details of the malware scan settings. There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints. @Sendable public func getMalwareScanSettings(_ input: GetMalwareScanSettingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetMalwareScanSettingsResponse { @@ -724,6 +763,19 @@ public struct GuardDuty: AWSService { ) } + /// Lists the Malware Protection plan IDs associated with the protected resources in your Amazon Web Services account. + @Sendable + public func listMalwareProtectionPlans(_ input: ListMalwareProtectionPlansRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMalwareProtectionPlansResponse { + return try await self.client.execute( + operation: "ListMalwareProtectionPlans", + path: "/malware-protection-plan", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists details about all member accounts for the current GuardDuty administrator account. @Sendable public func listMembers(_ input: ListMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMembersResponse { @@ -919,6 +971,19 @@ public struct GuardDuty: AWSService { ) } + /// Updates an existing Malware Protection plan resource. + @Sendable + public func updateMalwareProtectionPlan(_ input: UpdateMalwareProtectionPlanRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "UpdateMalwareProtectionPlan", + path: "/malware-protection-plan/{MalwareProtectionPlanId}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the malware scan settings. There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints. 
@Sendable public func updateMalwareScanSettings(_ input: UpdateMalwareScanSettingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateMalwareScanSettingsResponse { diff --git a/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift b/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift index 519c51d8c7..cd55997563 100644 --- a/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift +++ b/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift @@ -221,6 +221,19 @@ extension GuardDuty { public var description: String { return self.rawValue } } + public enum MalwareProtectionPlanStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case error = "ERROR" + case warning = "WARNING" + public var description: String { return self.rawValue } + } + + public enum MalwareProtectionPlanTaggingActionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ManagementType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case autoManaged = "AUTO_MANAGED" case disabled = "DISABLED" @@ -1492,6 +1505,61 @@ extension GuardDuty { } } + public struct CreateMalwareProtectionPlanRequest: AWSEncodableShape { + /// Information about whether the tags will be added to the S3 object after scanning. + public let actions: MalwareProtectionPlanActions? + /// The idempotency token for the create request. + public let clientToken: String? + /// Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource. + public let protectedResource: CreateProtectedResource? + /// IAM role with permissions required to scan and add tags to the associated protected resource. + public let role: String? + /// Tags added to the Malware Protection plan resource. + public let tags: [String: String]? + + public init(actions: MalwareProtectionPlanActions? = nil, clientToken: String? = CreateMalwareProtectionPlanRequest.idempotencyToken(), protectedResource: CreateProtectedResource? = nil, role: String? = nil, tags: [String: String]? = nil) { + self.actions = actions + self.clientToken = clientToken + self.protectedResource = protectedResource + self.role = role + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 64) + try self.protectedResource?.validate(name: "\(name).protectedResource") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case actions = "actions" + case clientToken = "clientToken" + case protectedResource = "protectedResource" + case role = "role" + case tags = "tags" + } + } + + public struct CreateMalwareProtectionPlanResponse: AWSDecodableShape { + /// A unique identifier associated with the Malware Protection plan resource. + public let malwareProtectionPlanId: String? 
+ + public init(malwareProtectionPlanId: String? = nil) { + self.malwareProtectionPlanId = malwareProtectionPlanId + } + + private enum CodingKeys: String, CodingKey { + case malwareProtectionPlanId = "malwareProtectionPlanId" + } + } + public struct CreateMembersRequest: AWSEncodableShape { /// A list of account ID and email address pairs of the accounts that you want to associate with the GuardDuty administrator account. public let accountDetails: [AccountDetail]? @@ -1538,6 +1606,23 @@ extension GuardDuty { } } + public struct CreateProtectedResource: AWSEncodableShape & AWSDecodableShape { + /// Information about the protected S3 bucket resource. + public let s3Bucket: CreateS3BucketResource? + + public init(s3Bucket: CreateS3BucketResource? = nil) { + self.s3Bucket = s3Bucket + } + + public func validate(name: String) throws { + try self.s3Bucket?.validate(name: "\(name).s3Bucket") + } + + private enum CodingKeys: String, CodingKey { + case s3Bucket = "s3Bucket" + } + } + public struct CreatePublishingDestinationRequest: AWSEncodableShape { /// The idempotency token for the request. public let clientToken: String? @@ -1590,6 +1675,27 @@ extension GuardDuty { } } + public struct CreateS3BucketResource: AWSEncodableShape & AWSDecodableShape { + /// Name of the S3 bucket. + public let bucketName: String? + /// Information about the specified object prefixes. The S3 object will be scanned only if it belongs to any of the specified object prefixes. + public let objectPrefixes: [String]? + + public init(bucketName: String? = nil, objectPrefixes: [String]? = nil) { + self.bucketName = bucketName + self.objectPrefixes = objectPrefixes + } + + public func validate(name: String) throws { + try self.validate(self.objectPrefixes, name: "objectPrefixes", parent: name, max: 5) + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + case objectPrefixes = "objectPrefixes" + } + } + public struct CreateSampleFindingsRequest: AWSEncodableShape { /// The ID of the detector to create sample findings for. public let detectorId: String @@ -1992,6 +2098,23 @@ extension GuardDuty { } } + public struct DeleteMalwareProtectionPlanRequest: AWSEncodableShape { + /// A unique identifier associated with Malware Protection plan resource. + public let malwareProtectionPlanId: String + + public init(malwareProtectionPlanId: String) { + self.malwareProtectionPlanId = malwareProtectionPlanId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.malwareProtectionPlanId, key: "malwareProtectionPlanId") + } + + private enum CodingKeys: CodingKey {} + } + public struct DeleteMembersRequest: AWSEncodableShape { /// A list of account IDs of the GuardDuty member accounts that you want to delete. public let accountIds: [String]? @@ -3415,6 +3538,64 @@ extension GuardDuty { } } + public struct GetMalwareProtectionPlanRequest: AWSEncodableShape { + /// A unique identifier associated with Malware Protection plan resource. + public let malwareProtectionPlanId: String + + public init(malwareProtectionPlanId: String) { + self.malwareProtectionPlanId = malwareProtectionPlanId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.malwareProtectionPlanId, key: "malwareProtectionPlanId") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetMalwareProtectionPlanResponse: AWSDecodableShape { + /// Information about whether the tags will be added to the S3 object after scanning. + public let actions: MalwareProtectionPlanActions? + /// Amazon Resource Name (ARN) of the protected resource. + public let arn: String? + /// The timestamp when the Malware Protection plan resource was created. + public let createdAt: Date? + /// Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource. + public let protectedResource: CreateProtectedResource? + /// IAM role that includes the permissions required to scan and add tags to the associated protected resource. + public let role: String? + /// Malware Protection plan status. + public let status: MalwareProtectionPlanStatus? + /// Information about the issue code and message associated to the status of your Malware Protection plan. + public let statusReasons: [MalwareProtectionPlanStatusReason]? + /// Tags added to the Malware Protection plan resource. + public let tags: [String: String]? + + public init(actions: MalwareProtectionPlanActions? = nil, arn: String? = nil, createdAt: Date? = nil, protectedResource: CreateProtectedResource? = nil, role: String? = nil, status: MalwareProtectionPlanStatus? = nil, statusReasons: [MalwareProtectionPlanStatusReason]? = nil, tags: [String: String]? = nil) { + self.actions = actions + self.arn = arn + self.createdAt = createdAt + self.protectedResource = protectedResource + self.role = role + self.status = status + self.statusReasons = statusReasons + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case actions = "actions" + case arn = "arn" + case createdAt = "createdAt" + case protectedResource = "protectedResource" + case role = "role" + case status = "status" + case statusReasons = "statusReasons" + case tags = "tags" + } + } + public struct GetMalwareScanSettingsRequest: AWSEncodableShape { /// The unique ID of the detector that the scan setting is associated with. public let detectorId: String @@ -3989,6 +4170,23 @@ extension GuardDuty { } } + public struct ItemPath: AWSDecodableShape { + /// The hash value of the infected resource. + public let hash: String? + /// The nested item path where the infected file was found. + public let nestedItemPath: String? + + public init(hash: String? = nil, nestedItemPath: String? = nil) { + self.hash = hash + self.nestedItemPath = nestedItemPath + } + + private enum CodingKeys: String, CodingKey { + case hash = "hash" + case nestedItemPath = "nestedItemPath" + } + } + public struct KubernetesApiCallAction: AWSDecodableShape { /// The name of the namespace where the Kubernetes API call action takes place. public let namespace: String? @@ -4675,6 +4873,40 @@ extension GuardDuty { } } + public struct ListMalwareProtectionPlansRequest: AWSEncodableShape { + /// You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data. + public let nextToken: String? + + public init(nextToken: String? 
= nil) { + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.nextToken, key: "nextToken") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListMalwareProtectionPlansResponse: AWSDecodableShape { + /// A list of unique identifiers associated with each Malware Protection plan. + public let malwareProtectionPlans: [MalwareProtectionPlanSummary]? + /// You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data. + public let nextToken: String? + + public init(malwareProtectionPlans: [MalwareProtectionPlanSummary]? = nil, nextToken: String? = nil) { + self.malwareProtectionPlans = malwareProtectionPlans + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case malwareProtectionPlans = "malwareProtectionPlans" + case nextToken = "nextToken" + } + } + public struct ListMembersRequest: AWSEncodableShape { /// The unique ID of the detector the member is associated with. public let detectorId: String @@ -5005,6 +5237,75 @@ extension GuardDuty { } } + public struct MalwareProtectionPlanActions: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether the scanned S3 object will have tags about the scan result. + public let tagging: MalwareProtectionPlanTaggingAction? + + public init(tagging: MalwareProtectionPlanTaggingAction? = nil) { + self.tagging = tagging + } + + private enum CodingKeys: String, CodingKey { + case tagging = "tagging" + } + } + + public struct MalwareProtectionPlanStatusReason: AWSDecodableShape { + /// Issue code. + public let code: String? + /// Issue message that specifies the reason. For information about potential troubleshooting steps, see Troubleshooting Malware Protection for S3 status issues in the GuardDuty User Guide. + public let message: String? + + public init(code: String? = nil, message: String? = nil) { + self.code = code + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case code = "code" + case message = "message" + } + } + + public struct MalwareProtectionPlanSummary: AWSDecodableShape { + /// A unique identifier associated with Malware Protection plan. + public let malwareProtectionPlanId: String? + + public init(malwareProtectionPlanId: String? = nil) { + self.malwareProtectionPlanId = malwareProtectionPlanId + } + + private enum CodingKeys: String, CodingKey { + case malwareProtectionPlanId = "malwareProtectionPlanId" + } + } + + public struct MalwareProtectionPlanTaggingAction: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether or not the tags will be added. + public let status: MalwareProtectionPlanTaggingActionStatus? + + public init(status: MalwareProtectionPlanTaggingActionStatus? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + + public struct MalwareScanDetails: AWSDecodableShape { + /// Information about the detected threats associated with the generated GuardDuty finding. + public let threats: [Threat]?
= nil) { + self.threats = threats + } + + private enum CodingKeys: String, CodingKey { + case threats = "threats" + } + } + public struct Master: AWSDecodableShape { /// The ID of the account used as the administrator account. public let accountId: String? @@ -6047,7 +6348,7 @@ extension GuardDuty { } public struct ResourceDetails: AWSDecodableShape { - /// InstanceArn that was scanned in the scan entry. + /// Instance ARN that was scanned in the scan entry. public let instanceArn: String? public init(instanceArn: String? = nil) { @@ -6198,18 +6499,21 @@ extension GuardDuty { public let owner: Owner? /// Describes the public access policies that apply to the S3 bucket. public let publicAccess: PublicAccess? + /// Information about the S3 object that was scanned. + public let s3ObjectDetails: [S3ObjectDetail]? /// All tags attached to the S3 bucket public let tags: [Tag]? /// Describes whether the bucket is a source or destination bucket. public let type: String? - public init(arn: String? = nil, createdAt: Date? = nil, defaultServerSideEncryption: DefaultServerSideEncryption? = nil, name: String? = nil, owner: Owner? = nil, publicAccess: PublicAccess? = nil, tags: [Tag]? = nil, type: String? = nil) { + public init(arn: String? = nil, createdAt: Date? = nil, defaultServerSideEncryption: DefaultServerSideEncryption? = nil, name: String? = nil, owner: Owner? = nil, publicAccess: PublicAccess? = nil, s3ObjectDetails: [S3ObjectDetail]? = nil, tags: [Tag]? = nil, type: String? = nil) { self.arn = arn self.createdAt = createdAt self.defaultServerSideEncryption = defaultServerSideEncryption self.name = name self.owner = owner self.publicAccess = publicAccess + self.s3ObjectDetails = s3ObjectDetails self.tags = tags self.type = type } @@ -6221,6 +6525,7 @@ extension GuardDuty { case name = "name" case owner = "owner" case publicAccess = "publicAccess" + case s3ObjectDetails = "s3ObjectDetails" case tags = "tags" case type = "type" } @@ -6252,6 +6557,35 @@ extension GuardDuty { } } + public struct S3ObjectDetail: AWSDecodableShape { + /// The entity tag is a hash of the S3 object. The ETag reflects changes only to the contents of an object, and not its metadata. + public let eTag: String? + /// Hash of the threat detected in this finding. + public let hash: String? + /// Key of the S3 object. + public let key: String? + /// Amazon Resource Name (ARN) of the S3 object. + public let objectArn: String? + /// Version ID of the object. + public let versionId: String? + + public init(eTag: String? = nil, hash: String? = nil, key: String? = nil, objectArn: String? = nil, versionId: String? = nil) { + self.eTag = eTag + self.hash = hash + self.key = key + self.objectArn = objectArn + self.versionId = versionId + } + + private enum CodingKeys: String, CodingKey { + case eTag = "eTag" + case hash = "hash" + case key = "key" + case objectArn = "objectArn" + case versionId = "versionId" + } + } + public struct Scan: AWSDecodableShape { /// The ID for the account that belongs to the scan. public let accountId: String? @@ -6422,7 +6756,7 @@ extension GuardDuty { public let filePath: String? /// The hash value of the infected file. public let hash: String? - /// EBS volume Arn details of the infected file. + /// EBS volume ARN details of the infected file. public let volumeArn: String? public init(fileName: String? = nil, filePath: String? = nil, hash: String? = nil, volumeArn: String? = nil) { @@ -6582,6 +6916,8 @@ extension GuardDuty { public let evidence: Evidence? 
/// The name of the feature that generated a finding. public let featureName: String? + /// Returns details from the malware scan that generated a GuardDuty finding. + public let malwareScanDetails: MalwareScanDetails? /// The resource role information for this finding. public let resourceRole: String? /// Information about the process and any required context values for a specific finding @@ -6591,7 +6927,7 @@ extension GuardDuty { /// Feedback that was submitted about the finding. public let userFeedback: String? - public init(action: Action? = nil, additionalInfo: ServiceAdditionalInfo? = nil, archived: Bool? = nil, count: Int? = nil, detection: Detection? = nil, detectorId: String? = nil, ebsVolumeScanDetails: EbsVolumeScanDetails? = nil, eventFirstSeen: String? = nil, eventLastSeen: String? = nil, evidence: Evidence? = nil, featureName: String? = nil, resourceRole: String? = nil, runtimeDetails: RuntimeDetails? = nil, serviceName: String? = nil, userFeedback: String? = nil) { + public init(action: Action? = nil, additionalInfo: ServiceAdditionalInfo? = nil, archived: Bool? = nil, count: Int? = nil, detection: Detection? = nil, detectorId: String? = nil, ebsVolumeScanDetails: EbsVolumeScanDetails? = nil, eventFirstSeen: String? = nil, eventLastSeen: String? = nil, evidence: Evidence? = nil, featureName: String? = nil, malwareScanDetails: MalwareScanDetails? = nil, resourceRole: String? = nil, runtimeDetails: RuntimeDetails? = nil, serviceName: String? = nil, userFeedback: String? = nil) { self.action = action self.additionalInfo = additionalInfo self.archived = archived @@ -6603,6 +6939,7 @@ extension GuardDuty { self.eventLastSeen = eventLastSeen self.evidence = evidence self.featureName = featureName + self.malwareScanDetails = malwareScanDetails self.resourceRole = resourceRole self.runtimeDetails = runtimeDetails self.serviceName = serviceName @@ -6621,6 +6958,7 @@ extension GuardDuty { case eventLastSeen = "eventLastSeen" case evidence = "evidence" case featureName = "featureName" + case malwareScanDetails = "malwareScanDetails" case resourceRole = "resourceRole" case runtimeDetails = "runtimeDetails" case serviceName = "serviceName" @@ -6842,6 +7180,27 @@ extension GuardDuty { public init() {} } + public struct Threat: AWSDecodableShape { + /// Information about the nested item path and hash of the protected resource. + public let itemPaths: [ItemPath]? + /// Name of the detected threat that caused GuardDuty to generate this finding. + public let name: String? + /// Source of the threat that generated this finding. + public let source: String? + + public init(itemPaths: [ItemPath]? = nil, name: String? = nil, source: String? = nil) { + self.itemPaths = itemPaths + self.name = name + self.source = source + } + + private enum CodingKeys: String, CodingKey { + case itemPaths = "itemPaths" + case name = "name" + case source = "source" + } + } + public struct ThreatDetectedByName: AWSDecodableShape { /// Total number of infected files identified. public let itemCount: Int? @@ -7253,6 +7612,43 @@ extension GuardDuty { public init() {} } + public struct UpdateMalwareProtectionPlanRequest: AWSEncodableShape { + /// Information about whether the tags will be added to the S3 object after scanning. + public let actions: MalwareProtectionPlanActions? + /// A unique identifier associated with the Malware Protection plan. + public let malwareProtectionPlanId: String + /// Information about the protected resource that is associated with the created Malware Protection plan. 
Presently, S3Bucket is the only supported protected resource. + public let protectedResource: UpdateProtectedResource? + /// IAM role with permissions required to scan and add tags to the associated protected resource. + public let role: String? + + public init(actions: MalwareProtectionPlanActions? = nil, malwareProtectionPlanId: String, protectedResource: UpdateProtectedResource? = nil, role: String? = nil) { + self.actions = actions + self.malwareProtectionPlanId = malwareProtectionPlanId + self.protectedResource = protectedResource + self.role = role + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.actions, forKey: .actions) + request.encodePath(self.malwareProtectionPlanId, key: "malwareProtectionPlanId") + try container.encodeIfPresent(self.protectedResource, forKey: .protectedResource) + try container.encodeIfPresent(self.role, forKey: .role) + } + + public func validate(name: String) throws { + try self.protectedResource?.validate(name: "\(name).protectedResource") + } + + private enum CodingKeys: String, CodingKey { + case actions = "actions" + case protectedResource = "protectedResource" + case role = "role" + } + } + public struct UpdateMalwareScanSettingsRequest: AWSEncodableShape { /// The unique ID of the detector that specifies the GuardDuty service where you want to update scan settings. public let detectorId: String @@ -7412,6 +7808,23 @@ extension GuardDuty { public init() {} } + public struct UpdateProtectedResource: AWSEncodableShape { + /// Information about the protected S3 bucket resource. + public let s3Bucket: UpdateS3BucketResource? + + public init(s3Bucket: UpdateS3BucketResource? = nil) { + self.s3Bucket = s3Bucket + } + + public func validate(name: String) throws { + try self.s3Bucket?.validate(name: "\(name).s3Bucket") + } + + private enum CodingKeys: String, CodingKey { + case s3Bucket = "s3Bucket" + } + } + public struct UpdatePublishingDestinationRequest: AWSEncodableShape { /// The ID of the publishing destination to update. public let destinationId: String @@ -7448,6 +7861,23 @@ extension GuardDuty { public init() {} } + public struct UpdateS3BucketResource: AWSEncodableShape { + /// Information about the specified object prefixes. The S3 object will be scanned only if it belongs to any of the specified object prefixes. + public let objectPrefixes: [String]? + + public init(objectPrefixes: [String]? = nil) { + self.objectPrefixes = objectPrefixes + } + + public func validate(name: String) throws { + try self.validate(self.objectPrefixes, name: "objectPrefixes", parent: name, max: 5) + } + + private enum CodingKeys: String, CodingKey { + case objectPrefixes = "objectPrefixes" + } + } + public struct UpdateThreatIntelSetRequest: AWSEncodableShape { /// The updated Boolean value that specifies whether the ThreateIntelSet is active or not. public let activate: Bool? @@ -7695,11 +8125,11 @@ extension GuardDuty { public let deviceName: String? /// EBS volume encryption type. public let encryptionType: String? - /// KMS key Arn used to encrypt the EBS volume. + /// KMS key ARN used to encrypt the EBS volume. public let kmsKeyArn: String? - /// Snapshot Arn of the EBS volume. + /// Snapshot ARN of the EBS volume. public let snapshotArn: String? - /// EBS volume Arn information. + /// EBS volume ARN information. public let volumeArn: String? /// EBS volume size in GB. 
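Taken together, the new encodable shapes above compose into a single plan update: an UpdateS3BucketResource carrying up to five objectPrefixes (enforced by its validate()), wrapped in an UpdateProtectedResource and attached to an UpdateMalwareProtectionPlanRequest. A minimal usage sketch, assuming the SotoGuardDuty module name, a default-configured AWSClient, and that the generated service type exposes a matching updateMalwareProtectionPlan operation; the plan ID and prefixes are placeholders:

import SotoGuardDuty

let client = AWSClient()                                    // assumes default credential configuration
let guardDuty = GuardDuty(client: client, region: .useast1)

// Limit scanning to two object prefixes; validate() above caps the list at five entries.
let request = GuardDuty.UpdateMalwareProtectionPlanRequest(
    malwareProtectionPlanId: "example-plan-id",             // hypothetical plan identifier
    protectedResource: GuardDuty.UpdateProtectedResource(
        s3Bucket: GuardDuty.UpdateS3BucketResource(objectPrefixes: ["uploads/", "imports/"])
    )
)
try await guardDuty.updateMalwareProtectionPlan(request)
try await client.shutdown()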
public let volumeSizeInGB: Int? @@ -7775,6 +8205,7 @@ public struct GuardDutyErrorType: AWSErrorType { case badRequestException = "BadRequestException" case conflictException = "ConflictException" case internalServerErrorException = "InternalServerErrorException" + case resourceNotFoundException = "ResourceNotFoundException" } private let error: Code @@ -7803,6 +8234,8 @@ public struct GuardDutyErrorType: AWSErrorType { public static var conflictException: Self { .init(.conflictException) } /// An internal server error exception object. public static var internalServerErrorException: Self { .init(.internalServerErrorException) } + /// The requested resource can't be found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } } extension GuardDutyErrorType: Equatable { diff --git a/Sources/Soto/Services/Honeycode/Honeycode_api.swift b/Sources/Soto/Services/Honeycode/Honeycode_api.swift deleted file mode 100644 index f38ea0d1e1..0000000000 --- a/Sources/Soto/Services/Honeycode/Honeycode_api.swift +++ /dev/null @@ -1,404 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2023 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -@_exported import SotoCore - -/// Service object for interacting with AWS Honeycode service. -/// -/// Amazon Honeycode is a fully managed service that allows you to quickly build mobile and web apps for teams—without programming. Build Honeycode apps for managing almost anything, like projects, customers, operations, approvals, resources, and even your team. -public struct Honeycode: AWSService { - // MARK: Member variables - - /// Client used for communication with AWS - public let client: AWSClient - /// Service configuration - public let config: AWSServiceConfig - - // MARK: Initialization - - /// Initialize the Honeycode client - /// - parameters: - /// - client: AWSClient used to process requests - /// - region: Region of server you want to communicate with. This will override the partition parameter. - /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). - /// - endpoint: Custom endpoint URL to use instead of standard AWS servers - /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded - /// - timeout: Timeout value for HTTP requests - /// - byteBufferAllocator: Allocator for ByteBuffers - /// - options: Service options - public init( - client: AWSClient, - region: SotoCore.Region? = nil, - partition: AWSPartition = .aws, - endpoint: String? = nil, - middleware: AWSMiddlewareProtocol? = nil, - timeout: TimeAmount? = nil, - byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), - options: AWSServiceConfig.Options = [] - ) { - self.client = client - self.config = AWSServiceConfig( - region: region, - partition: region?.partition ?? 
partition, - serviceName: "Honeycode", - serviceIdentifier: "honeycode", - serviceProtocol: .restjson, - apiVersion: "2020-03-01", - endpoint: endpoint, - errorType: HoneycodeErrorType.self, - middleware: middleware, - timeout: timeout, - byteBufferAllocator: byteBufferAllocator, - options: options - ) - } - - - - - - // MARK: API Calls - - /// The BatchCreateTableRows API allows you to create one or more rows at the end of a table in a workbook. The API allows you to specify the values to set in some or all of the columns in the new rows. If a column is not explicitly set in a specific row, then the column level formula specified in the table will be applied to the new row. If there is no column level formula but the last row of the table has a formula, then that formula will be copied down to the new row. If there is no column level formula and no formula in the last row of the table, then that column will be left blank for the new rows. - @Sendable - public func batchCreateTableRows(_ input: BatchCreateTableRowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchCreateTableRowsResult { - return try await self.client.execute( - operation: "BatchCreateTableRows", - path: "/workbooks/{workbookId}/tables/{tableId}/rows/batchcreate", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The BatchDeleteTableRows API allows you to delete one or more rows from a table in a workbook. You need to specify the ids of the rows that you want to delete from the table. - @Sendable - public func batchDeleteTableRows(_ input: BatchDeleteTableRowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDeleteTableRowsResult { - return try await self.client.execute( - operation: "BatchDeleteTableRows", - path: "/workbooks/{workbookId}/tables/{tableId}/rows/batchdelete", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The BatchUpdateTableRows API allows you to update one or more rows in a table in a workbook. You can specify the values to set in some or all of the columns in the table for the specified rows. If a column is not explicitly specified in a particular row, then that column will not be updated for that row. To clear out the data in a specific cell, you need to set the value as an empty string (""). - @Sendable - public func batchUpdateTableRows(_ input: BatchUpdateTableRowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchUpdateTableRowsResult { - return try await self.client.execute( - operation: "BatchUpdateTableRows", - path: "/workbooks/{workbookId}/tables/{tableId}/rows/batchupdate", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The BatchUpsertTableRows API allows you to upsert one or more rows in a table. The upsert operation takes a filter expression as input and evaluates it to find matching rows on the destination table. If matching rows are found, it will update the cells in the matching rows to new values specified in the request. If no matching rows are found, a new row is added at the end of the table and the cells in that row are set to the new values specified in the request. You can specify the values to set in some or all of the columns in the table for the matching or newly appended rows. If a column is not explicitly specified for a particular row, then that column will not be updated for that row. 
To clear out the data in a specific cell, you need to set the value as an empty string (""). - @Sendable - public func batchUpsertTableRows(_ input: BatchUpsertTableRowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchUpsertTableRowsResult { - return try await self.client.execute( - operation: "BatchUpsertTableRows", - path: "/workbooks/{workbookId}/tables/{tableId}/rows/batchupsert", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The DescribeTableDataImportJob API allows you to retrieve the status and details of a table data import job. - @Sendable - public func describeTableDataImportJob(_ input: DescribeTableDataImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeTableDataImportJobResult { - return try await self.client.execute( - operation: "DescribeTableDataImportJob", - path: "/workbooks/{workbookId}/tables/{tableId}/import/{jobId}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The GetScreenData API allows retrieval of data from a screen in a Honeycode app. The API allows setting local variables in the screen to filter, sort or otherwise affect what will be displayed on the screen. - @Sendable - public func getScreenData(_ input: GetScreenDataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetScreenDataResult { - return try await self.client.execute( - operation: "GetScreenData", - path: "/screendata", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The InvokeScreenAutomation API allows invoking an action defined in a screen in a Honeycode app. The API allows setting local variables, which can then be used in the automation being invoked. This allows automating the Honeycode app interactions to write, update or delete data in the workbook. - @Sendable - public func invokeScreenAutomation(_ input: InvokeScreenAutomationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InvokeScreenAutomationResult { - return try await self.client.execute( - operation: "InvokeScreenAutomation", - path: "/workbooks/{workbookId}/apps/{appId}/screens/{screenId}/automations/{screenAutomationId}", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The ListTableColumns API allows you to retrieve a list of all the columns in a table in a workbook. - @Sendable - public func listTableColumns(_ input: ListTableColumnsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTableColumnsResult { - return try await self.client.execute( - operation: "ListTableColumns", - path: "/workbooks/{workbookId}/tables/{tableId}/columns", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The ListTableRows API allows you to retrieve a list of all the rows in a table in a workbook. - @Sendable - public func listTableRows(_ input: ListTableRowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTableRowsResult { - return try await self.client.execute( - operation: "ListTableRows", - path: "/workbooks/{workbookId}/tables/{tableId}/rows/list", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The ListTables API allows you to retrieve a list of all the tables in a workbook. 
- @Sendable - public func listTables(_ input: ListTablesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTablesResult { - return try await self.client.execute( - operation: "ListTables", - path: "/workbooks/{workbookId}/tables", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The ListTagsForResource API allows you to return a resource's tags. - @Sendable - public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResult { - return try await self.client.execute( - operation: "ListTagsForResource", - path: "/tags/{resourceArn}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The QueryTableRows API allows you to use a filter formula to query for specific rows in a table. - @Sendable - public func queryTableRows(_ input: QueryTableRowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> QueryTableRowsResult { - return try await self.client.execute( - operation: "QueryTableRows", - path: "/workbooks/{workbookId}/tables/{tableId}/rows/query", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The StartTableDataImportJob API allows you to start an import job on a table. This API will only return the id of the job that was started. To find out the status of the import request, you need to call the DescribeTableDataImportJob API. - @Sendable - public func startTableDataImportJob(_ input: StartTableDataImportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartTableDataImportJobResult { - return try await self.client.execute( - operation: "StartTableDataImportJob", - path: "/workbooks/{workbookId}/tables/{destinationTableId}/import", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The TagResource API allows you to add tags to an ARN-able resource. Resource includes workbook, table, screen and screen-automation. - @Sendable - public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResult { - return try await self.client.execute( - operation: "TagResource", - path: "/tags/{resourceArn}", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - - /// The UntagResource API allows you to removes tags from an ARN-able resource. Resource includes workbook, table, screen and screen-automation. - @Sendable - public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResult { - return try await self.client.execute( - operation: "UntagResource", - path: "/tags/{resourceArn}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } -} - -extension Honeycode { - /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public - /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. 
- public init(from: Honeycode, patch: AWSServiceConfig.Patch) { - self.client = from.client - self.config = from.config.with(patch: patch) - } -} - -// MARK: Paginators - -@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) -extension Honeycode { - /// The ListTableColumns API allows you to retrieve a list of all the columns in a table in a workbook. - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listTableColumnsPaginator( - _ input: ListTableColumnsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listTableColumns, - inputKey: \ListTableColumnsRequest.nextToken, - outputKey: \ListTableColumnsResult.nextToken, - logger: logger - ) - } - - /// The ListTableRows API allows you to retrieve a list of all the rows in a table in a workbook. - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listTableRowsPaginator( - _ input: ListTableRowsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listTableRows, - inputKey: \ListTableRowsRequest.nextToken, - outputKey: \ListTableRowsResult.nextToken, - logger: logger - ) - } - - /// The ListTables API allows you to retrieve a list of all the tables in a workbook. - /// Return PaginatorSequence for operation. - /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func listTablesPaginator( - _ input: ListTablesRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listTables, - inputKey: \ListTablesRequest.nextToken, - outputKey: \ListTablesResult.nextToken, - logger: logger - ) - } - - /// The QueryTableRows API allows you to use a filter formula to query for specific rows in a table. - /// Return PaginatorSequence for operation. 
- /// - /// - Parameters: - /// - input: Input for request - /// - logger: Logger used flot logging - public func queryTableRowsPaginator( - _ input: QueryTableRowsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.queryTableRows, - inputKey: \QueryTableRowsRequest.nextToken, - outputKey: \QueryTableRowsResult.nextToken, - logger: logger - ) - } -} - -extension Honeycode.ListTableColumnsRequest: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> Honeycode.ListTableColumnsRequest { - return .init( - nextToken: token, - tableId: self.tableId, - workbookId: self.workbookId - ) - } -} - -extension Honeycode.ListTableRowsRequest: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> Honeycode.ListTableRowsRequest { - return .init( - maxResults: self.maxResults, - nextToken: token, - rowIds: self.rowIds, - tableId: self.tableId, - workbookId: self.workbookId - ) - } -} - -extension Honeycode.ListTablesRequest: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> Honeycode.ListTablesRequest { - return .init( - maxResults: self.maxResults, - nextToken: token, - workbookId: self.workbookId - ) - } -} - -extension Honeycode.QueryTableRowsRequest: AWSPaginateToken { - public func usingPaginationToken(_ token: String) -> Honeycode.QueryTableRowsRequest { - return .init( - filterFormula: self.filterFormula, - maxResults: self.maxResults, - nextToken: token, - tableId: self.tableId, - workbookId: self.workbookId - ) - } -} diff --git a/Sources/Soto/Services/Honeycode/Honeycode_shapes.swift b/Sources/Soto/Services/Honeycode/Honeycode_shapes.swift deleted file mode 100644 index a5e111e62d..0000000000 --- a/Sources/Soto/Services/Honeycode/Honeycode_shapes.swift +++ /dev/null @@ -1,1654 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2023 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. 
- -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_spi(SotoInternal) import SotoCore - -extension Honeycode { - // MARK: Enums - - public enum ErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case accessDenied = "ACCESS_DENIED" - case fileEmptyError = "FILE_EMPTY_ERROR" - case fileNotFoundError = "FILE_NOT_FOUND_ERROR" - case fileParsingError = "FILE_PARSING_ERROR" - case fileSizeLimitError = "FILE_SIZE_LIMIT_ERROR" - case invalidFileTypeError = "INVALID_FILE_TYPE_ERROR" - case invalidImportOptionsError = "INVALID_IMPORT_OPTIONS_ERROR" - case invalidTableColumnIdError = "INVALID_TABLE_COLUMN_ID_ERROR" - case invalidTableIdError = "INVALID_TABLE_ID_ERROR" - case invalidUrlError = "INVALID_URL_ERROR" - case resourceNotFoundError = "RESOURCE_NOT_FOUND_ERROR" - case systemLimitError = "SYSTEM_LIMIT_ERROR" - case tableNotFoundError = "TABLE_NOT_FOUND_ERROR" - case unknownError = "UNKNOWN_ERROR" - public var description: String { return self.rawValue } - } - - public enum Format: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case accounting = "ACCOUNTING" - case auto = "AUTO" - case contact = "CONTACT" - case currency = "CURRENCY" - case date = "DATE" - case dateTime = "DATE_TIME" - case number = "NUMBER" - case percentage = "PERCENTAGE" - case rowlink = "ROWLINK" - case rowset = "ROWSET" - case text = "TEXT" - case time = "TIME" - public var description: String { return self.rawValue } - } - - public enum ImportDataCharacterEncoding: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case iso88591 = "ISO-8859-1" - case usAscii = "US-ASCII" - case utf16 = "UTF-16" - case utf16Be = "UTF-16BE" - case utf16Le = "UTF-16LE" - case utf8 = "UTF-8" - public var description: String { return self.rawValue } - } - - public enum ImportSourceDataFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case delimitedText = "DELIMITED_TEXT" - public var description: String { return self.rawValue } - } - - public enum TableDataImportJobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case completed = "COMPLETED" - case failed = "FAILED" - case inProgress = "IN_PROGRESS" - case submitted = "SUBMITTED" - public var description: String { return self.rawValue } - } - - public enum UpsertAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case appended = "APPENDED" - case updated = "UPDATED" - public var description: String { return self.rawValue } - } - - // MARK: Shapes - - public struct BatchCreateTableRowsRequest: AWSEncodableShape { - /// The request token for performing the batch create operation. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the operation again. Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days. - public let clientRequestToken: String? - /// The list of rows to create at the end of the table. 
Each item in this list needs to have a batch item id to uniquely identify the element in the request and the cells to create for that row. You need to specify at least one item in this list. Note that if one of the column ids in any of the rows in the request does not exist in the table, then the request fails and no updates are made to the table. - public let rowsToCreate: [CreateRowData] - /// The ID of the table where the new rows are being added. If a table with the specified ID could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook where the new rows are being added. If a workbook with the specified ID could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(clientRequestToken: String? = nil, rowsToCreate: [CreateRowData], tableId: String, workbookId: String) { - self.clientRequestToken = clientRequestToken - self.rowsToCreate = rowsToCreate - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) - try container.encode(self.rowsToCreate, forKey: .rowsToCreate) - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.rowsToCreate.forEach { - try $0.validate(name: "\(name).rowsToCreate[]") - } - try self.validate(self.rowsToCreate, name: "rowsToCreate", parent: name, max: 100) - try self.validate(self.rowsToCreate, name: "rowsToCreate", parent: name, min: 1) - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case clientRequestToken = "clientRequestToken" - case rowsToCreate = "rowsToCreate" - } - } - - public struct BatchCreateTableRowsResult: AWSDecodableShape { - /// The map of batch item id to the row id that was created for that item. - public let createdRows: [String: String] - /// The list of batch items in the request that could not be added to the table. Each element in this list contains one item from the request that could not be added to the table along with the reason why that item could not be added. - public let failedBatchItems: [FailedBatchItem]? - /// The updated workbook cursor after adding the new rows at the end of the table. - public let workbookCursor: Int64 - - public init(createdRows: [String: String], failedBatchItems: [FailedBatchItem]? 
= nil, workbookCursor: Int64) { - self.createdRows = createdRows - self.failedBatchItems = failedBatchItems - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case createdRows = "createdRows" - case failedBatchItems = "failedBatchItems" - case workbookCursor = "workbookCursor" - } - } - - public struct BatchDeleteTableRowsRequest: AWSEncodableShape { - /// The request token for performing the delete action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again. Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days. - public let clientRequestToken: String? - /// The list of row ids to delete from the table. You need to specify at least one row id in this list. Note that if one of the row ids provided in the request does not exist in the table, then the request fails and no rows are deleted from the table. - public let rowIds: [String] - /// The ID of the table where the rows are being deleted. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook where the rows are being deleted. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(clientRequestToken: String? = nil, rowIds: [String], tableId: String, workbookId: String) { - self.clientRequestToken = clientRequestToken - self.rowIds = rowIds - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) - try container.encode(self.rowIds, forKey: .rowIds) - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.rowIds.forEach { - try validate($0, name: "rowIds[]", parent: name, max: 77) - try validate($0, name: "rowIds[]", parent: name, min: 77) - try validate($0, name: "rowIds[]", parent: name, pattern: "^row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - try self.validate(self.rowIds, name: "rowIds", parent: name, max: 100) - try self.validate(self.rowIds, name: "rowIds", parent: name, min: 1) - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case clientRequestToken = "clientRequestToken" - case rowIds = "rowIds" - } - } - - public struct BatchDeleteTableRowsResult: AWSDecodableShape { - /// The list of row ids in the request that could not be deleted from the table. Each element in this list contains one row id from the request that could not be deleted along with the reason why that item could not be deleted. - public let failedBatchItems: [FailedBatchItem]? - /// The updated workbook cursor after deleting the rows from the table. - public let workbookCursor: Int64 - - public init(failedBatchItems: [FailedBatchItem]? = nil, workbookCursor: Int64) { - self.failedBatchItems = failedBatchItems - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case failedBatchItems = "failedBatchItems" - case workbookCursor = "workbookCursor" - } - } - - public struct BatchUpdateTableRowsRequest: AWSEncodableShape { - /// The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again. Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days. - public let clientRequestToken: String? - /// The list of rows to update in the table. 
Each item in this list needs to contain the row id to update along with the map of column id to cell values for each column in that row that needs to be updated. You need to specify at least one row in this list, and for each row, you need to specify at least one column to update. Note that if one of the row or column ids in the request does not exist in the table, then the request fails and no updates are made to the table. - public let rowsToUpdate: [UpdateRowData] - /// The ID of the table where the rows are being updated. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook where the rows are being updated. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(clientRequestToken: String? = nil, rowsToUpdate: [UpdateRowData], tableId: String, workbookId: String) { - self.clientRequestToken = clientRequestToken - self.rowsToUpdate = rowsToUpdate - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) - try container.encode(self.rowsToUpdate, forKey: .rowsToUpdate) - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.rowsToUpdate.forEach { - try $0.validate(name: "\(name).rowsToUpdate[]") - } - try self.validate(self.rowsToUpdate, name: "rowsToUpdate", parent: name, max: 100) - try self.validate(self.rowsToUpdate, name: "rowsToUpdate", parent: name, min: 1) - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case clientRequestToken = "clientRequestToken" - case rowsToUpdate = "rowsToUpdate" - } - } - - public struct BatchUpdateTableRowsResult: AWSDecodableShape { - /// The list of batch items in the request that could not be updated in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated. - public let failedBatchItems: [FailedBatchItem]? - /// The updated workbook cursor after adding the new rows at the end of the table. - public let workbookCursor: Int64 - - public init(failedBatchItems: [FailedBatchItem]? 
= nil, workbookCursor: Int64) { - self.failedBatchItems = failedBatchItems - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case failedBatchItems = "failedBatchItems" - case workbookCursor = "workbookCursor" - } - } - - public struct BatchUpsertTableRowsRequest: AWSEncodableShape { - /// The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again. Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days. - public let clientRequestToken: String? - /// The list of rows to upsert in the table. Each item in this list needs to have a batch item id to uniquely identify the element in the request, a filter expression to find the rows to update for that element and the cell values to set for each column in the upserted rows. You need to specify at least one item in this list. Note that if one of the filter formulas in the request fails to evaluate because of an error or one of the column ids in any of the rows does not exist in the table, then the request fails and no updates are made to the table. - public let rowsToUpsert: [UpsertRowData] - /// The ID of the table where the rows are being upserted. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook where the rows are being upserted. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(clientRequestToken: String? = nil, rowsToUpsert: [UpsertRowData], tableId: String, workbookId: String) { - self.clientRequestToken = clientRequestToken - self.rowsToUpsert = rowsToUpsert - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) - try container.encode(self.rowsToUpsert, forKey: .rowsToUpsert) - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.rowsToUpsert.forEach { - try $0.validate(name: "\(name).rowsToUpsert[]") - } - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case clientRequestToken = "clientRequestToken" - case rowsToUpsert = "rowsToUpsert" - } - } - - public struct BatchUpsertTableRowsResult: AWSDecodableShape { - /// The list of batch items in the request that could not be updated or appended in the table. Each element in this list contains one item from the request that could not be updated in the table along with the reason why that item could not be updated or appended. - public let failedBatchItems: [FailedBatchItem]? - /// A map with the batch item id as the key and the result of the upsert operation as the value. The result of the upsert operation specifies whether existing rows were updated or a new row was appended, along with the list of row ids that were affected. - public let rows: [String: UpsertRowsResult] - /// The updated workbook cursor after updating or appending rows in the table. - public let workbookCursor: Int64 - - public init(failedBatchItems: [FailedBatchItem]? = nil, rows: [String: UpsertRowsResult], workbookCursor: Int64) { - self.failedBatchItems = failedBatchItems - self.rows = rows - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case failedBatchItems = "failedBatchItems" - case rows = "rows" - case workbookCursor = "workbookCursor" - } - } - - public struct Cell: AWSDecodableShape { - /// The format of the cell. If this field is empty, then the format is either not specified in the workbook or the format is set to AUTO. - public let format: Format? - /// The formatted value of the cell. This is the value that you see displayed in the cell in the UI. Note that the formatted value of a cell is always represented as a string irrespective of the data that is stored in the cell. For example, if a cell contains a date, the formatted value of the cell is the string representation of the formatted date being shown in the cell in the UI. See details in the rawValue field below for how cells of different formats will have different raw and formatted values. - public let formattedValue: String? 
- /// A list of formatted values of the cell. This field is only returned when the cell is ROWSET format (aka multi-select or multi-record picklist). Values in the list are always represented as strings. The formattedValue field will be empty if this field is returned. - public let formattedValues: [String]? - /// The formula contained in the cell. This field is empty if a cell does not have a formula. - public let formula: String? - /// The raw value of the data contained in the cell. The raw value depends on the format of the data in the cell. However the attribute in the API return value is always a string containing the raw value. Cells with format DATE, DATE_TIME or TIME have the raw value as a floating point number where the whole number represents the number of days since 1/1/1900 and the fractional part represents the fraction of the day since midnight. For example, a cell with date 11/3/2020 has the raw value "44138". A cell with the time 9:00 AM has the raw value "0.375" and a cell with date/time value of 11/3/2020 9:00 AM has the raw value "44138.375". Notice that even though the raw value is a number in all three cases, it is still represented as a string. Cells with format NUMBER, CURRENCY, PERCENTAGE and ACCOUNTING have the raw value of the data as the number representing the data being displayed. For example, the number 1.325 with two decimal places in the format will have it's raw value as "1.325" and formatted value as "1.33". A currency value for $10 will have the raw value as "10" and formatted value as "$10.00". A value representing 20% with two decimal places in the format will have its raw value as "0.2" and the formatted value as "20.00%". An accounting value of -$25 will have "-25" as the raw value and "$ (25.00)" as the formatted value. Cells with format TEXT will have the raw text as the raw value. For example, a cell with text "John Smith" will have "John Smith" as both the raw value and the formatted value. Cells with format CONTACT will have the name of the contact as a formatted value and the email address of the contact as the raw value. For example, a contact for John Smith will have "John Smith" as the formatted value and "john.smith@example.com" as the raw value. Cells with format ROWLINK (aka picklist) will have the first column of the linked row as the formatted value and the row id of the linked row as the raw value. For example, a cell containing a picklist to a table that displays task status might have "Completed" as the formatted value and "row:dfcefaee-5b37-4355-8f28-40c3e4ff5dd4/ca432b2f-b8eb-431d-9fb5-cbe0342f9f03" as the raw value. Cells with format ROWSET (aka multi-select or multi-record picklist) will by default have the first column of each of the linked rows as the formatted value in the list, and the rowset id of the linked rows as the raw value. For example, a cell containing a multi-select picklist to a table that contains items might have "Item A", "Item B" in the formatted value list and "rows:b742c1f4-6cb0-4650-a845-35eb86fcc2bb/ [fdea123b-8f68-474a-aa8a-5ff87aa333af,6daf41f0-a138-4eee-89da-123086d36ecf]" as the raw value. Cells with format ATTACHMENT will have the name of the attachment as the formatted value and the attachment id as the raw value. For example, a cell containing an attachment named "image.jpeg" will have "image.jpeg" as the formatted value and "attachment:ca432b2f-b8eb-431d-9fb5-cbe0342f9f03" as the raw value. 
Cells with format AUTO or cells without any format that are auto-detected as one of the formats above will contain the raw and formatted values as mentioned above, based on the auto-detected formats. If there is no auto-detected format, the raw and formatted values will be the same as the data in the cell. - public let rawValue: String? - - public init(format: Format? = nil, formattedValue: String? = nil, formattedValues: [String]? = nil, formula: String? = nil, rawValue: String? = nil) { - self.format = format - self.formattedValue = formattedValue - self.formattedValues = formattedValues - self.formula = formula - self.rawValue = rawValue - } - - private enum CodingKeys: String, CodingKey { - case format = "format" - case formattedValue = "formattedValue" - case formattedValues = "formattedValues" - case formula = "formula" - case rawValue = "rawValue" - } - } - - public struct CellInput: AWSEncodableShape { - /// Fact represents the data that is entered into a cell. This data can be free text or a formula. Formulas need to start with the equals (=) sign. - public let fact: String? - /// A list representing the values that are entered into a ROWSET cell. Facts list can have either only values or rowIDs, and rowIDs should from the same table. - public let facts: [String]? - - public init(fact: String? = nil, facts: [String]? = nil) { - self.fact = fact - self.facts = facts - } - - public func validate(name: String) throws { - try self.validate(self.fact, name: "fact", parent: name, max: 8192) - try self.validate(self.fact, name: "fact", parent: name, pattern: "^[\\s\\S]*$") - try self.facts?.forEach { - try validate($0, name: "facts[]", parent: name, max: 8192) - try validate($0, name: "facts[]", parent: name, pattern: "^[\\s\\S]*$") - } - try self.validate(self.facts, name: "facts", parent: name, max: 220) - } - - private enum CodingKeys: String, CodingKey { - case fact = "fact" - case facts = "facts" - } - } - - public struct ColumnMetadata: AWSDecodableShape { - /// The format of the column. - public let format: Format - /// The name of the column. - public let name: String - - public init(format: Format, name: String) { - self.format = format - self.name = name - } - - private enum CodingKeys: String, CodingKey { - case format = "format" - case name = "name" - } - } - - public struct CreateRowData: AWSEncodableShape { - /// An external identifier that represents the single row that is being created as part of the BatchCreateTableRows request. This can be any string that you can use to identify the row in the request. The BatchCreateTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results. - public let batchItemId: String - /// A map representing the cells to create in the new row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell. 
- public let cellsToCreate: [String: CellInput] - - public init(batchItemId: String, cellsToCreate: [String: CellInput]) { - self.batchItemId = batchItemId - self.cellsToCreate = cellsToCreate - } - - public func validate(name: String) throws { - try self.validate(self.batchItemId, name: "batchItemId", parent: name, max: 64) - try self.validate(self.batchItemId, name: "batchItemId", parent: name, min: 1) - try self.validate(self.batchItemId, name: "batchItemId", parent: name, pattern: "^(?!\\s*$).+$") - try self.cellsToCreate.forEach { - try validate($0.key, name: "cellsToCreate.key", parent: name, max: 36) - try validate($0.key, name: "cellsToCreate.key", parent: name, min: 36) - try validate($0.key, name: "cellsToCreate.key", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try $0.value.validate(name: "\(name).cellsToCreate[\"\($0.key)\"]") - } - try self.validate(self.cellsToCreate, name: "cellsToCreate", parent: name, max: 100) - try self.validate(self.cellsToCreate, name: "cellsToCreate", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case batchItemId = "batchItemId" - case cellsToCreate = "cellsToCreate" - } - } - - public struct DataItem: AWSDecodableShape { - /// The formatted value of the data. e.g. John Smith. - public let formattedValue: String? - /// The overrideFormat is optional and is specified only if a particular row of data has a different format for the data than the default format defined on the screen or the table. - public let overrideFormat: Format? - /// The raw value of the data. e.g. jsmith@example.com - public let rawValue: String? - - public init(formattedValue: String? = nil, overrideFormat: Format? = nil, rawValue: String? = nil) { - self.formattedValue = formattedValue - self.overrideFormat = overrideFormat - self.rawValue = rawValue - } - - private enum CodingKeys: String, CodingKey { - case formattedValue = "formattedValue" - case overrideFormat = "overrideFormat" - case rawValue = "rawValue" - } - } - - public struct DelimitedTextImportOptions: AWSEncodableShape & AWSDecodableShape { - /// The encoding of the data in the input file. - public let dataCharacterEncoding: ImportDataCharacterEncoding? - /// The delimiter to use for separating columns in a single row of the input. - public let delimiter: String - /// Indicates whether the input file has a header row at the top containing the column names. - public let hasHeaderRow: Bool? - /// A parameter to indicate whether empty rows should be ignored or be included in the import. - public let ignoreEmptyRows: Bool? - - public init(dataCharacterEncoding: ImportDataCharacterEncoding? = nil, delimiter: String, hasHeaderRow: Bool? = nil, ignoreEmptyRows: Bool? 
= nil) { - self.dataCharacterEncoding = dataCharacterEncoding - self.delimiter = delimiter - self.hasHeaderRow = hasHeaderRow - self.ignoreEmptyRows = ignoreEmptyRows - } - - public func validate(name: String) throws { - try self.validate(self.delimiter, name: "delimiter", parent: name, max: 1) - try self.validate(self.delimiter, name: "delimiter", parent: name, min: 1) - try self.validate(self.delimiter, name: "delimiter", parent: name, pattern: "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]?$") - } - - private enum CodingKeys: String, CodingKey { - case dataCharacterEncoding = "dataCharacterEncoding" - case delimiter = "delimiter" - case hasHeaderRow = "hasHeaderRow" - case ignoreEmptyRows = "ignoreEmptyRows" - } - } - - public struct DescribeTableDataImportJobRequest: AWSEncodableShape { - /// The ID of the job that was returned by the StartTableDataImportJob request. If a job with the specified id could not be found, this API throws ResourceNotFoundException. - public let jobId: String - /// The ID of the table into which data was imported. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook into which data was imported. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(jobId: String, tableId: String, workbookId: String) { - self.jobId = jobId - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.jobId, key: "jobId") - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.jobId, name: "jobId", parent: name, max: 100) - try self.validate(self.jobId, name: "jobId", parent: name, min: 1) - try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$") - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct DescribeTableDataImportJobResult: AWSDecodableShape { - /// If job status is failed, error code to understand reason for the failure. - public let errorCode: ErrorCode? - /// The metadata about the job that was submitted for import. - public let jobMetadata: TableDataImportJobMetadata - /// The current status of the import job. - public let jobStatus: TableDataImportJobStatus - /// A message providing more details about the current status of the import job. - public let message: String - - public init(errorCode: ErrorCode? 
= nil, jobMetadata: TableDataImportJobMetadata, jobStatus: TableDataImportJobStatus, message: String) { - self.errorCode = errorCode - self.jobMetadata = jobMetadata - self.jobStatus = jobStatus - self.message = message - } - - private enum CodingKeys: String, CodingKey { - case errorCode = "errorCode" - case jobMetadata = "jobMetadata" - case jobStatus = "jobStatus" - case message = "message" - } - } - - public struct DestinationOptions: AWSEncodableShape & AWSDecodableShape { - /// A map of the column id to the import properties for each column. - public let columnMap: [String: SourceDataColumnProperties]? - - public init(columnMap: [String: SourceDataColumnProperties]? = nil) { - self.columnMap = columnMap - } - - public func validate(name: String) throws { - try self.columnMap?.forEach { - try validate($0.key, name: "columnMap.key", parent: name, max: 36) - try validate($0.key, name: "columnMap.key", parent: name, min: 36) - try validate($0.key, name: "columnMap.key", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try $0.value.validate(name: "\(name).columnMap[\"\($0.key)\"]") - } - try self.validate(self.columnMap, name: "columnMap", parent: name, max: 100) - } - - private enum CodingKeys: String, CodingKey { - case columnMap = "columnMap" - } - } - - public struct FailedBatchItem: AWSDecodableShape { - /// The error message that indicates why the batch item failed. - public let errorMessage: String - /// The id of the batch item that failed. This is the batch item id for the BatchCreateTableRows and BatchUpsertTableRows operations and the row id for the BatchUpdateTableRows and BatchDeleteTableRows operations. - public let id: String - - public init(errorMessage: String, id: String) { - self.errorMessage = errorMessage - self.id = id - } - - private enum CodingKeys: String, CodingKey { - case errorMessage = "errorMessage" - case id = "id" - } - } - - public struct Filter: AWSEncodableShape { - /// The optional contextRowId attribute can be used to specify the row id of the context row if the filter formula contains unqualified references to table columns and needs a context row to evaluate them successfully. - public let contextRowId: String? - /// A formula representing a filter function that returns zero or more matching rows from a table. Valid formulas in this field return a list of rows from a table. The most common ways of writing a formula to return a list of rows are to use the FindRow() or Filter() functions. Any other formula that returns zero or more rows is also acceptable. For example, you can use a formula that points to a cell that contains a filter function. - public let formula: String - - public init(contextRowId: String? 
= nil, formula: String) { - self.contextRowId = contextRowId - self.formula = formula - } - - public func validate(name: String) throws { - try self.validate(self.contextRowId, name: "contextRowId", parent: name, max: 77) - try self.validate(self.contextRowId, name: "contextRowId", parent: name, min: 77) - try self.validate(self.contextRowId, name: "contextRowId", parent: name, pattern: "^row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.formula, name: "formula", parent: name, max: 8192) - try self.validate(self.formula, name: "formula", parent: name, pattern: "^=") - } - - private enum CodingKeys: String, CodingKey { - case contextRowId = "contextRowId" - case formula = "formula" - } - } - - public struct GetScreenDataRequest: AWSEncodableShape { - /// The ID of the app that contains the screen. - public let appId: String - /// The number of results to be returned on a single page. Specify a number between 1 and 100. The maximum value is 100. This parameter is optional. If you don't specify this parameter, the default page size is 100. - public let maxResults: Int? - /// This parameter is optional. If a nextToken is not specified, the API returns the first page of data. Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException. - public let nextToken: String? - /// The ID of the screen. - public let screenId: String - /// Variables are optional and are needed only if the screen requires them to render correctly. Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen. - public let variables: [String: VariableValue]? - /// The ID of the workbook that contains the screen. - public let workbookId: String - - public init(appId: String, maxResults: Int? = nil, nextToken: String? = nil, screenId: String, variables: [String: VariableValue]? 
= nil, workbookId: String) { - self.appId = appId - self.maxResults = maxResults - self.nextToken = nextToken - self.screenId = screenId - self.variables = variables - self.workbookId = workbookId - } - - public func validate(name: String) throws { - try self.validate(self.appId, name: "appId", parent: name, max: 36) - try self.validate(self.appId, name: "appId", parent: name, min: 36) - try self.validate(self.appId, name: "appId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.validate(self.screenId, name: "screenId", parent: name, max: 36) - try self.validate(self.screenId, name: "screenId", parent: name, min: 36) - try self.validate(self.screenId, name: "screenId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.variables?.forEach { - try validate($0.key, name: "variables.key", parent: name, max: 255) - try validate($0.key, name: "variables.key", parent: name, min: 1) - try validate($0.key, name: "variables.key", parent: name, pattern: "^(?!\\s*$).+$") - try $0.value.validate(name: "\(name).variables[\"\($0.key)\"]") - } - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case appId = "appId" - case maxResults = "maxResults" - case nextToken = "nextToken" - case screenId = "screenId" - case variables = "variables" - case workbookId = "workbookId" - } - } - - public struct GetScreenDataResult: AWSDecodableShape { - /// Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the query has been loaded. - public let nextToken: String? - /// A map of all the rows on the screen keyed by block name. - public let results: [String: ResultSet] - /// Indicates the cursor of the workbook at which the data returned by this workbook is read. Workbook cursor keeps increasing with every update and the increments are not sequential. - public let workbookCursor: Int64 - - public init(nextToken: String? 
= nil, results: [String: ResultSet], workbookCursor: Int64) { - self.nextToken = nextToken - self.results = results - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "nextToken" - case results = "results" - case workbookCursor = "workbookCursor" - } - } - - public struct ImportDataSource: AWSEncodableShape & AWSDecodableShape { - /// The configuration parameters for the data source of the import - public let dataSourceConfig: ImportDataSourceConfig - - public init(dataSourceConfig: ImportDataSourceConfig) { - self.dataSourceConfig = dataSourceConfig - } - - public func validate(name: String) throws { - try self.dataSourceConfig.validate(name: "\(name).dataSourceConfig") - } - - private enum CodingKeys: String, CodingKey { - case dataSourceConfig = "dataSourceConfig" - } - } - - public struct ImportDataSourceConfig: AWSEncodableShape & AWSDecodableShape { - /// The URL from which source data will be downloaded for the import request. - public let dataSourceUrl: String? - - public init(dataSourceUrl: String? = nil) { - self.dataSourceUrl = dataSourceUrl - } - - public func validate(name: String) throws { - try self.validate(self.dataSourceUrl, name: "dataSourceUrl", parent: name, max: 8000) - try self.validate(self.dataSourceUrl, name: "dataSourceUrl", parent: name, min: 1) - try self.validate(self.dataSourceUrl, name: "dataSourceUrl", parent: name, pattern: "^https:\\/\\/[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$") - } - - private enum CodingKeys: String, CodingKey { - case dataSourceUrl = "dataSourceUrl" - } - } - - public struct ImportJobSubmitter: AWSDecodableShape { - /// The email id of the submitter of the import job, if available. - public let email: String? - /// The AWS user ARN of the submitter of the import job, if available. - public let userArn: String? - - public init(email: String? = nil, userArn: String? = nil) { - self.email = email - self.userArn = userArn - } - - private enum CodingKeys: String, CodingKey { - case email = "email" - case userArn = "userArn" - } - } - - public struct ImportOptions: AWSEncodableShape & AWSDecodableShape { - /// Options relating to parsing delimited text. Required if dataFormat is DELIMITED_TEXT. - public let delimitedTextOptions: DelimitedTextImportOptions? - /// Options relating to the destination of the import request. - public let destinationOptions: DestinationOptions? - - public init(delimitedTextOptions: DelimitedTextImportOptions? = nil, destinationOptions: DestinationOptions? = nil) { - self.delimitedTextOptions = delimitedTextOptions - self.destinationOptions = destinationOptions - } - - public func validate(name: String) throws { - try self.delimitedTextOptions?.validate(name: "\(name).delimitedTextOptions") - try self.destinationOptions?.validate(name: "\(name).destinationOptions") - } - - private enum CodingKeys: String, CodingKey { - case delimitedTextOptions = "delimitedTextOptions" - case destinationOptions = "destinationOptions" - } - } - - public struct InvokeScreenAutomationRequest: AWSEncodableShape { - /// The ID of the app that contains the screen automation. - public let appId: String - /// The request token for performing the automation action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. 
The service ensures that if the first call using that request token is successfully performed, the second call will return the response of the previous call rather than performing the action again. Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days. - public let clientRequestToken: String? - /// The row ID for the automation if the automation is defined inside a block with source or list. - public let rowId: String? - /// The ID of the automation action to be performed. - public let screenAutomationId: String - /// The ID of the screen that contains the screen automation. - public let screenId: String - /// Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an object which currently has only one property, rawValue, which holds the value of the variable to be passed to the screen. Any variables defined in a screen are required to be passed in the call. - public let variables: [String: VariableValue]? - /// The ID of the workbook that contains the screen automation. - public let workbookId: String - - public init(appId: String, clientRequestToken: String? = nil, rowId: String? = nil, screenAutomationId: String, screenId: String, variables: [String: VariableValue]? = nil, workbookId: String) { - self.appId = appId - self.clientRequestToken = clientRequestToken - self.rowId = rowId - self.screenAutomationId = screenAutomationId - self.screenId = screenId - self.variables = variables - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.appId, key: "appId") - try container.encodeIfPresent(self.clientRequestToken, forKey: .clientRequestToken) - try container.encodeIfPresent(self.rowId, forKey: .rowId) - request.encodePath(self.screenAutomationId, key: "screenAutomationId") - request.encodePath(self.screenId, key: "screenId") - try container.encodeIfPresent(self.variables, forKey: .variables) - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.appId, name: "appId", parent: name, max: 36) - try self.validate(self.appId, name: "appId", parent: name, min: 36) - try self.validate(self.appId, name: "appId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.validate(self.rowId, name: "rowId", parent: name, max: 77) - try self.validate(self.rowId, name: "rowId", parent: name, min: 77) - try self.validate(self.rowId, name: "rowId", parent: name, pattern: "^row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.screenAutomationId, name: "screenAutomationId", parent: name, max: 36) - try self.validate(self.screenAutomationId, name: "screenAutomationId", parent: name, min: 36) - try self.validate(self.screenAutomationId, name: "screenAutomationId", parent: name, pattern: 
"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.screenId, name: "screenId", parent: name, max: 36) - try self.validate(self.screenId, name: "screenId", parent: name, min: 36) - try self.validate(self.screenId, name: "screenId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.variables?.forEach { - try validate($0.key, name: "variables.key", parent: name, max: 255) - try validate($0.key, name: "variables.key", parent: name, min: 1) - try validate($0.key, name: "variables.key", parent: name, pattern: "^(?!\\s*$).+$") - try $0.value.validate(name: "\(name).variables[\"\($0.key)\"]") - } - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case clientRequestToken = "clientRequestToken" - case rowId = "rowId" - case variables = "variables" - } - } - - public struct InvokeScreenAutomationResult: AWSDecodableShape { - /// The updated workbook cursor after performing the automation action. - public let workbookCursor: Int64 - - public init(workbookCursor: Int64) { - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case workbookCursor = "workbookCursor" - } - } - - public struct ListTableColumnsRequest: AWSEncodableShape { - /// This parameter is optional. If a nextToken is not specified, the API returns the first page of data. Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException. - public let nextToken: String? - /// The ID of the table whose columns are being retrieved. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook that contains the table whose columns are being retrieved. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(nextToken: String? = nil, tableId: String, workbookId: String) { - self.nextToken = nextToken - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.nextToken, key: "nextToken") - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListTableColumnsResult: AWSDecodableShape { - /// Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded. - public let nextToken: String? - /// The list of columns in the table. - public let tableColumns: [TableColumn] - /// Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential. - public let workbookCursor: Int64? - - public init(nextToken: String? = nil, tableColumns: [TableColumn], workbookCursor: Int64? = nil) { - self.nextToken = nextToken - self.tableColumns = tableColumns - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "nextToken" - case tableColumns = "tableColumns" - case workbookCursor = "workbookCursor" - } - } - - public struct ListTableRowsRequest: AWSEncodableShape { - /// The maximum number of rows to return in each page of the results. - public let maxResults: Int? - /// This parameter is optional. If a nextToken is not specified, the API returns the first page of data. Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException. - public let nextToken: String? - /// This parameter is optional. If one or more row ids are specified in this list, then only the specified row ids are returned in the result. If no row ids are specified here, then all the rows in the table are returned. - public let rowIds: [String]? - /// The ID of the table whose rows are being retrieved. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook that contains the table whose rows are being retrieved. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(maxResults: Int? = nil, nextToken: String? = nil, rowIds: [String]? 
= nil, tableId: String, workbookId: String) { - self.maxResults = maxResults - self.nextToken = nextToken - self.rowIds = rowIds - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.maxResults, forKey: .maxResults) - try container.encodeIfPresent(self.nextToken, forKey: .nextToken) - try container.encodeIfPresent(self.rowIds, forKey: .rowIds) - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.rowIds?.forEach { - try validate($0, name: "rowIds[]", parent: name, max: 77) - try validate($0, name: "rowIds[]", parent: name, min: 77) - try validate($0, name: "rowIds[]", parent: name, pattern: "^row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - try self.validate(self.rowIds, name: "rowIds", parent: name, max: 100) - try self.validate(self.rowIds, name: "rowIds", parent: name, min: 1) - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case maxResults = "maxResults" - case nextToken = "nextToken" - case rowIds = "rowIds" - } - } - - public struct ListTableRowsResult: AWSDecodableShape { - /// The list of columns in the table whose row data is returned in the result. - public let columnIds: [String] - /// Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded. - public let nextToken: String? - /// The list of row ids included in the request that were not found in the table. - public let rowIdsNotFound: [String]? - /// The list of rows in the table. Note that this result is paginated, so this list contains a maximum of 100 rows. - public let rows: [TableRow] - /// Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential. - public let workbookCursor: Int64 - - public init(columnIds: [String], nextToken: String? = nil, rowIdsNotFound: [String]? 
= nil, rows: [TableRow], workbookCursor: Int64) { - self.columnIds = columnIds - self.nextToken = nextToken - self.rowIdsNotFound = rowIdsNotFound - self.rows = rows - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case columnIds = "columnIds" - case nextToken = "nextToken" - case rowIdsNotFound = "rowIdsNotFound" - case rows = "rows" - case workbookCursor = "workbookCursor" - } - } - - public struct ListTablesRequest: AWSEncodableShape { - /// The maximum number of tables to return in each page of the results. - public let maxResults: Int? - /// This parameter is optional. If a nextToken is not specified, the API returns the first page of data. Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException. - public let nextToken: String? - /// The ID of the workbook whose tables are being retrieved. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(maxResults: Int? = nil, nextToken: String? = nil, workbookId: String) { - self.maxResults = maxResults - self.nextToken = nextToken - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodeQuery(self.maxResults, key: "maxResults") - request.encodeQuery(self.nextToken, key: "nextToken") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListTablesResult: AWSDecodableShape { - /// Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded. - public let nextToken: String? - /// The list of tables in the workbook. - public let tables: [Table] - /// Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential. - public let workbookCursor: Int64? - - public init(nextToken: String? = nil, tables: [Table], workbookCursor: Int64? = nil) { - self.nextToken = nextToken - self.tables = tables - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "nextToken" - case tables = "tables" - case workbookCursor = "workbookCursor" - } - } - - public struct ListTagsForResourceRequest: AWSEncodableShape { - /// The resource's Amazon Resource Name (ARN). 
- public let resourceArn: String - - public init(resourceArn: String) { - self.resourceArn = resourceArn - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 256) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:honeycode:.+:[0-9]{12}:.+:.+$") - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListTagsForResourceResult: AWSDecodableShape { - /// The resource's tags. - public let tags: [String: String]? - - public init(tags: [String: String]? = nil) { - self.tags = tags - } - - private enum CodingKeys: String, CodingKey { - case tags = "tags" - } - } - - public struct QueryTableRowsRequest: AWSEncodableShape { - /// An object that represents a filter formula along with the id of the context row under which the filter function needs to evaluate. - public let filterFormula: Filter - /// The maximum number of rows to return in each page of the results. - public let maxResults: Int? - /// This parameter is optional. If a nextToken is not specified, the API returns the first page of data. Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API will throw ValidationException. - public let nextToken: String? - /// The ID of the table whose rows are being queried. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let tableId: String - /// The ID of the workbook whose table rows are being queried. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. - public let workbookId: String - - public init(filterFormula: Filter, maxResults: Int? = nil, nextToken: String? = nil, tableId: String, workbookId: String) { - self.filterFormula = filterFormula - self.maxResults = maxResults - self.nextToken = nextToken - self.tableId = tableId - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encode(self.filterFormula, forKey: .filterFormula) - try container.encodeIfPresent(self.maxResults, forKey: .maxResults) - try container.encodeIfPresent(self.nextToken, forKey: .nextToken) - request.encodePath(self.tableId, key: "tableId") - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.filterFormula.validate(name: "\(name).filterFormula") - try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.validate(self.tableId, name: "tableId", parent: name, max: 36) - try self.validate(self.tableId, name: "tableId", parent: name, min: 36) - try self.validate(self.tableId, name: "tableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case filterFormula = "filterFormula" - case maxResults = "maxResults" - case nextToken = "nextToken" - } - } - - public struct QueryTableRowsResult: AWSDecodableShape { - /// The list of columns in the table whose row data is returned in the result. - public let columnIds: [String] - /// Provides the pagination token to load the next page if there are more results matching the request. If a pagination token is not present in the response, it means that all data matching the request has been loaded. - public let nextToken: String? - /// The list of rows in the table that match the query filter. - public let rows: [TableRow] - /// Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor keeps increasing with every update and the increments are not sequential. - public let workbookCursor: Int64 - - public init(columnIds: [String], nextToken: String? = nil, rows: [TableRow], workbookCursor: Int64) { - self.columnIds = columnIds - self.nextToken = nextToken - self.rows = rows - self.workbookCursor = workbookCursor - } - - private enum CodingKeys: String, CodingKey { - case columnIds = "columnIds" - case nextToken = "nextToken" - case rows = "rows" - case workbookCursor = "workbookCursor" - } - } - - public struct ResultRow: AWSDecodableShape { - /// List of all the data cells in a row. - public let dataItems: [DataItem] - /// The ID for a particular row. - public let rowId: String? - - public init(dataItems: [DataItem], rowId: String? = nil) { - self.dataItems = dataItems - self.rowId = rowId - } - - private enum CodingKeys: String, CodingKey { - case dataItems = "dataItems" - case rowId = "rowId" - } - } - - public struct ResultSet: AWSDecodableShape { - /// List of headers for all the data cells in the block. The header identifies the name and default format of the data cell. Data cells appear in the same order in all rows as defined in the header. 
The names and formats are not repeated in the rows. If a particular row does not have a value for a data cell, a blank value is used. For example, a task list that displays the task name, due date and assigned person might have headers [ { "name": "Task Name"}, {"name": "Due Date", "format": "DATE"}, {"name": "Assigned", "format": "CONTACT"} ]. Every row in the result will have the task name as the first item, due date as the second item and assigned person as the third item. If a particular task does not have a due date, that row will still have a blank value in the second element and the assigned person will still be in the third element. - public let headers: [ColumnMetadata] - /// List of rows returned by the request. Each row has a row Id and a list of data cells in that row. The data cells will be present in the same order as they are defined in the header. - public let rows: [ResultRow] - - public init(headers: [ColumnMetadata], rows: [ResultRow]) { - self.headers = headers - self.rows = rows - } - - private enum CodingKeys: String, CodingKey { - case headers = "headers" - case rows = "rows" - } - } - - public struct SourceDataColumnProperties: AWSEncodableShape & AWSDecodableShape { - /// The index of the column in the input file. - public let columnIndex: Int? - - public init(columnIndex: Int? = nil) { - self.columnIndex = columnIndex - } - - public func validate(name: String) throws { - try self.validate(self.columnIndex, name: "columnIndex", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case columnIndex = "columnIndex" - } - } - - public struct StartTableDataImportJobRequest: AWSEncodableShape { - /// The request token for performing the update action. Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error like a failed network connection, you can retry the call with the same request token. The service ensures that if the first call using that request token is successfully performed, the second call will not perform the action again. Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests spanning hours or days. - public let clientRequestToken: String - /// The format of the data that is being imported. Currently the only option supported is "DELIMITED_TEXT". - public let dataFormat: ImportSourceDataFormat - /// The source of the data that is being imported. The size of source must be no larger than 100 MB. Source must have no more than 100,000 cells and no more than 1,000 rows. - public let dataSource: ImportDataSource - /// The ID of the table where the rows are being imported. If a table with the specified id could not be found, this API throws ResourceNotFoundException. - public let destinationTableId: String - /// The options for customizing this import request. - public let importOptions: ImportOptions - /// The ID of the workbook where the rows are being imported. If a workbook with the specified id could not be found, this API throws ResourceNotFoundException. 
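// Illustrative sketch (editor's example, not part of the patch): assembling a delimited-text
// import request from the Honeycode shapes shown above (assumes `import SotoHoneycode`).
// The ids are placeholders, `.delimitedText` is the assumed Swift spelling of the
// "DELIMITED_TEXT" format, and the DelimitedTextImportOptions initializer is assumed to
// accept these labels with its remaining parameters defaulted.
let importRequest = StartTableDataImportJobRequest(
    clientRequestToken: "3e3a4b5c-6d7e-8f90-a1b2-c3d4e5f60789",  // 32-64 chars, reused verbatim on retries
    dataFormat: .delimitedText,
    dataSource: ImportDataSource(
        dataSourceConfig: ImportDataSourceConfig(dataSourceUrl: "https://example.com/tasks.csv")
    ),
    destinationTableId: "11111111-2222-3333-4444-555555555555",  // placeholder table id
    importOptions: ImportOptions(
        delimitedTextOptions: DelimitedTextImportOptions(delimiter: ",", hasHeaderRow: true),
        destinationOptions: DestinationOptions(
            columnMap: ["22222222-3333-4444-5555-666666666666": SourceDataColumnProperties(columnIndex: 1)]
        )
    ),
    workbookId: "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"           // placeholder workbook id
)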
- public let workbookId: String - - public init(clientRequestToken: String, dataFormat: ImportSourceDataFormat, dataSource: ImportDataSource, destinationTableId: String, importOptions: ImportOptions, workbookId: String) { - self.clientRequestToken = clientRequestToken - self.dataFormat = dataFormat - self.dataSource = dataSource - self.destinationTableId = destinationTableId - self.importOptions = importOptions - self.workbookId = workbookId - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - try container.encode(self.clientRequestToken, forKey: .clientRequestToken) - try container.encode(self.dataFormat, forKey: .dataFormat) - try container.encode(self.dataSource, forKey: .dataSource) - request.encodePath(self.destinationTableId, key: "destinationTableId") - try container.encode(self.importOptions, forKey: .importOptions) - request.encodePath(self.workbookId, key: "workbookId") - } - - public func validate(name: String) throws { - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) - try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, pattern: "^(?!\\s*$).+$") - try self.dataSource.validate(name: "\(name).dataSource") - try self.validate(self.destinationTableId, name: "destinationTableId", parent: name, max: 36) - try self.validate(self.destinationTableId, name: "destinationTableId", parent: name, min: 36) - try self.validate(self.destinationTableId, name: "destinationTableId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try self.importOptions.validate(name: "\(name).importOptions") - try self.validate(self.workbookId, name: "workbookId", parent: name, max: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, min: 36) - try self.validate(self.workbookId, name: "workbookId", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case clientRequestToken = "clientRequestToken" - case dataFormat = "dataFormat" - case dataSource = "dataSource" - case importOptions = "importOptions" - } - } - - public struct StartTableDataImportJobResult: AWSDecodableShape { - /// The id that is assigned to this import job. Future requests to find out the status of this import job need to send this id in the appropriate parameter in the request. - public let jobId: String - /// The status of the import job immediately after submitting the request. - public let jobStatus: TableDataImportJobStatus - - public init(jobId: String, jobStatus: TableDataImportJobStatus) { - self.jobId = jobId - self.jobStatus = jobStatus - } - - private enum CodingKeys: String, CodingKey { - case jobId = "jobId" - case jobStatus = "jobStatus" - } - } - - public struct Table: AWSDecodableShape { - /// The id of the table. - public let tableId: String? - /// The name of the table. - public let tableName: String? - - public init(tableId: String? = nil, tableName: String? 
= nil) { - self.tableId = tableId - self.tableName = tableName - } - - private enum CodingKeys: String, CodingKey { - case tableId = "tableId" - case tableName = "tableName" - } - } - - public struct TableColumn: AWSDecodableShape { - /// The column level format that is applied in the table. An empty value in this field means that the column format is the default value 'AUTO'. - public let format: Format? - /// The id of the column in the table. - public let tableColumnId: String? - /// The name of the column in the table. - public let tableColumnName: String? - - public init(format: Format? = nil, tableColumnId: String? = nil, tableColumnName: String? = nil) { - self.format = format - self.tableColumnId = tableColumnId - self.tableColumnName = tableColumnName - } - - private enum CodingKeys: String, CodingKey { - case format = "format" - case tableColumnId = "tableColumnId" - case tableColumnName = "tableColumnName" - } - } - - public struct TableDataImportJobMetadata: AWSDecodableShape { - /// The source of the data that was submitted for import. - public let dataSource: ImportDataSource - /// The options that was specified at the time of submitting the import request. - public let importOptions: ImportOptions - /// Details about the submitter of the import request. - public let submitter: ImportJobSubmitter - /// The timestamp when the job was submitted for import. - public let submitTime: Date - - public init(dataSource: ImportDataSource, importOptions: ImportOptions, submitter: ImportJobSubmitter, submitTime: Date) { - self.dataSource = dataSource - self.importOptions = importOptions - self.submitter = submitter - self.submitTime = submitTime - } - - private enum CodingKeys: String, CodingKey { - case dataSource = "dataSource" - case importOptions = "importOptions" - case submitter = "submitter" - case submitTime = "submitTime" - } - } - - public struct TableRow: AWSDecodableShape { - /// A list of cells in the table row. The cells appear in the same order as the columns of the table. - public let cells: [Cell] - /// The id of the row in the table. - public let rowId: String - - public init(cells: [Cell], rowId: String) { - self.cells = cells - self.rowId = rowId - } - - private enum CodingKeys: String, CodingKey { - case cells = "cells" - case rowId = "rowId" - } - } - - public struct TagResourceRequest: AWSEncodableShape { - /// The resource's Amazon Resource Name (ARN). - public let resourceArn: String - /// A list of tags to apply to the resource. - public let tags: [String: String] - - public init(resourceArn: String, tags: [String: String]) { - self.resourceArn = resourceArn - self.tags = tags - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - try container.encode(self.tags, forKey: .tags) - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 256) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:honeycode:.+:[0-9]{12}:.+:.+$") - try self.tags.forEach { - try validate($0.key, name: "tags.key", parent: name, max: 100) - try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$") - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 100) - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, min: 1) - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$") - } - try self.validate(self.tags, name: "tags", parent: name, max: 100) - } - - private enum CodingKeys: String, CodingKey { - case tags = "tags" - } - } - - public struct TagResourceResult: AWSDecodableShape { - public init() {} - } - - public struct UntagResourceRequest: AWSEncodableShape { - /// The resource's Amazon Resource Name (ARN). - public let resourceArn: String - /// A list of tag keys to remove from the resource. - public let tagKeys: [String] - - public init(resourceArn: String, tagKeys: [String]) { - self.resourceArn = resourceArn - self.tagKeys = tagKeys - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - request.encodeQuery(self.tagKeys, key: "tagKeys") - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 256) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:honeycode:.+:[0-9]{12}:.+:.+$") - try self.tagKeys.forEach { - try validate($0, name: "tagKeys[]", parent: name, max: 100) - try validate($0, name: "tagKeys[]", parent: name, min: 1) - try validate($0, name: "tagKeys[]", parent: name, pattern: "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$") - } - try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 100) - } - - private enum CodingKeys: CodingKey {} - } - - public struct UntagResourceResult: AWSDecodableShape { - public init() {} - } - - public struct UpdateRowData: AWSEncodableShape { - /// A map representing the cells to update in the given row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell. - public let cellsToUpdate: [String: CellInput] - /// The id of the row that needs to be updated. 
- public let rowId: String - - public init(cellsToUpdate: [String: CellInput], rowId: String) { - self.cellsToUpdate = cellsToUpdate - self.rowId = rowId - } - - public func validate(name: String) throws { - try self.cellsToUpdate.forEach { - try validate($0.key, name: "cellsToUpdate.key", parent: name, max: 36) - try validate($0.key, name: "cellsToUpdate.key", parent: name, min: 36) - try validate($0.key, name: "cellsToUpdate.key", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try $0.value.validate(name: "\(name).cellsToUpdate[\"\($0.key)\"]") - } - try self.validate(self.cellsToUpdate, name: "cellsToUpdate", parent: name, max: 100) - try self.validate(self.cellsToUpdate, name: "cellsToUpdate", parent: name, min: 1) - try self.validate(self.rowId, name: "rowId", parent: name, max: 77) - try self.validate(self.rowId, name: "rowId", parent: name, min: 77) - try self.validate(self.rowId, name: "rowId", parent: name, pattern: "^row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - } - - private enum CodingKeys: String, CodingKey { - case cellsToUpdate = "cellsToUpdate" - case rowId = "rowId" - } - } - - public struct UpsertRowData: AWSEncodableShape { - /// An external identifier that represents a single item in the request that is being upserted as part of the BatchUpsertTableRows request. This can be any string that you can use to identify the item in the request. The BatchUpsertTableRows API puts the batch item id in the results to allow you to link data in the request to data in the results. - public let batchItemId: String - /// A map representing the cells to update for the matching rows or an appended row. The key is the column id of the cell and the value is the CellInput object that represents the data to set in that cell. - public let cellsToUpdate: [String: CellInput] - /// The filter formula to use to find existing matching rows to update. The formula needs to return zero or more rows. If the formula returns 0 rows, then a new row will be appended in the target table. If the formula returns one or more rows, then the returned rows will be updated. Note that the filter formula needs to return rows from the target table for the upsert operation to succeed. If the filter formula has a syntax error or it doesn't evaluate to zero or more rows in the target table for any one item in the input list, then the entire BatchUpsertTableRows request fails and no updates are made to the table. 
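// Illustrative sketch (editor's example, not part of the patch): how the append-or-update
// semantics described above map onto an UpsertRowData value. The column id and formula are
// placeholders, and CellInput(fact:) is assumed from the wider Honeycode shapes file rather
// than from this hunk; the formula must start with "=" to pass the Filter validation shown earlier.
let matchOpenTasks = Filter(formula: "=Filter(Tasks, \"Tasks[Status]=%\", \"Open\")")
let upsert = UpsertRowData(
    batchItemId: "task-42",  // caller-chosen id, echoed back in the BatchUpsertTableRows results
    cellsToUpdate: [
        // column id -> new cell value; 0 matching rows appends a row, 1+ matching rows are updated
        "11111111-2222-3333-4444-555555555555": CellInput(fact: "Done")
    ],
    filter: matchOpenTasks
)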
- public let filter: Filter - - public init(batchItemId: String, cellsToUpdate: [String: CellInput], filter: Filter) { - self.batchItemId = batchItemId - self.cellsToUpdate = cellsToUpdate - self.filter = filter - } - - public func validate(name: String) throws { - try self.validate(self.batchItemId, name: "batchItemId", parent: name, max: 64) - try self.validate(self.batchItemId, name: "batchItemId", parent: name, min: 1) - try self.validate(self.batchItemId, name: "batchItemId", parent: name, pattern: "^(?!\\s*$).+$") - try self.cellsToUpdate.forEach { - try validate($0.key, name: "cellsToUpdate.key", parent: name, max: 36) - try validate($0.key, name: "cellsToUpdate.key", parent: name, min: 36) - try validate($0.key, name: "cellsToUpdate.key", parent: name, pattern: "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - try $0.value.validate(name: "\(name).cellsToUpdate[\"\($0.key)\"]") - } - try self.validate(self.cellsToUpdate, name: "cellsToUpdate", parent: name, max: 100) - try self.validate(self.cellsToUpdate, name: "cellsToUpdate", parent: name, min: 1) - try self.filter.validate(name: "\(name).filter") - } - - private enum CodingKeys: String, CodingKey { - case batchItemId = "batchItemId" - case cellsToUpdate = "cellsToUpdate" - case filter = "filter" - } - } - - public struct UpsertRowsResult: AWSDecodableShape { - /// The list of row ids that were changed as part of an upsert row operation. If the upsert resulted in an update, this list could potentially contain multiple rows that matched the filter and hence got updated. If the upsert resulted in an append, this list would only have the single row that was appended. - public let rowIds: [String] - /// The result of the upsert action. - public let upsertAction: UpsertAction - - public init(rowIds: [String], upsertAction: UpsertAction) { - self.rowIds = rowIds - self.upsertAction = upsertAction - } - - private enum CodingKeys: String, CodingKey { - case rowIds = "rowIds" - case upsertAction = "upsertAction" - } - } - - public struct VariableValue: AWSEncodableShape { - /// Raw value of the variable. - public let rawValue: String - - public init(rawValue: String) { - self.rawValue = rawValue - } - - public func validate(name: String) throws { - try self.validate(self.rawValue, name: "rawValue", parent: name, max: 32767) - try self.validate(self.rawValue, name: "rawValue", parent: name, pattern: "^[\\s\\S]*$") - } - - private enum CodingKeys: String, CodingKey { - case rawValue = "rawValue" - } - } -} - -// MARK: - Errors - -/// Error enum for Honeycode -public struct HoneycodeErrorType: AWSErrorType { - enum Code: String { - case accessDeniedException = "AccessDeniedException" - case automationExecutionException = "AutomationExecutionException" - case automationExecutionTimeoutException = "AutomationExecutionTimeoutException" - case internalServerException = "InternalServerException" - case requestTimeoutException = "RequestTimeoutException" - case resourceNotFoundException = "ResourceNotFoundException" - case serviceQuotaExceededException = "ServiceQuotaExceededException" - case serviceUnavailableException = "ServiceUnavailableException" - case throttlingException = "ThrottlingException" - case validationException = "ValidationException" - } - - private let error: Code - public let context: AWSErrorContext? 
- - /// initialize Honeycode - public init?(errorCode: String, context: AWSErrorContext) { - guard let error = Code(rawValue: errorCode) else { return nil } - self.error = error - self.context = context - } - - internal init(_ error: Code) { - self.error = error - self.context = nil - } - - /// return error code string - public var errorCode: String { self.error.rawValue } - - /// You do not have sufficient access to perform this action. Check that the workbook is owned by you and your IAM policy allows access to the resource in the request. - public static var accessDeniedException: Self { .init(.accessDeniedException) } - /// The automation execution did not end successfully. - public static var automationExecutionException: Self { .init(.automationExecutionException) } - /// The automation execution timed out. - public static var automationExecutionTimeoutException: Self { .init(.automationExecutionTimeoutException) } - /// There were unexpected errors from the server. - public static var internalServerException: Self { .init(.internalServerException) } - /// The request timed out. - public static var requestTimeoutException: Self { .init(.requestTimeoutException) } - /// A Workbook, Table, App, Screen or Screen Automation was not found with the given ID. - public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - /// The request caused service quota to be breached. - public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } - /// Remote service is unreachable. - public static var serviceUnavailableException: Self { .init(.serviceUnavailableException) } - /// Tps(transactions per second) rate reached. - public static var throttlingException: Self { .init(.throttlingException) } - /// Request is invalid. The message in the response contains details on why the request is invalid. - public static var validationException: Self { .init(.validationException) } -} - -extension HoneycodeErrorType: Equatable { - public static func == (lhs: HoneycodeErrorType, rhs: HoneycodeErrorType) -> Bool { - lhs.error == rhs.error - } -} - -extension HoneycodeErrorType: CustomStringConvertible { - public var description: String { - return "\(self.error.rawValue): \(self.message ?? "")" - } -} diff --git a/Sources/Soto/Services/IVSRealTime/IVSRealTime_api.swift b/Sources/Soto/Services/IVSRealTime/IVSRealTime_api.swift index 5084d48ce4..4de9f4e727 100644 --- a/Sources/Soto/Services/IVSRealTime/IVSRealTime_api.swift +++ b/Sources/Soto/Services/IVSRealTime/IVSRealTime_api.swift @@ -19,11 +19,9 @@ /// Service object for interacting with AWS IVSRealTime service. /// -/// Introduction The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP +/// The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP /// API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, -/// including errors. Terminology: A stage is a virtual space where participants can exchange video in real time. A participant token is a token that authenticates a participant when they join a stage. A participant object represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants. 
Server-side composition: The composition process composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition endpoints support this process. Server-side composition: A composition controls the look of the outputs, including how participants are positioned in the video. Resources The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS Real-Time Streaming): Stage — A stage is a virtual space where participants can exchange video in real time. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS stages has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource. Stages Endpoints CreateParticipantToken — Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire. CreateStage — Creates a new stage (and optionally participant tokens). DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants). DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage. GetParticipant — Gets information about the specified participant token. GetStage — Gets information for the specified stage. GetStageSession — Gets information for the specified stage session. ListParticipantEvents — Lists events for a specified participant that occurred during a specified stage session. ListParticipants — Lists all participants in a specified stage session. ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed. ListStageSessions — Gets all sessions for a specified stage. UpdateStage — Updates a stage’s configuration. Composition Endpoints GetComposition — Gets information about the specified Composition resource. ListCompositions — Gets summary information about all Compositions in your account, in the AWS region where the API request is processed. StartComposition — Starts a Composition from a stage based on the configuration provided in the request. StopComposition — Stops and deletes a Composition resource. Any broadcast from the Composition resource is stopped. EncoderConfiguration Endpoints CreateEncoderConfiguration — Creates an EncoderConfiguration object. DeleteEncoderConfiguration — Deletes an EncoderConfiguration resource. Ensures that no Compositions are using this template; otherwise, returns an error. GetEncoderConfiguration — Gets information about the specified EncoderConfiguration resource. ListEncoderConfigurations — Gets summary information about all EncoderConfigurations in your account, in the AWS region where the API request is processed. 
StorageConfiguration Endpoints CreateStorageConfiguration — Creates a new storage configuration, used to enable -/// recording to Amazon S3. DeleteStorageConfiguration — Deletes the storage configuration for the specified ARN. GetStorageConfiguration — Gets the storage configuration for the specified ARN. ListStorageConfigurations — Gets summary information about all storage configurations in your -/// account, in the AWS region where the API request is processed. Tags Endpoints ListTagsForResource — Gets information about AWS tags for the specified ARN. TagResource — Adds or updates tags for the AWS resource with the specified ARN. UntagResource — Removes tags from the resource with the specified ARN. +/// including errors. Key Concepts Stage — A virtual space where participants can exchange video in real time. Participant token — A token that authenticates a participant when they join a stage. Participant object — Represents participants (people) in the stage and contains information about them. When a token is created, it includes a participant ID; when a participant uses that token to join a stage, the participant is associated with that participant ID. There is a 1:1 mapping between participant tokens and participants. For server-side composition: Composition process — Composites participants of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels). Composition endpoints support this process. Composition — Controls the look of the outputs, including how participants are positioned in the video. For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming. Tagging A tag is a metadata label that you assign to an AWS resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging AWS Resources for more information, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS stages has no service-specific constraints beyond what is documented there. Tags can help you identify and organize your AWS resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags). The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resource supports tagging: Stage. At most 50 tags can be applied to a resource. public struct IVSRealTime: AWSService { // MARK: Member variables @@ -145,6 +143,20 @@ public struct IVSRealTime: AWSService { ) } + /// Deletes the specified public key used to sign stage participant tokens. + /// This invalidates future participant tokens generated using the key pair’s private key. + @Sendable + public func deletePublicKey(_ input: DeletePublicKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeletePublicKeyResponse { + return try await self.client.execute( + operation: "DeletePublicKey", + path: "/DeletePublicKey", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Shuts down and deletes the specified stage (disconnecting all participants). @Sendable public func deleteStage(_ input: DeleteStageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteStageResponse { @@ -225,6 +237,19 @@ public struct IVSRealTime: AWSService { ) } + /// Gets information for the specified public key. 
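// Illustrative usage sketch (editor's example, not part of the patch): one way the public-key
// operations added in this update might be driven from application code. The
// ListPublicKeysRequest(maxResults:) spelling and the `publicKeys` field on the response are
// assumptions based on the paginator extension further down; the client setup follows the
// usual Soto pattern.
import SotoIVSRealTime

func printAllPublicKeys(using awsClient: AWSClient) async throws {
    let ivsRealTime = IVSRealTime(client: awsClient)
    // Page through every imported public key using the paginator added in this update.
    for try await page in ivsRealTime.listPublicKeysPaginator(ListPublicKeysRequest(maxResults: 50)) {
        for key in page.publicKeys ?? [] {  // field name assumed from the AWS ListPublicKeys response
            print(key)
        }
    }
}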
+ @Sendable + public func getPublicKey(_ input: GetPublicKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetPublicKeyResponse { + return try await self.client.execute( + operation: "GetPublicKey", + path: "/GetPublicKey", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets information for the specified stage. @Sendable public func getStage(_ input: GetStageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetStageResponse { @@ -264,6 +289,19 @@ public struct IVSRealTime: AWSService { ) } + /// Import a public key to be used for signing stage participant tokens. + @Sendable + public func importPublicKey(_ input: ImportPublicKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportPublicKeyResponse { + return try await self.client.execute( + operation: "ImportPublicKey", + path: "/ImportPublicKey", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets summary information about all Compositions in your account, in the AWS region where the API request is processed. @Sendable public func listCompositions(_ input: ListCompositionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListCompositionsResponse { @@ -316,6 +354,19 @@ public struct IVSRealTime: AWSService { ) } + /// Gets summary information about all public keys in your account, in the AWS region where the API request is processed. + @Sendable + public func listPublicKeys(_ input: ListPublicKeysRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListPublicKeysResponse { + return try await self.client.execute( + operation: "ListPublicKeys", + path: "/ListPublicKeys", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets all sessions for a specified stage. @Sendable public func listStageSessions(_ input: ListStageSessionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListStageSessionsResponse { @@ -524,6 +575,25 @@ extension IVSRealTime { ) } + /// Gets summary information about all public keys in your account, in the AWS region where the API request is processed. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listPublicKeysPaginator( + _ input: ListPublicKeysRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListPublicKeysRequest, ListPublicKeysResponse> { + return .init( + input: input, + command: self.listPublicKeys, + inputKey: \ListPublicKeysRequest.nextToken, + outputKey: \ListPublicKeysResponse.nextToken, + logger: logger + ) + } + /// Gets all sessions for a specified stage. /// Return PaginatorSequence for operation.
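Editorial note (not part of this patch): the listPublicKeysPaginator above returns an AWSClient.PaginatorSequence, which is an AsyncSequence. A minimal sketch of driving it, assuming an IVSRealTime client named ivsRealTime configured as in the earlier sketch:

// Page through every public key in the account/region, 50 per request.
let pages = ivsRealTime.listPublicKeysPaginator(.init(maxResults: 50))
for try await page in pages {
    for key in page.publicKeys {
        print(key.arn ?? "-", key.name ?? "-")
    }
}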
/// @@ -619,6 +689,7 @@ extension IVSRealTime.ListParticipantsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> IVSRealTime.ListParticipantsRequest { return .init( filterByPublished: self.filterByPublished, + filterByRecordingState: self.filterByRecordingState, filterByState: self.filterByState, filterByUserId: self.filterByUserId, maxResults: self.maxResults, @@ -629,6 +700,15 @@ extension IVSRealTime.ListParticipantsRequest: AWSPaginateToken { } } +extension IVSRealTime.ListPublicKeysRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> IVSRealTime.ListPublicKeysRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension IVSRealTime.ListStageSessionsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> IVSRealTime.ListStageSessionsRequest { return .init( diff --git a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift index 236aa3c72c..830b7ce2e7 100644 --- a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift +++ b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift @@ -65,6 +65,31 @@ extension IVSRealTime { public var description: String { return self.rawValue } } + public enum ParticipantRecordingFilterByRecordingState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case failed = "FAILED" + case starting = "STARTING" + case stopped = "STOPPED" + case stopping = "STOPPING" + public var description: String { return self.rawValue } + } + + public enum ParticipantRecordingMediaType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case audioOnly = "AUDIO_ONLY" + case audioVideo = "AUDIO_VIDEO" + public var description: String { return self.rawValue } + } + + public enum ParticipantRecordingState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case disabled = "DISABLED" + case failed = "FAILED" + case starting = "STARTING" + case stopped = "STOPPED" + case stopping = "STOPPING" + public var description: String { return self.rawValue } + } + public enum ParticipantState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case connected = "CONNECTED" case disconnected = "DISCONNECTED" @@ -113,6 +138,29 @@ extension IVSRealTime { // MARK: Shapes + public struct AutoParticipantRecordingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Types of media to be recorded. Default: AUDIO_VIDEO. + public let mediaTypes: [ParticipantRecordingMediaType]? + /// ARN of the StorageConfiguration resource to use for individual participant recording. Default: "" (empty string, no storage configuration is specified). Individual participant recording cannot be started unless a storage configuration is specified, when a Stage is created or updated. + public let storageConfigurationArn: String + + public init(mediaTypes: [ParticipantRecordingMediaType]? 
= nil, storageConfigurationArn: String) { + self.mediaTypes = mediaTypes + self.storageConfigurationArn = storageConfigurationArn + } + + public func validate(name: String) throws { + try self.validate(self.mediaTypes, name: "mediaTypes", parent: name, max: 1) + try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, max: 128) + try self.validate(self.storageConfigurationArn, name: "storageConfigurationArn", parent: name, pattern: "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+$$") + } + + private enum CodingKeys: String, CodingKey { + case mediaTypes = "mediaTypes" + case storageConfigurationArn = "storageConfigurationArn" + } + } + public struct ChannelDestinationConfiguration: AWSEncodableShape & AWSDecodableShape { /// ARN of the channel to use for broadcasting. The channel and stage resources must be in the same AWS account and region. The channel must be offline (not broadcasting). public let channelArn: String @@ -321,6 +369,8 @@ extension IVSRealTime { } public struct CreateStageRequest: AWSEncodableShape { + /// Configuration object for individual participant recording, to attach to the new stage. + public let autoParticipantRecordingConfiguration: AutoParticipantRecordingConfiguration? /// Optional name that can be specified for the stage being created. public let name: String? /// Array of participant token configuration objects to attach to the new stage. @@ -328,13 +378,15 @@ extension IVSRealTime { /// Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there. public let tags: [String: String]? - public init(name: String? = nil, participantTokenConfigurations: [ParticipantTokenConfiguration]? = nil, tags: [String: String]? = nil) { + public init(autoParticipantRecordingConfiguration: AutoParticipantRecordingConfiguration? = nil, name: String? = nil, participantTokenConfigurations: [ParticipantTokenConfiguration]? = nil, tags: [String: String]? = nil) { + self.autoParticipantRecordingConfiguration = autoParticipantRecordingConfiguration self.name = name self.participantTokenConfigurations = participantTokenConfigurations self.tags = tags } public func validate(name: String) throws { + try self.autoParticipantRecordingConfiguration?.validate(name: "\(name).autoParticipantRecordingConfiguration") try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9-_]*$") try self.participantTokenConfigurations?.forEach { @@ -350,6 +402,7 @@ extension IVSRealTime { } private enum CodingKeys: String, CodingKey { + case autoParticipantRecordingConfiguration = "autoParticipantRecordingConfiguration" case name = "name" case participantTokenConfigurations = "participantTokenConfigurations" case tags = "tags" @@ -442,6 +495,29 @@ extension IVSRealTime { public init() {} } + public struct DeletePublicKeyRequest: AWSEncodableShape { + /// ARN of the public key to be deleted. 
+ public let arn: String + + public init(arn: String) { + self.arn = arn + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, max: 128) + try self.validate(self.arn, name: "arn", parent: name, min: 1) + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws:ivs:[a-z0-9-]+:[0-9]+:public-key/[a-zA-Z0-9-]+$") + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + } + } + + public struct DeletePublicKeyResponse: AWSDecodableShape { + public init() {} + } + public struct DeleteStageRequest: AWSEncodableShape { /// ARN of the stage to be deleted. public let arn: String @@ -810,6 +886,38 @@ extension IVSRealTime { } } + public struct GetPublicKeyRequest: AWSEncodableShape { + /// ARN of the public key for which the information is to be retrieved. + public let arn: String + + public init(arn: String) { + self.arn = arn + } + + public func validate(name: String) throws { + try self.validate(self.arn, name: "arn", parent: name, max: 128) + try self.validate(self.arn, name: "arn", parent: name, min: 1) + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws:ivs:[a-z0-9-]+:[0-9]+:public-key/[a-zA-Z0-9-]+$") + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + } + } + + public struct GetPublicKeyResponse: AWSDecodableShape { + /// The public key that is returned. + public let publicKey: PublicKey? + + public init(publicKey: PublicKey? = nil) { + self.publicKey = publicKey + } + + private enum CodingKeys: String, CodingKey { + case publicKey = "publicKey" + } + } + public struct GetStageRequest: AWSEncodableShape { /// ARN of the stage for which the information is to be retrieved. public let arn: String @@ -914,17 +1022,15 @@ extension IVSRealTime { } public struct GridConfiguration: AWSEncodableShape & AWSDecodableShape { - /// This attribute name identifies the featured slot. A participant with this attribute set to "true" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot. + /// This attribute name identifies the featured slot. A participant with this attribute set to "true" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot. Default: "" (no featured participant). public let featuredParticipantAttribute: String? /// Specifies the spacing between participant tiles in pixels. Default: 2. public let gridGap: Int? /// Determines whether to omit participants with stopped video in the composition. Default: false. public let omitStoppedVideo: Bool? - /// Sets the non-featured participant display mode. Default: VIDEO. + /// Sets the non-featured participant display mode, to control the aspect ratio of video tiles. VIDEO is 16:9, SQUARE is 1:1, and PORTRAIT is 3:4. Default: VIDEO. public let videoAspectRatio: VideoAspectRatio? - /// Defines how video fits within the participant tile. When not set, - /// videoFillMode defaults to COVER fill mode for participants in the grid - /// and to CONTAIN fill mode for featured participants. + /// Defines how video content fits within the participant tile: FILL (stretched), COVER (cropped), or CONTAIN (letterboxed). When not set, videoFillMode defaults to COVER fill mode for participants in the grid and to CONTAIN fill mode for featured participants. public let videoFillMode: VideoFillMode? public init(featuredParticipantAttribute: String? = nil, gridGap: Int? = nil, omitStoppedVideo: Bool? = nil, videoAspectRatio: VideoAspectRatio? = nil, videoFillMode: VideoFillMode? 
= nil) { @@ -950,6 +1056,52 @@ extension IVSRealTime { } } + public struct ImportPublicKeyRequest: AWSEncodableShape { + /// Name of the public key to be imported. + public let name: String? + /// The content of the public key to be imported. + public let publicKeyMaterial: String + /// Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there. + public let tags: [String: String]? + + public init(name: String? = nil, publicKeyMaterial: String, tags: [String: String]? = nil) { + self.name = name + self.publicKeyMaterial = publicKeyMaterial + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9-_]*$") + try self.validate(self.publicKeyMaterial, name: "publicKeyMaterial", parent: name, pattern: "-----BEGIN PUBLIC KEY-----\\r?\\n([a-zA-Z0-9+/=\\r\\n]+)\\r?\\n-----END PUBLIC KEY-----(\\r?\\n)?") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case name = "name" + case publicKeyMaterial = "publicKeyMaterial" + case tags = "tags" + } + } + + public struct ImportPublicKeyResponse: AWSDecodableShape { + /// The public key that was imported. + public let publicKey: PublicKey? + + public init(publicKey: PublicKey? = nil) { + self.publicKey = publicKey + } + + private enum CodingKeys: String, CodingKey { + case publicKey = "publicKey" + } + } + public struct LayoutConfiguration: AWSEncodableShape & AWSDecodableShape { /// Configuration related to grid layout. Default: Grid layout. public let grid: GridConfiguration? @@ -1130,11 +1282,14 @@ extension IVSRealTime { } public struct ListParticipantsRequest: AWSEncodableShape { - /// Filters the response list to only show participants who published during the stage session. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request. + /// Filters the response list to only show participants who published during the stage session. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request. public let filterByPublished: Bool? - /// Filters the response list to only show participants in the specified state. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request. + /// Filters the response list to only show participants with the specified recording state. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request. + public let filterByRecordingState: ParticipantRecordingFilterByRecordingState? + /// Filters the response list to only show participants in the specified state. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request. public let filterByState: ParticipantState? - /// Filters the response list to match the specified user ID. Only one of filterByUserId, filterByPublished, or filterByState can be provided per request. 
A userId is a customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. + /// Filters the response list to match the specified user ID. Only one of filterByUserId, filterByPublished, filterByState, or filterByRecordingState can be provided per request. + /// A userId is a customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. public let filterByUserId: String? /// Maximum number of results to return. Default: 50. public let maxResults: Int? @@ -1145,8 +1300,9 @@ extension IVSRealTime { /// Stage ARN. public let stageArn: String - public init(filterByPublished: Bool? = nil, filterByState: ParticipantState? = nil, filterByUserId: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, sessionId: String, stageArn: String) { + public init(filterByPublished: Bool? = nil, filterByRecordingState: ParticipantRecordingFilterByRecordingState? = nil, filterByState: ParticipantState? = nil, filterByUserId: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, sessionId: String, stageArn: String) { self.filterByPublished = filterByPublished + self.filterByRecordingState = filterByRecordingState self.filterByState = filterByState self.filterByUserId = filterByUserId self.maxResults = maxResults @@ -1171,6 +1327,7 @@ extension IVSRealTime { private enum CodingKeys: String, CodingKey { case filterByPublished = "filterByPublished" + case filterByRecordingState = "filterByRecordingState" case filterByState = "filterByState" case filterByUserId = "filterByUserId" case maxResults = "maxResults" @@ -1197,6 +1354,47 @@ extension IVSRealTime { } } + public struct ListPublicKeysRequest: AWSEncodableShape { + /// Maximum number of results to return. Default: 50. + public let maxResults: Int? + /// The first public key to retrieve. This is used for pagination; see the nextToken response field. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[a-zA-Z0-9+/=_-]*$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListPublicKeysResponse: AWSDecodableShape { + /// If there are more public keys than maxResults, use nextToken in the request to get the next set. + public let nextToken: String? + /// List of the matching public keys (summary information only). + public let publicKeys: [PublicKeySummary] + + public init(nextToken: String? = nil, publicKeys: [PublicKeySummary]) { + self.nextToken = nextToken + self.publicKeys = publicKeys + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case publicKeys = "publicKeys" + } + } + public struct ListStageSessionsRequest: AWSEncodableShape { /// Maximum number of results to return. Default: 50. public let maxResults: Int? @@ -1385,6 +1583,12 @@ extension IVSRealTime { public let participantId: String? /// Whether the participant ever published to the stage session. public let published: Bool? 
+ /// Name of the S3 bucket to where the participant is being recorded, if individual participant recording is enabled, or "" (empty string), if recording is not enabled. + public let recordingS3BucketName: String? + /// S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or "" (empty string), if recording is not enabled. + public let recordingS3Prefix: String? + /// The participant’s recording state. + public let recordingState: ParticipantRecordingState? /// The participant’s SDK version. public let sdkVersion: String? /// Whether the participant is connected to or disconnected from the stage. @@ -1392,7 +1596,7 @@ extension IVSRealTime { /// Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information. public let userId: String? - public init(attributes: [String: String]? = nil, browserName: String? = nil, browserVersion: String? = nil, firstJoinTime: Date? = nil, ispName: String? = nil, osName: String? = nil, osVersion: String? = nil, participantId: String? = nil, published: Bool? = nil, sdkVersion: String? = nil, state: ParticipantState? = nil, userId: String? = nil) { + public init(attributes: [String: String]? = nil, browserName: String? = nil, browserVersion: String? = nil, firstJoinTime: Date? = nil, ispName: String? = nil, osName: String? = nil, osVersion: String? = nil, participantId: String? = nil, published: Bool? = nil, recordingS3BucketName: String? = nil, recordingS3Prefix: String? = nil, recordingState: ParticipantRecordingState? = nil, sdkVersion: String? = nil, state: ParticipantState? = nil, userId: String? = nil) { self.attributes = attributes self.browserName = browserName self.browserVersion = browserVersion @@ -1402,6 +1606,9 @@ extension IVSRealTime { self.osVersion = osVersion self.participantId = participantId self.published = published + self.recordingS3BucketName = recordingS3BucketName + self.recordingS3Prefix = recordingS3Prefix + self.recordingState = recordingState self.sdkVersion = sdkVersion self.state = state self.userId = userId @@ -1417,6 +1624,9 @@ extension IVSRealTime { case osVersion = "osVersion" case participantId = "participantId" case published = "published" + case recordingS3BucketName = "recordingS3BucketName" + case recordingS3Prefix = "recordingS3Prefix" + case recordingState = "recordingState" case sdkVersion = "sdkVersion" case state = "state" case userId = "userId" @@ -1431,15 +1641,18 @@ extension IVSRealTime { public let participantId: String? /// Whether the participant ever published to the stage session. public let published: Bool? + /// The participant’s recording state. + public let recordingState: ParticipantRecordingState? /// Whether the participant is connected to or disconnected from the stage. public let state: ParticipantState? /// Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information. public let userId: String? - public init(firstJoinTime: Date? = nil, participantId: String? = nil, published: Bool? = nil, state: ParticipantState? = nil, userId: String? 
= nil) { + public init(firstJoinTime: Date? = nil, participantId: String? = nil, published: Bool? = nil, recordingState: ParticipantRecordingState? = nil, state: ParticipantState? = nil, userId: String? = nil) { self.firstJoinTime = firstJoinTime self.participantId = participantId self.published = published + self.recordingState = recordingState self.state = state self.userId = userId } @@ -1448,6 +1661,7 @@ extension IVSRealTime { case firstJoinTime = "firstJoinTime" case participantId = "participantId" case published = "published" + case recordingState = "recordingState" case state = "state" case userId = "userId" } @@ -1524,13 +1738,13 @@ extension IVSRealTime { } public struct PipConfiguration: AWSEncodableShape & AWSDecodableShape { - /// This attribute name identifies the featured slot. A participant with this attribute set to "true" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot. + /// This attribute name identifies the featured slot. A participant with this attribute set to "true" (as a string value) in ParticipantTokenConfiguration is placed in the featured slot. Default: "" (no featured participant). public let featuredParticipantAttribute: String? /// Specifies the spacing between participant tiles in pixels. Default: 0. public let gridGap: Int? /// Determines whether to omit participants with stopped video in the composition. Default: false. public let omitStoppedVideo: Bool? - /// Defines PiP behavior when all participants have left. Default: STATIC. + /// Defines PiP behavior when all participants have left: STATIC (maintains original position/size) or DYNAMIC (expands to full composition). Default: STATIC. public let pipBehavior: PipBehavior? /// Specifies the height of the PiP window in pixels. When this is not set explicitly, /// pipHeight’s value will be based on the size of the composition and the @@ -1539,8 +1753,8 @@ extension IVSRealTime { /// Sets the PiP window’s offset position in pixels from the closest edges determined by PipPosition. /// Default: 0. public let pipOffset: Int? - /// Identifies the PiP slot. A participant with this attribute set to "true" (as a string value) in ParticipantTokenConfiguration - /// is placed in the PiP slot. + /// Specifies the participant for the PiP window. A participant with this attribute set to "true" (as a string value) in ParticipantTokenConfiguration + /// is placed in the PiP slot. Default: "" (no PiP participant). public let pipParticipantAttribute: String? /// Determines the corner position of the PiP window. Default: BOTTOM_RIGHT. public let pipPosition: PipPosition? @@ -1548,7 +1762,8 @@ extension IVSRealTime { /// pipWidth’s value will be based on the size of the composition and the /// aspect ratio of the participant’s video. public let pipWidth: Int? - /// Defines how video fits within the participant tile. Default: COVER. + /// Defines how video content fits within the participant tile: FILL (stretched), + /// COVER (cropped), or CONTAIN (letterboxed). Default: COVER. public let videoFillMode: VideoFillMode? public init(featuredParticipantAttribute: String? = nil, gridGap: Int? = nil, omitStoppedVideo: Bool? = nil, pipBehavior: PipBehavior? = nil, pipHeight: Int? = nil, pipOffset: Int? = nil, pipParticipantAttribute: String? = nil, pipPosition: PipPosition? = nil, pipWidth: Int? = nil, videoFillMode: VideoFillMode? = nil) { @@ -1589,6 +1804,56 @@ extension IVSRealTime { } } + public struct PublicKey: AWSDecodableShape { + /// Public key ARN. + public let arn: String? 
+ /// The public key fingerprint, a short string used to identify or verify the full public key. + public let fingerprint: String? + /// Public key name. + public let name: String? + /// Public key material. + public let publicKeyMaterial: String? + /// Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there. + public let tags: [String: String]? + + public init(arn: String? = nil, fingerprint: String? = nil, name: String? = nil, publicKeyMaterial: String? = nil, tags: [String: String]? = nil) { + self.arn = arn + self.fingerprint = fingerprint + self.name = name + self.publicKeyMaterial = publicKeyMaterial + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case fingerprint = "fingerprint" + case name = "name" + case publicKeyMaterial = "publicKeyMaterial" + case tags = "tags" + } + } + + public struct PublicKeySummary: AWSDecodableShape { + /// Public key ARN. + public let arn: String? + /// Public key name. + public let name: String? + /// Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there. + public let tags: [String: String]? + + public init(arn: String? = nil, name: String? = nil, tags: [String: String]? = nil) { + self.arn = arn + self.name = name + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case name = "name" + case tags = "tags" + } + } + public struct RecordingConfiguration: AWSEncodableShape & AWSDecodableShape { /// The recording format for storing a recording in Amazon S3. public let format: RecordingConfigurationFormat? @@ -1677,14 +1942,20 @@ extension IVSRealTime { public let activeSessionId: String? /// Stage ARN. public let arn: String + /// Configuration object for individual participant recording, attached to the stage. + public let autoParticipantRecordingConfiguration: AutoParticipantRecordingConfiguration? + /// Summary information about various endpoints for a stage. + public let endpoints: StageEndpoints? /// Stage name. public let name: String? /// Tags attached to the resource. Array of maps, each of the form string:string (key:value). See Tagging AWS Resources for details, including restrictions that apply to tags and "Tag naming limits and requirements"; Amazon IVS has no constraints on tags beyond what is documented there. public let tags: [String: String]? - public init(activeSessionId: String? = nil, arn: String, name: String? = nil, tags: [String: String]? = nil) { + public init(activeSessionId: String? = nil, arn: String, autoParticipantRecordingConfiguration: AutoParticipantRecordingConfiguration? = nil, endpoints: StageEndpoints? = nil, name: String? = nil, tags: [String: String]? 
= nil) { self.activeSessionId = activeSessionId self.arn = arn + self.autoParticipantRecordingConfiguration = autoParticipantRecordingConfiguration + self.endpoints = endpoints self.name = name self.tags = tags } @@ -1692,11 +1963,30 @@ extension IVSRealTime { private enum CodingKeys: String, CodingKey { case activeSessionId = "activeSessionId" case arn = "arn" + case autoParticipantRecordingConfiguration = "autoParticipantRecordingConfiguration" + case endpoints = "endpoints" case name = "name" case tags = "tags" } } + public struct StageEndpoints: AWSDecodableShape { + /// Events endpoint. + public let events: String? + /// WHIP endpoint. + public let whip: String? + + public init(events: String? = nil, whip: String? = nil) { + self.events = events + self.whip = whip + } + + private enum CodingKeys: String, CodingKey { + case events = "events" + case whip = "whip" + } + } + public struct StageSession: AWSDecodableShape { /// ISO 8601 timestamp (returned as a string) when the stage session ended. This is null if the stage is active. @OptionalCustomCoding @@ -1982,11 +2272,14 @@ extension IVSRealTime { public struct UpdateStageRequest: AWSEncodableShape { /// ARN of the stage to be updated. public let arn: String + /// Configuration object for individual participant recording, to attach to the stage. Note that this cannot be updated while recording is active. + public let autoParticipantRecordingConfiguration: AutoParticipantRecordingConfiguration? /// Name of the stage to be updated. public let name: String? - public init(arn: String, name: String? = nil) { + public init(arn: String, autoParticipantRecordingConfiguration: AutoParticipantRecordingConfiguration? = nil, name: String? = nil) { self.arn = arn + self.autoParticipantRecordingConfiguration = autoParticipantRecordingConfiguration self.name = name } @@ -1994,12 +2287,14 @@ extension IVSRealTime { try self.validate(self.arn, name: "arn", parent: name, max: 128) try self.validate(self.arn, name: "arn", parent: name, min: 1) try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws:ivs:[a-z0-9-]+:[0-9]+:stage/[a-zA-Z0-9-]+$") + try self.autoParticipantRecordingConfiguration?.validate(name: "\(name).autoParticipantRecordingConfiguration") try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9-_]*$") } private enum CodingKeys: String, CodingKey { case arn = "arn" + case autoParticipantRecordingConfiguration = "autoParticipantRecordingConfiguration" case name = "name" } } diff --git a/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift b/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift index c8439b15f0..54aa646c92 100644 --- a/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift +++ b/Sources/Soto/Services/Imagebuilder/Imagebuilder_shapes.swift @@ -420,7 +420,7 @@ extension Imagebuilder { public func validate(name: String) throws { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 36) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) - try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } 
private enum CodingKeys: String, CodingKey { @@ -588,7 +588,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.componentArn, name: "componentArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") + try self.validate(self.componentArn, name: "componentArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") try self.parameters?.forEach { try $0.validate(name: "\(name).parameters[]") } @@ -2073,7 +2073,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.componentBuildVersionArn, name: "componentBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.componentBuildVersionArn, name: "componentBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } private enum CodingKeys: CodingKey {} @@ -2264,7 +2264,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } private enum CodingKeys: CodingKey {} @@ -2377,7 +2377,7 @@ extension Imagebuilder { public func validate(name: String) throws { try self.validate(self.workflowBuildVersionArn, name: "workflowBuildVersionArn", parent: name, max: 1024) - try self.validate(self.workflowBuildVersionArn, name: "workflowBuildVersionArn", parent: name, pattern: "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.workflowBuildVersionArn, name: "workflowBuildVersionArn", parent: name, pattern: "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } private enum CodingKeys: CodingKey {} @@ -2757,7 +2757,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.componentArn, name: "componentArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.componentArn, name: "componentArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } private enum CodingKeys: CodingKey {} @@ -2796,7 +2796,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.componentBuildVersionArn, name: "componentBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") + try 
self.validate(self.componentBuildVersionArn, name: "componentBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") } private enum CodingKeys: CodingKey {} @@ -2988,7 +2988,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.imageArn, name: "imageArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageArn, name: "imageArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } private enum CodingKeys: CodingKey {} @@ -3103,7 +3103,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") + try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") } private enum CodingKeys: CodingKey {} @@ -3345,7 +3345,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.workflowBuildVersionArn, name: "workflowBuildVersionArn", parent: name, pattern: "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") + try self.validate(self.workflowBuildVersionArn, name: "workflowBuildVersionArn", parent: name, pattern: "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") } private enum CodingKeys: CodingKey {} @@ -5269,7 +5269,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.componentVersionArn, name: "componentVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$") + try self.validate(self.componentVersionArn, name: "componentVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) @@ -5520,7 +5520,7 @@ extension Imagebuilder { } try self.validate(self.filters, name: "filters", parent: name, max: 10) try self.validate(self.filters, name: "filters", parent: name, min: 1) - try self.validate(self.imageVersionArn, name: "imageVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$") + try self.validate(self.imageVersionArn, name: "imageVersionArn", parent: name, pattern: 
"^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) @@ -5574,7 +5574,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) @@ -6125,7 +6125,7 @@ extension Imagebuilder { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") } private enum CodingKeys: String, CodingKey { @@ -6224,7 +6224,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") } private enum CodingKeys: CodingKey {} @@ -6308,7 +6308,7 @@ extension Imagebuilder { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.workflowVersionArn, name: "workflowVersionArn", parent: name, pattern: 
"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:[0-9]+|x)\\.(?:[0-9]+|x)\\.(?:[0-9]+|x)$") + try self.validate(self.workflowVersionArn, name: "workflowVersionArn", parent: name, pattern: "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:[0-9]+|x)\\.(?:[0-9]+|x)\\.(?:[0-9]+|x)$") } private enum CodingKeys: String, CodingKey { @@ -6355,7 +6355,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) @@ -6638,7 +6638,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.componentArn, name: "componentArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.componentArn, name: "componentArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") try self.validate(self.policy, name: "policy", parent: name, max: 30000) try self.validate(self.policy, name: "policy", parent: name, min: 1) } @@ -6722,7 +6722,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.imageArn, name: "imageArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageArn, name: "imageArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") try self.validate(self.policy, name: "policy", parent: name, max: 30000) try self.validate(self.policy, name: "policy", parent: name, min: 1) } @@ -7000,7 +7000,7 @@ extension Imagebuilder { public func validate(name: String) throws { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 36) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) - try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.imageBuildVersionArn, name: "imageBuildVersionArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") try self.validate(self.reason, name: "reason", parent: name, max: 1024) try self.validate(self.reason, name: "reason", parent: name, min: 1) try self.validate(self.stepExecutionId, name: "stepExecutionId", parent: name, pattern: "^step-[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") @@ -7142,7 +7142,7 @@ extension Imagebuilder { try 
self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) try self.validate(self.executionRole, name: "executionRole", parent: name, min: 1) try self.validate(self.executionRole, name: "executionRole", parent: name, pattern: "^(?:arn:aws(?:-[a-z]+)*:iam::[0-9]{12}:role/)?[a-zA-Z_0-9+=,.@\\-_/]+$") - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$") } private enum CodingKeys: String, CodingKey { @@ -7209,7 +7209,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") try self.tags.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -7271,7 +7271,7 @@ extension Imagebuilder { } public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$") try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) @@ -7826,7 +7826,7 @@ extension Imagebuilder { try $0.validate(name: "\(name).parameters[]") } try self.validate(self.parameters, name: "parameters", parent: name, min: 1) - try self.validate(self.workflowArn, name: "workflowArn", parent: name, pattern: "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") + try self.validate(self.workflowArn, name: "workflowArn", parent: name, pattern: 
"^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift index 402d4115f3..62324fed1d 100644 --- a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift +++ b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift @@ -1134,6 +1134,8 @@ extension IoTFleetWise.ListVehiclesInFleetRequest: AWSPaginateToken { extension IoTFleetWise.ListVehiclesRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> IoTFleetWise.ListVehiclesRequest { return .init( + attributeNames: self.attributeNames, + attributeValues: self.attributeValues, maxResults: self.maxResults, modelManifestArn: self.modelManifestArn, nextToken: token diff --git a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift index ff5cfb3783..137ed2ec56 100644 --- a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift +++ b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift @@ -1020,7 +1020,7 @@ extension IoTFleetWise { public let postTriggerCollectionDuration: Int64? /// (Optional) A number indicating the priority of one campaign over another campaign for a certain vehicle or fleet. A campaign with the lowest value is deployed to vehicles before any other campaigns. If it's not specified, 0 is used. Default: 0 public let priority: Int? - /// (Optional) The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign. + /// The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign. public let signalCatalogArn: String /// (Optional) A list of information about signals to collect. public let signalsToCollect: [SignalInformation]? @@ -3318,6 +3318,10 @@ extension IoTFleetWise { } public struct ListVehiclesRequest: AWSEncodableShape { + /// The fully qualified names of the attributes. For example, the fully qualified name of an attribute might be Vehicle.Body.Engine.Type. + public let attributeNames: [String]? + /// Static information about a vehicle attribute value in string format. For example: "1.3 L R2" + public let attributeValues: [String]? /// The maximum number of items to return, between 1 and 100, inclusive. public let maxResults: Int? /// The Amazon Resource Name (ARN) of a vehicle model (model manifest). You can use this optional parameter to list only the vehicles created from a certain vehicle model. @@ -3325,7 +3329,9 @@ extension IoTFleetWise { /// A pagination token for the next set of results. If the results of a search are large, only a portion of the results are returned, and a nextToken pagination token is returned in the response. To retrieve the next set of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value. public let nextToken: String? - public init(maxResults: Int? = nil, modelManifestArn: String? = nil, nextToken: String? = nil) { + public init(attributeNames: [String]? = nil, attributeValues: [String]? = nil, maxResults: Int? = nil, modelManifestArn: String? = nil, nextToken: String? 
= nil) { + self.attributeNames = attributeNames + self.attributeValues = attributeValues self.maxResults = maxResults self.modelManifestArn = modelManifestArn self.nextToken = nextToken @@ -3334,12 +3340,23 @@ extension IoTFleetWise { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.attributeNames, key: "attributeNames") + request.encodeQuery(self.attributeValues, key: "attributeValues") request.encodeQuery(self.maxResults, key: "maxResults") request.encodeQuery(self.modelManifestArn, key: "modelManifestArn") request.encodeQuery(self.nextToken, key: "nextToken") } public func validate(name: String) throws { + try self.attributeNames?.forEach { + try validate($0, name: "attributeNames[]", parent: name, max: 150) + try validate($0, name: "attributeNames[]", parent: name, min: 1) + try validate($0, name: "attributeNames[]", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + } + try self.validate(self.attributeNames, name: "attributeNames", parent: name, max: 5) + try self.validate(self.attributeNames, name: "attributeNames", parent: name, min: 1) + try self.validate(self.attributeValues, name: "attributeValues", parent: name, max: 5) + try self.validate(self.attributeValues, name: "attributeValues", parent: name, min: 1) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) diff --git a/Sources/Soto/Services/IoTTwinMaker/IoTTwinMaker_shapes.swift b/Sources/Soto/Services/IoTTwinMaker/IoTTwinMaker_shapes.swift index 2a7376768e..f3cc1e5f87 100644 --- a/Sources/Soto/Services/IoTTwinMaker/IoTTwinMaker_shapes.swift +++ b/Sources/Soto/Services/IoTTwinMaker/IoTTwinMaker_shapes.swift @@ -123,6 +123,7 @@ extension IoTTwinMaker { public enum PropertyUpdateType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case create = "CREATE" case delete = "DELETE" + case resetValue = "RESET_VALUE" case update = "UPDATE" public var description: String { return self.rawValue } } diff --git a/Sources/Soto/Services/IoTWireless/IoTWireless_shapes.swift b/Sources/Soto/Services/IoTWireless/IoTWireless_shapes.swift index 626e25ea96..bd005cc432 100644 --- a/Sources/Soto/Services/IoTWireless/IoTWireless_shapes.swift +++ b/Sources/Soto/Services/IoTWireless/IoTWireless_shapes.swift @@ -118,6 +118,7 @@ extension IoTWireless { } public enum FuotaDeviceStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deviceExistInConflictFuotaTask = "Device_exist_in_conflict_fuota_task" case fragAlgoUnsupported = "FragAlgo_unsupported" case fragIndexUnsupported = "FragIndex_unsupported" case initial = "Initial" @@ -193,6 +194,8 @@ extension IoTWireless { case deviceJoinRequestCount = "DeviceJoinRequestCount" case deviceRSSI = "DeviceRSSI" case deviceRoamingDownlinkCount = "DeviceRoamingDownlinkCount" + case deviceRoamingRSSI = "DeviceRoamingRSSI" + case deviceRoamingSNR = "DeviceRoamingSNR" case deviceRoamingUplinkCount = "DeviceRoamingUplinkCount" case deviceSNR = "DeviceSNR" case deviceUplinkCount = "DeviceUplinkCount" diff --git a/Sources/Soto/Services/KMS/KMS_api.swift b/Sources/Soto/Services/KMS/KMS_api.swift index ac8fc3e8ed..0e93aa233b 100644 --- a/Sources/Soto/Services/KMS/KMS_api.swift +++ b/Sources/Soto/Services/KMS/KMS_api.swift @@ -187,7 +187,7 
@@ public struct KMS: AWSService { ) } - /// Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources. A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties. KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term. To create different types of KMS keys, use the following guidance: Symmetric encryption KMS key By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance. To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material. If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair. Asymmetric KMS keys To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created. Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key pairs can be used only to sign and verify messages. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide. HMAC KMS key To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created. HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes. 
Multi-Region primary keys Imported key material To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation. You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store. This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide. To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide . You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store. To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide. Custom key store A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys. KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager. Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation. 
Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation. To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store. To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region. To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key. Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation. Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account. Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide. Related operations: DescribeKey ListKeys ScheduleKeyDeletion Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency. + /// Creates a unique customer managed KMS key in your Amazon Web Services account and Region. You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services services let you use KMS keys that you create and manage to protect your service resources. A KMS key is a logical representation of a cryptographic key. In addition to the key material used in cryptographic operations, a KMS key includes metadata, such as the key ID, key policy, creation date, description, and key state. For details, see Managing keys in the Key Management Service Developer Guide Use the parameters of CreateKey to specify the type of KMS key, the source of its key material, its key policy, description, tags, and other properties. KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term. To create different types of KMS keys, use the following guidance: Symmetric encryption KMS key By default, CreateKey creates a symmetric encryption KMS key with key material that KMS generates. This is the basic and most widely used type of KMS key, and provides the best performance. To create a symmetric encryption KMS key, you don't need to specify any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage, ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric encryption KMS key with KMS key material. If you need a key for basic encryption and decryption or you are creating a KMS key to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key. The key material in a symmetric encryption key never leaves KMS unencrypted. 
You can use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but they are typically used to generate data keys and data keys pairs. For details, see GenerateDataKey and GenerateDataKeyPair. Asymmetric KMS keys To create an asymmetric KMS key, use the KeySpec parameter to specify the type of key material in the KMS key. Then, use the KeyUsage parameter to determine whether the KMS key will be used to encrypt and decrypt or sign and verify. You can't change these properties after the KMS key is created. Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey operation to download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key pairs can be used to encrypt and decrypt data or sign and verify messages (but not both). KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can be used only to sign and verify messages. KMS keys with SM2 key pairs (China Regions only) can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide. HMAC KMS key To create an HMAC KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys. You can't change these properties after the KMS key is created. HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes. Multi-Region primary keys Imported key material To create a multi-Region primary key in the local Amazon Web Services Region, use the MultiRegion parameter with a value of True. To create a multi-Region replica key, that is, a KMS key with the same key ID and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its primary key to a replica key, use the UpdatePrimaryRegion operation. You can create multi-Region KMS keys for all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't create multi-Region keys in a custom key store. This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide. To import your own key material into a KMS key, begin by creating a KMS key with no key material. To do this, use the Origin parameter of CreateKey with a value of EXTERNAL. 
Next, use GetParametersForImport operation to get a public key and import token. Use the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For step-by-step instructions, see Importing Key Material in the Key Management Service Developer Guide . You can import key material into KMS keys of all supported KMS key types: symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric signing KMS keys. You can also create multi-Region keys with imported key material. However, you can't import key material into a KMS key in a custom key store. To create a multi-Region primary key with imported key material, use the Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide. Custom key store A custom key store lets you protect your Amazon Web Services resources using keys in a backing key store that you own and manage. When you request a cryptographic operation with a KMS key in a custom key store, the operation is performed in the backing key store using its cryptographic keys. KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store, KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS key. When you create a KMS key in an external key store, you specify an existing encryption key in the external key manager. Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation. Before you create a KMS key in a custom key store, the ConnectionState of the key store must be CONNECTED. To connect the custom key store, use the ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores operation. To create a KMS key in a custom key store, use the CustomKeyStoreId. Use the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is supported in a custom key store. To create a KMS key in an CloudHSM key store, use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store must have at least two active HSMs in different Availability Zones in the Amazon Web Services Region. To create a KMS key in an external key store, use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId parameter that identifies an existing external key. Some external key managers provide a simpler method for creating a KMS key in an external key store. For details, see your external key manager documentation. Cross-account use: No. You cannot use this operation to create a KMS key in a different Amazon Web Services account. Required permissions: kms:CreateKey (IAM policy). To use the Tags parameter, kms:TagResource (IAM policy). For examples and information about related permissions, see Allow a user to create KMS keys in the Key Management Service Developer Guide. 
Related operations: DescribeKey ListKeys ScheduleKeyDeletion Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency. @Sendable public func createKey(_ input: CreateKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateKeyResponse { return try await self.client.execute( @@ -257,6 +257,20 @@ public struct KMS: AWSService { ) } + /// Derives a shared secret using a key agreement algorithm. You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret. DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to establish a key agreement between two peers by deriving a shared secret from their elliptic curve public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a symmetric key. The following workflow demonstrates how to establish key agreement over an insecure communication channel using DeriveSharedSecret. Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage value of KEY_AGREEMENT. The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec. Bob creates an elliptic curve key pair. Bob can call CreateKey to create an asymmetric KMS key pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) curve as Alice. Alice and Bob exchange their public keys through an insecure communication channel (like the internet). Use GetPublicKey to download the public key of your asymmetric KMS key pair. KMS strongly recommends verifying that the public key you receive came from the expected party before using it to derive a shared secret. Alice calls DeriveSharedSecret. KMS uses the private key from the KMS key pair generated in Step 1, Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret returns the raw shared secret. Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to calculate the same raw secret using his private key and Alice's public key. To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside of KMS, but both key pairs must be on the same elliptic curve. The KMS key that you use for this operation must be in a compatible key state. For + /// details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter.
Required permissions: kms:DeriveSharedSecret (key policy) Related operations: CreateKey GetPublicKey DescribeKey Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency. + @Sendable + public func deriveSharedSecret(_ input: DeriveSharedSecretRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeriveSharedSecretResponse { + return try await self.client.execute( + operation: "DeriveSharedSecret", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets information about custom key stores in the account and Region. This operation is part of the custom key stores feature in KMS, which /// combines the convenience and extensive integration of KMS with the isolation and control of a /// key store that you own and manage. By default, this operation returns information about all custom key stores in the account and Region. To get only information about a particular custom key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter (but not both). To determine whether the custom key store is connected to its CloudHSM cluster or external key store proxy, use the ConnectionState element in the response. If an attempt to connect the custom key store failed, the ConnectionState value is FAILED and the ConnectionErrorCode element in the response indicates the cause of the failure. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry. Custom key stores have a DISCONNECTED connection state if the key store has never been connected or you used the DisconnectCustomKeyStore operation to disconnect it. Otherwise, the connection state is CONNECTED. If your custom key store connection state is CONNECTED but you are having trouble using it, verify that the backing store is active and available. For an CloudHSM key store, verify that the associated CloudHSM cluster is active and contains the minimum number of HSMs required for the operation, if any. For an external key store, verify that the external key store proxy and its associated external key manager are reachable and enabled. For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM key stores. For help repairing your external key store, see the Troubleshooting external key stores. Both topics are in the Key Management Service Developer Guide. Cross-account use: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account. Required permissions: kms:DescribeCustomKeyStores (IAM policy) Related operations: ConnectCustomKeyStore CreateCustomKeyStore DeleteCustomKeyStore DisconnectCustomKeyStore UpdateCustomKeyStore Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency. @@ -495,7 +509,7 @@ public struct KMS: AWSService { ) } - /// Returns the public key of an asymmetric KMS key. Unlike the private key of a asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide. You do not need to download the public key. 
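A minimal sketch of the caller's side of the DeriveSharedSecret workflow described above, using the operation added in this hunk. It assumes an already-configured AWSClient, an existing KMS key with KeyUsage KEY_AGREEMENT, and a peer public key obtained out of band; the function and variable names are placeholders.

import SotoKMS

/// Derive a raw ECDH shared secret from a KEY_AGREEMENT KMS key and a peer's
/// DER-encoded (SPKI) public key. Returns nil if KMS did not include a plaintext
/// shared secret (for example, when a Nitro enclave Recipient was supplied).
func deriveSecret(client: AWSClient, keyId: String, peerPublicKeyDER: [UInt8]) async throws -> [UInt8]? {
    let kms = KMS(client: client)
    let response = try await kms.deriveSharedSecret(
        KMS.DeriveSharedSecretRequest(
            keyAgreementAlgorithm: .ecdh,        // ECDH is the only valid value
            keyId: keyId,                        // key ID, key ARN, alias name, or alias ARN
            publicKey: .data(peerPublicKeyDER)
        )
    )
    // KMS recommends feeding this raw secret into a KDF rather than using it directly.
    return response.sharedSecret?.decoded()
}

Both peers would run an equivalent call against their own key after exchanging and verifying public keys, then feed the matching raw secrets into the same key-derivation function.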
Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce of risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS. To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including: KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521. KeyUsage: Whether the key is used for encryption or signing. EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the key. Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation. To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs. The KMS key that you use for this operation must be in a compatible key state. For + /// Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey permission can download the public key of an asymmetric KMS key. You can share the public key to allow others to encrypt messages and verify signatures outside of KMS. For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide. You do not need to download the public key. Instead, you can use the public key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the public key within KMS, you benefit from the authentication, authorization, and logging that are part of every KMS operation. You also reduce the risk of encrypting data that cannot be decrypted. These features are not effective outside of KMS. To help you use the public key safely outside of KMS, GetPublicKey returns important information about the public key in the response, including: KeySpec: The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521. KeyUsage: Whether the key is used for encryption, signing, or deriving a shared secret. EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing algorithms for the key. Although KMS cannot enforce these restrictions on external operations, it is crucial that you use this information to prevent the public key from being used improperly. For example, you can prevent a public signing key from being used to encrypt data, or prevent a public key from being used with an encryption algorithm that is not supported by KMS. You can also avoid errors, such as using the wrong signing algorithm in a verification operation. To verify a signature outside of KMS with an SM2 public key (China Regions only), you must specify the distinguishing ID.
By default, KMS uses 1234567812345678 as the distinguishing ID. For more information, see Offline verification with SM2 key pairs. The KMS key that you use for this operation must be in a compatible key state. For /// details, see Key states of KMS keys in the Key Management Service Developer Guide. Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN in the value of the KeyId parameter. Required permissions: kms:GetPublicKey (key policy) Related operations: CreateKey Eventual consistency: The KMS API follows an eventual consistency model. For more information, see KMS eventual consistency. @Sendable public func getPublicKey(_ input: GetPublicKeyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetPublicKeyResponse { diff --git a/Sources/Soto/Services/KMS/KMS_shapes.swift b/Sources/Soto/Services/KMS/KMS_shapes.swift index b5530f076b..1f53e5e536 100644 --- a/Sources/Soto/Services/KMS/KMS_shapes.swift +++ b/Sources/Soto/Services/KMS/KMS_shapes.swift @@ -32,6 +32,7 @@ extension KMS { case rsaesOaepSha1 = "RSAES_OAEP_SHA_1" case rsaesOaepSha256 = "RSAES_OAEP_SHA_256" case rsaesPkcs1V15 = "RSAES_PKCS1_V1_5" + case sm2pke = "SM2PKE" public var description: String { return self.rawValue } } @@ -124,6 +125,7 @@ extension KMS { public enum GrantOperation: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case createGrant = "CreateGrant" case decrypt = "Decrypt" + case deriveSharedSecret = "DeriveSharedSecret" case describeKey = "DescribeKey" case encrypt = "Encrypt" case generateDataKey = "GenerateDataKey" @@ -141,6 +143,11 @@ extension KMS { public var description: String { return self.rawValue } } + public enum KeyAgreementAlgorithmSpec: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case ecdh = "ECDH" + public var description: String { return self.rawValue } + } + public enum KeyEncryptionMechanism: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case rsaesOaepSha256 = "RSAES_OAEP_SHA_256" public var description: String { return self.rawValue } @@ -184,6 +191,7 @@ extension KMS { public enum KeyUsageType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case encryptDecrypt = "ENCRYPT_DECRYPT" case generateVerifyMac = "GENERATE_VERIFY_MAC" + case keyAgreement = "KEY_AGREEMENT" case signVerify = "SIGN_VERIFY" public var description: String { return self.rawValue } } @@ -240,6 +248,7 @@ extension KMS { case rsa2048 = "RSA_2048" case rsa3072 = "RSA_3072" case rsa4096 = "RSA_4096" + case sm2 = "SM2" public var description: String { return self.rawValue } } @@ -529,9 +538,9 @@ extension KMS { public let customKeyStoreId: String? /// A description of the KMS key. Use a description that helps you decide whether the KMS key is appropriate for a task. The default value is an empty string (no description). Do not include confidential or sensitive information in this field. This field may be displayed in plaintext in CloudTrail logs and other output. To set or change the description after the key is created, use UpdateKeyDescription. public let description: String? - /// Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. 
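The new KEY_AGREEMENT key usage, ECDH key agreement algorithm, and SM2 key spec added just above pair with CreateKey as follows. A hedged sketch, not project code: the description and helper name are illustrative and AWSClient construction is omitted.

import SotoKMS

/// Create an asymmetric NIST P-256 KMS key restricted to key agreement and return its key ID.
/// Error handling is left to the caller.
func createKeyAgreementKey(client: AWSClient) async throws -> String? {
    let kms = KMS(client: client)
    let response = try await kms.createKey(
        KMS.CreateKeyRequest(
            description: "ECDH key-agreement key",
            keySpec: .eccNistP256,       // any NIST-recommended curve works; SM2 in China Regions
            keyUsage: .keyAgreement      // the new KEY_AGREEMENT usage
        )
    )
    return response.keyMetadata?.keyId
}

Because key usage cannot be changed after creation, a key created this way can be used with DeriveSharedSecret and GetPublicKey but not with Encrypt, Sign, or GenerateMac.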
For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide . The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide . Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys. KMS supports the following key specs for KMS keys: Symmetric encryption key (default) SYMMETRIC_DEFAULT HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512 Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096 Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1) ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. SM2 key pairs (China Regions only) SM2 + /// Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the Key Management Service Developer Guide . The KeySpec determines whether the KMS key contains a symmetric key or an asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't change the KeySpec after the KMS key is created. To further restrict the algorithms that can be used with the KMS key, use a condition key in its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the Key Management Service Developer Guide . Amazon Web Services services that are integrated with KMS use symmetric encryption KMS keys to protect your data. These services do not support asymmetric KMS keys or HMAC KMS keys. KMS supports the following key specs for KMS keys: Symmetric encryption key (default) SYMMETRIC_DEFAULT HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512 Asymmetric RSA key pairs (encryption and decryption -or- signing and verification) RSA_2048 RSA_3072 RSA_4096 Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets) ECC_NIST_P256 (secp256r1) ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1) Other asymmetric elliptic curve key pairs (signing and verification) ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies. SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets) SM2 (China Regions only) public let keySpec: KeySpec? - /// Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created. Select only one valid value. For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC. 
For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY. For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY. For asymmetric KMS keys with SM2 key material (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY. + /// Determines the cryptographic operations for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric encryption KMS key; otherwise, it is required. You can't change the KeyUsage value after the KMS key is created. Select only one valid value. For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT. For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC. For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or SIGN_VERIFY. For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify SIGN_VERIFY or KEY_AGREEMENT. For asymmetric KMS keys with ECC_SECG_P256K1 key pairs specify SIGN_VERIFY. For asymmetric KMS keys with SM2 key pairs (China Regions only), specify ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT. public let keyUsage: KeyUsageType? /// Creates a multi-Region primary key that you can replicate into other Amazon Web Services Regions. You cannot change this value after you create the KMS key. For a multi-Region key, set this parameter to True. For a single-Region KMS key, omit this parameter or set it to False. The default value is False. This operation supports multi-Region keys, an KMS feature that lets you create multiple interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide. This value creates a primary key, not a replica. To create a replica key, use the ReplicateKey operation. You can create a symmetric or asymmetric multi-Region key, and you can create a multi-Region key with imported key material. However, you cannot create a multi-Region key in a custom key store. public let multiRegion: Bool? @@ -802,6 +811,81 @@ extension KMS { } } + public struct DeriveSharedSecretRequest: AWSEncodableShape { + /// Checks if your request will succeed. DryRun is an optional parameter. To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide. + public let dryRun: Bool? + /// A list of grant tokens. Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the Key Management Service Developer Guide. + public let grantTokens: [String]? + /// Specifies the key agreement algorithm used to derive the shared secret. The only valid value is ECDH. + public let keyAgreementAlgorithm: KeyAgreementAlgorithmSpec + /// Identifies an asymmetric NIST-recommended ECC or SM2 (China Regions only) KMS key. KMS uses the private key in the specified key pair to derive the shared secret. The key usage of the KMS key must be KEY_AGREEMENT. To find the KeyUsage of a KMS key, use the DescribeKey operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. 
When using an alias name, prefix it with "alias/". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN. For example: Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Alias name: alias/ExampleAlias Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases. + public let keyId: String + /// Specifies the public key in your peer's NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key pair. The public key must be a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280. GetPublicKey returns the public key of an asymmetric KMS key pair in the required DER-encoded format. If you use Amazon Web Services CLI version 1, you must provide the DER-encoded X.509 public key in a file. Otherwise, the Amazon Web Services CLI Base64-encodes the public key a second time, resulting in a ValidationException. You can specify the public key as binary data in a file using fileb (fileb://) or in-line using a Base64 encoded string. + public let publicKey: AWSBase64Data + /// A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256. This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro Enclaves, use the Amazon Web Services Nitro Enclaves SDK to generate the attestation document and then use the Recipient parameter from any Amazon Web Services SDK to provide the attestation document for the enclave. When you use this parameter, instead of returning a plaintext copy of the shared secret, KMS encrypts the plaintext shared secret under the public key in the attestation document, and returns the resulting ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob field in the response contains the encrypted shared secret derived from the KMS key specified by the KeyId parameter and public key specified by the PublicKey parameter. The SharedSecret field in the response is null or empty. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide. + public let recipient: RecipientInfo? + + public init(dryRun: Bool? = nil, grantTokens: [String]? = nil, keyAgreementAlgorithm: KeyAgreementAlgorithmSpec, keyId: String, publicKey: AWSBase64Data, recipient: RecipientInfo? 
= nil) { + self.dryRun = dryRun + self.grantTokens = grantTokens + self.keyAgreementAlgorithm = keyAgreementAlgorithm + self.keyId = keyId + self.publicKey = publicKey + self.recipient = recipient + } + + public func validate(name: String) throws { + try self.grantTokens?.forEach { + try validate($0, name: "grantTokens[]", parent: name, max: 8192) + try validate($0, name: "grantTokens[]", parent: name, min: 1) + } + try self.validate(self.grantTokens, name: "grantTokens", parent: name, max: 10) + try self.validate(self.keyId, name: "keyId", parent: name, max: 2048) + try self.validate(self.keyId, name: "keyId", parent: name, min: 1) + try self.validate(self.publicKey, name: "publicKey", parent: name, max: 8192) + try self.validate(self.publicKey, name: "publicKey", parent: name, min: 1) + try self.recipient?.validate(name: "\(name).recipient") + } + + private enum CodingKeys: String, CodingKey { + case dryRun = "DryRun" + case grantTokens = "GrantTokens" + case keyAgreementAlgorithm = "KeyAgreementAlgorithm" + case keyId = "KeyId" + case publicKey = "PublicKey" + case recipient = "Recipient" + } + } + + public struct DeriveSharedSecretResponse: AWSDecodableShape { + /// The plaintext shared secret encrypted with the public key in the attestation document. This field is included in the response only when the Recipient parameter in the request includes a valid attestation document from an Amazon Web Services Nitro enclave. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide. + public let ciphertextForRecipient: AWSBase64Data? + /// Identifies the key agreement algorithm used to derive the shared secret. + public let keyAgreementAlgorithm: KeyAgreementAlgorithmSpec? + /// Identifies the KMS key used to derive the shared secret. + public let keyId: String? + /// The source of the key material for the specified KMS key. When this value is AWS_KMS, KMS created the key material. When this value is EXTERNAL, the key material was imported or the KMS key doesn't have any key material. The only valid values for DeriveSharedSecret are AWS_KMS and EXTERNAL. DeriveSharedSecret does not support KMS keys with a KeyOrigin value of AWS_CLOUDHSM or EXTERNAL_KEY_STORE. + public let keyOrigin: OriginType? + /// The raw secret derived from the specified key agreement algorithm, private key in the asymmetric KMS key, and your peer's public key. If the response includes the CiphertextForRecipient field, the SharedSecret field is null or empty. + public let sharedSecret: AWSBase64Data? + + public init(ciphertextForRecipient: AWSBase64Data? = nil, keyAgreementAlgorithm: KeyAgreementAlgorithmSpec? = nil, keyId: String? = nil, keyOrigin: OriginType? = nil, sharedSecret: AWSBase64Data? = nil) { + self.ciphertextForRecipient = ciphertextForRecipient + self.keyAgreementAlgorithm = keyAgreementAlgorithm + self.keyId = keyId + self.keyOrigin = keyOrigin + self.sharedSecret = sharedSecret + } + + private enum CodingKeys: String, CodingKey { + case ciphertextForRecipient = "CiphertextForRecipient" + case keyAgreementAlgorithm = "KeyAgreementAlgorithm" + case keyId = "KeyId" + case keyOrigin = "KeyOrigin" + case sharedSecret = "SharedSecret" + } + } + public struct DescribeCustomKeyStoresRequest: AWSEncodableShape { /// Gets only information about the specified custom key store. Enter the key store ID. 
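For the other half of the exchange, each party can download the DER-encoded public key of its own key-agreement KMS key and hand it to the peer, per the PublicKey parameter documentation above. A sketch under the same assumptions as the previous examples.

import SotoKMS

/// Download the DER-encoded (SPKI) public key of a key-agreement KMS key so it can be
/// shared with a peer, who passes it as the PublicKey parameter of DeriveSharedSecret.
func exportKeyAgreementPublicKey(client: AWSClient, keyId: String) async throws -> [UInt8]? {
    let kms = KMS(client: client)
    let response = try await kms.getPublicKey(KMS.GetPublicKeyRequest(keyId: keyId))
    // Defensive check: only hand out keys whose usage is actually KEY_AGREEMENT.
    guard response.keyUsage == .keyAgreement else { return nil }
    return response.publicKey?.decoded()
}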
By default, this operation gets information about all custom key stores in the account and Region. To limit the output to a particular custom key store, provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but not both. public let customKeyStoreId: String? @@ -1083,7 +1167,7 @@ extension KMS { public let keyId: String /// Determines the type of data key pair that is generated. The KMS rule that restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or to sign and verify (but not both), and the rule that permits you to use ECC KMS keys only to sign and verify, are not effective on data key pairs, which are used outside of KMS. The SM2 key spec is only available in China Regions. public let keyPairSpec: DataKeyPairSpec - /// A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256. This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To include this parameter, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK. When you use this parameter, instead of returning a plaintext copy of the private data key, KMS encrypts the plaintext private data key under the public key in the attestation document, and returns the resulting ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob field in the response contains a copy of the private data key encrypted under the KMS key specified by the KeyId parameter. The PrivateKeyPlaintext field in the response is null or empty. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide. + /// A signed attestation document from an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The only valid encryption algorithm is RSAES_OAEP_SHA_256. This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro Enclaves, use the Amazon Web Services Nitro Enclaves SDK to generate the attestation document and then use the Recipient parameter from any Amazon Web Services SDK to provide the attestation document for the enclave. When you use this parameter, instead of returning a plaintext copy of the private data key, KMS encrypts the plaintext private data key under the public key in the attestation document, and returns the resulting ciphertext in the CiphertextForRecipient field in the response. This ciphertext can be decrypted only with the private key in the enclave. The CiphertextBlob field in the response contains a copy of the private data key encrypted under the KMS key specified by the KeyId parameter. The PrivateKeyPlaintext field in the response is null or empty. For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide. public let recipient: RecipientInfo? public init(dryRun: Bool? = nil, encryptionContext: [String: String]? = nil, grantTokens: [String]? = nil, keyId: String, keyPairSpec: DataKeyPairSpec, recipient: RecipientInfo? = nil) { @@ -1640,20 +1724,23 @@ extension KMS { public let customerMasterKeySpec: CustomerMasterKeySpec? 
/// The encryption algorithms that KMS supports for this key. This information is critical. If a public key encrypts data outside of KMS by using an unsupported encryption algorithm, the ciphertext cannot be decrypted. This field appears in the response only when the KeyUsage of the public key is ENCRYPT_DECRYPT. public let encryptionAlgorithms: [EncryptionAlgorithmSpec]? + /// The key agreement algorithm used to derive a shared secret. This field is present only when the KMS key has a KeyUsage value of KEY_AGREEMENT. + public let keyAgreementAlgorithms: [KeyAgreementAlgorithmSpec]? /// The Amazon Resource Name (key ARN) of the asymmetric KMS key from which the public key was downloaded. public let keyId: String? /// The type of the public key that was downloaded. public let keySpec: KeySpec? - /// The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or SIGN_VERIFY. This information is critical. If a public key with SIGN_VERIFY key usage encrypts data outside of KMS, the ciphertext cannot be decrypted. + /// The permitted use of the public key. Valid values for asymmetric key pairs are ENCRYPT_DECRYPT, SIGN_VERIFY, and KEY_AGREEMENT. This information is critical. For example, if a public key with SIGN_VERIFY key usage encrypts data outside of KMS, the ciphertext cannot be decrypted. public let keyUsage: KeyUsageType? /// The exported public key. The value is a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280. When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. public let publicKey: AWSBase64Data? /// The signing algorithms that KMS supports for this key. This field appears in the response only when the KeyUsage of the public key is SIGN_VERIFY. public let signingAlgorithms: [SigningAlgorithmSpec]? - public init(encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, keyId: String? = nil, keySpec: KeySpec? = nil, keyUsage: KeyUsageType? = nil, publicKey: AWSBase64Data? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil) { + public init(encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, keyAgreementAlgorithms: [KeyAgreementAlgorithmSpec]? = nil, keyId: String? = nil, keySpec: KeySpec? = nil, keyUsage: KeyUsageType? = nil, publicKey: AWSBase64Data? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil) { self.customerMasterKeySpec = nil self.encryptionAlgorithms = encryptionAlgorithms + self.keyAgreementAlgorithms = keyAgreementAlgorithms self.keyId = keyId self.keySpec = keySpec self.keyUsage = keyUsage @@ -1662,9 +1749,10 @@ extension KMS { } @available(*, deprecated, message: "Members customerMasterKeySpec have been deprecated") - public init(customerMasterKeySpec: CustomerMasterKeySpec? = nil, encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, keyId: String? = nil, keySpec: KeySpec? = nil, keyUsage: KeyUsageType? = nil, publicKey: AWSBase64Data? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil) { + public init(customerMasterKeySpec: CustomerMasterKeySpec? = nil, encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, keyAgreementAlgorithms: [KeyAgreementAlgorithmSpec]? = nil, keyId: String? = nil, keySpec: KeySpec? = nil, keyUsage: KeyUsageType? = nil, publicKey: AWSBase64Data? = nil, signingAlgorithms: [SigningAlgorithmSpec]?
= nil) { self.customerMasterKeySpec = customerMasterKeySpec self.encryptionAlgorithms = encryptionAlgorithms + self.keyAgreementAlgorithms = keyAgreementAlgorithms self.keyId = keyId self.keySpec = keySpec self.keyUsage = keyUsage @@ -1675,6 +1763,7 @@ extension KMS { private enum CodingKeys: String, CodingKey { case customerMasterKeySpec = "CustomerMasterKeySpec" case encryptionAlgorithms = "EncryptionAlgorithms" + case keyAgreementAlgorithms = "KeyAgreementAlgorithms" case keyId = "KeyId" case keySpec = "KeySpec" case keyUsage = "KeyUsage" @@ -1827,6 +1916,8 @@ extension KMS { public let encryptionAlgorithms: [EncryptionAlgorithmSpec]? /// Specifies whether the KMS key's key material expires. This value is present only when Origin is EXTERNAL, otherwise this value is omitted. public let expirationModel: ExpirationModelType? + /// The key agreement algorithm used to derive a shared secret. + public let keyAgreementAlgorithms: [KeyAgreementAlgorithmSpec]? /// The globally unique identifier for the KMS key. public let keyId: String /// The manager of the KMS key. KMS keys in your Amazon Web Services account are either customer managed or Amazon Web Services managed. For more information about the difference, see KMS keys in the Key Management Service Developer Guide. @@ -1854,7 +1945,7 @@ extension KMS { /// Information about the external key that is associated with a KMS key in an external key store. For more information, see External key in the Key Management Service Developer Guide. public let xksKeyConfiguration: XksKeyConfigurationType? - public init(arn: String? = nil, awsAccountId: String? = nil, cloudHsmClusterId: String? = nil, creationDate: Date? = nil, customKeyStoreId: String? = nil, deletionDate: Date? = nil, description: String? = nil, enabled: Bool? = nil, encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, expirationModel: ExpirationModelType? = nil, keyId: String, keyManager: KeyManagerType? = nil, keySpec: KeySpec? = nil, keyState: KeyState? = nil, keyUsage: KeyUsageType? = nil, macAlgorithms: [MacAlgorithmSpec]? = nil, multiRegion: Bool? = nil, multiRegionConfiguration: MultiRegionConfiguration? = nil, origin: OriginType? = nil, pendingDeletionWindowInDays: Int? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil, validTo: Date? = nil, xksKeyConfiguration: XksKeyConfigurationType? = nil) { + public init(arn: String? = nil, awsAccountId: String? = nil, cloudHsmClusterId: String? = nil, creationDate: Date? = nil, customKeyStoreId: String? = nil, deletionDate: Date? = nil, description: String? = nil, enabled: Bool? = nil, encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, expirationModel: ExpirationModelType? = nil, keyAgreementAlgorithms: [KeyAgreementAlgorithmSpec]? = nil, keyId: String, keyManager: KeyManagerType? = nil, keySpec: KeySpec? = nil, keyState: KeyState? = nil, keyUsage: KeyUsageType? = nil, macAlgorithms: [MacAlgorithmSpec]? = nil, multiRegion: Bool? = nil, multiRegionConfiguration: MultiRegionConfiguration? = nil, origin: OriginType? = nil, pendingDeletionWindowInDays: Int? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil, validTo: Date? = nil, xksKeyConfiguration: XksKeyConfigurationType? 
= nil) { self.arn = arn self.awsAccountId = awsAccountId self.cloudHsmClusterId = cloudHsmClusterId @@ -1866,6 +1957,7 @@ extension KMS { self.enabled = enabled self.encryptionAlgorithms = encryptionAlgorithms self.expirationModel = expirationModel + self.keyAgreementAlgorithms = keyAgreementAlgorithms self.keyId = keyId self.keyManager = keyManager self.keySpec = keySpec @@ -1882,7 +1974,7 @@ extension KMS { } @available(*, deprecated, message: "Members customerMasterKeySpec have been deprecated") - public init(arn: String? = nil, awsAccountId: String? = nil, cloudHsmClusterId: String? = nil, creationDate: Date? = nil, customerMasterKeySpec: CustomerMasterKeySpec? = nil, customKeyStoreId: String? = nil, deletionDate: Date? = nil, description: String? = nil, enabled: Bool? = nil, encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, expirationModel: ExpirationModelType? = nil, keyId: String, keyManager: KeyManagerType? = nil, keySpec: KeySpec? = nil, keyState: KeyState? = nil, keyUsage: KeyUsageType? = nil, macAlgorithms: [MacAlgorithmSpec]? = nil, multiRegion: Bool? = nil, multiRegionConfiguration: MultiRegionConfiguration? = nil, origin: OriginType? = nil, pendingDeletionWindowInDays: Int? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil, validTo: Date? = nil, xksKeyConfiguration: XksKeyConfigurationType? = nil) { + public init(arn: String? = nil, awsAccountId: String? = nil, cloudHsmClusterId: String? = nil, creationDate: Date? = nil, customerMasterKeySpec: CustomerMasterKeySpec? = nil, customKeyStoreId: String? = nil, deletionDate: Date? = nil, description: String? = nil, enabled: Bool? = nil, encryptionAlgorithms: [EncryptionAlgorithmSpec]? = nil, expirationModel: ExpirationModelType? = nil, keyAgreementAlgorithms: [KeyAgreementAlgorithmSpec]? = nil, keyId: String, keyManager: KeyManagerType? = nil, keySpec: KeySpec? = nil, keyState: KeyState? = nil, keyUsage: KeyUsageType? = nil, macAlgorithms: [MacAlgorithmSpec]? = nil, multiRegion: Bool? = nil, multiRegionConfiguration: MultiRegionConfiguration? = nil, origin: OriginType? = nil, pendingDeletionWindowInDays: Int? = nil, signingAlgorithms: [SigningAlgorithmSpec]? = nil, validTo: Date? = nil, xksKeyConfiguration: XksKeyConfigurationType? = nil) { self.arn = arn self.awsAccountId = awsAccountId self.cloudHsmClusterId = cloudHsmClusterId @@ -1894,6 +1986,7 @@ extension KMS { self.enabled = enabled self.encryptionAlgorithms = encryptionAlgorithms self.expirationModel = expirationModel + self.keyAgreementAlgorithms = keyAgreementAlgorithms self.keyId = keyId self.keyManager = keyManager self.keySpec = keySpec @@ -1921,6 +2014,7 @@ extension KMS { case enabled = "Enabled" case encryptionAlgorithms = "EncryptionAlgorithms" case expirationModel = "ExpirationModel" + case keyAgreementAlgorithms = "KeyAgreementAlgorithms" case keyId = "KeyId" case keyManager = "KeyManager" case keySpec = "KeySpec" @@ -3326,7 +3420,7 @@ public struct KMSErrorType: AWSErrorType { public static var invalidGrantTokenException: Self { .init(.invalidGrantTokenException) } /// The request was rejected because the provided import token is invalid or is associated with a different KMS key. public static var invalidImportTokenException: Self { .init(.invalidImportTokenException) } - /// The request was rejected for one of the following reasons: The KeyUsage value of the KMS key is incompatible with the API operation. The encryption algorithm or signing algorithm specified for the operation is incompatible with the type of key material in the KMS key (KeySpec). 
For encrypting, decrypting, re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage of a KMS key, use the DescribeKey operation. To find the encryption or signing algorithms supported for a particular KMS key, use the DescribeKey operation. + /// The request was rejected for one of the following reasons: The KeyUsage value of the KMS key is incompatible with the API operation. The encryption algorithm or signing algorithm specified for the operation is incompatible with the type of key material in the KMS key (KeySpec). For encrypting, decrypting, re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage of a KMS key, use the DescribeKey operation. To find the encryption or signing algorithms supported for a particular KMS key, use the DescribeKey operation. public static var invalidKeyUsageException: Self { .init(.invalidKeyUsageException) } /// The request was rejected because the marker that specifies where pagination should next begin is not valid. public static var invalidMarkerException: Self { .init(.invalidMarkerException) } diff --git a/Sources/Soto/Services/Kafka/Kafka_shapes.swift b/Sources/Soto/Services/Kafka/Kafka_shapes.swift index 4136004e82..60c8cf3d8e 100644 --- a/Sources/Soto/Services/Kafka/Kafka_shapes.swift +++ b/Sources/Soto/Services/Kafka/Kafka_shapes.swift @@ -232,6 +232,23 @@ extension Kafka { } } + public struct BrokerCountUpdateInfo: AWSDecodableShape { + /// Kafka Broker IDs of brokers being created. + public let createdBrokerIds: [Double]? + /// Kafka Broker IDs of brokers being deleted. + public let deletedBrokerIds: [Double]? + + public init(createdBrokerIds: [Double]? = nil, deletedBrokerIds: [Double]? = nil) { + self.createdBrokerIds = createdBrokerIds + self.deletedBrokerIds = deletedBrokerIds + } + + private enum CodingKeys: String, CodingKey { + case createdBrokerIds = "createdBrokerIds" + case deletedBrokerIds = "deletedBrokerIds" + } + } + public struct BrokerEBSVolumeInfo: AWSEncodableShape & AWSDecodableShape { /// The ID of the broker to update. public let kafkaBrokerNodeId: String? @@ -973,6 +990,19 @@ extension Kafka { } } + public struct ControllerNodeInfo: AWSDecodableShape { + /// Endpoints for accessing the Controller. + public let endpoints: [String]? + + public init(endpoints: [String]? = nil) { + self.endpoints = endpoints + } + + private enum CodingKeys: String, CodingKey { + case endpoints = "endpoints" + } + } + public struct CreateClusterRequest: AWSEncodableShape { /// Information about the broker nodes in the cluster. public let brokerNodeGroupInfo: BrokerNodeGroupInfo? @@ -2797,6 +2827,8 @@ extension Kafka { } public struct MutableClusterInfo: AWSDecodableShape { + /// Describes brokers being changed during a broker count update. + public let brokerCountUpdateInfo: BrokerCountUpdateInfo? /// Specifies the size of the EBS volume and the ID of the associated broker. public let brokerEBSVolumeInfo: [BrokerEBSVolumeInfo]? /// Includes all client authentication information. 
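The Kafka hunk above adds the BrokerCountUpdateInfo shape and threads it into MutableClusterInfo. As a rough, hypothetical usage sketch only (the configured Kafka client, the helper name, and the cluster-operation ARN are assumptions, not part of this patch), the new field would surface along these lines when describing an in-flight broker-count update:

import SotoKafka

// Hypothetical helper: print the broker IDs reported by the new
// brokerCountUpdateInfo field while a broker-count update is in progress.
// Assumes an already-configured Kafka service client and an existing
// MSK cluster-operation ARN.
func printBrokerCountUpdate(kafka: Kafka, operationArn: String) async throws {
    let response = try await kafka.describeClusterOperation(
        .init(clusterOperationArn: operationArn)
    )
    if let update = response.clusterOperationInfo?.targetClusterInfo?.brokerCountUpdateInfo {
        print("Creating broker IDs: \(update.createdBrokerIds ?? [])")
        print("Deleting broker IDs: \(update.deletedBrokerIds ?? [])")
    }
}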
@@ -2822,7 +2854,8 @@ extension Kafka { /// This controls storage mode for supported storage tiers. public let storageMode: StorageMode? - public init(brokerEBSVolumeInfo: [BrokerEBSVolumeInfo]? = nil, clientAuthentication: ClientAuthentication? = nil, configurationInfo: ConfigurationInfo? = nil, connectivityInfo: ConnectivityInfo? = nil, encryptionInfo: EncryptionInfo? = nil, enhancedMonitoring: EnhancedMonitoring? = nil, instanceType: String? = nil, kafkaVersion: String? = nil, loggingInfo: LoggingInfo? = nil, numberOfBrokerNodes: Int? = nil, openMonitoring: OpenMonitoring? = nil, storageMode: StorageMode? = nil) { + public init(brokerCountUpdateInfo: BrokerCountUpdateInfo? = nil, brokerEBSVolumeInfo: [BrokerEBSVolumeInfo]? = nil, clientAuthentication: ClientAuthentication? = nil, configurationInfo: ConfigurationInfo? = nil, connectivityInfo: ConnectivityInfo? = nil, encryptionInfo: EncryptionInfo? = nil, enhancedMonitoring: EnhancedMonitoring? = nil, instanceType: String? = nil, kafkaVersion: String? = nil, loggingInfo: LoggingInfo? = nil, numberOfBrokerNodes: Int? = nil, openMonitoring: OpenMonitoring? = nil, storageMode: StorageMode? = nil) { + self.brokerCountUpdateInfo = brokerCountUpdateInfo self.brokerEBSVolumeInfo = brokerEBSVolumeInfo self.clientAuthentication = clientAuthentication self.configurationInfo = configurationInfo @@ -2838,6 +2871,7 @@ extension Kafka { } private enum CodingKeys: String, CodingKey { + case brokerCountUpdateInfo = "brokerCountUpdateInfo" case brokerEBSVolumeInfo = "brokerEBSVolumeInfo" case clientAuthentication = "clientAuthentication" case configurationInfo = "configurationInfo" @@ -2884,6 +2918,8 @@ extension Kafka { public let addedToClusterTime: String? /// The broker node info. public let brokerNodeInfo: BrokerNodeInfo? + /// The ControllerNodeInfo. + public let controllerNodeInfo: ControllerNodeInfo? /// The instance type. public let instanceType: String? /// The Amazon Resource Name (ARN) of the node. @@ -2893,9 +2929,10 @@ extension Kafka { /// The ZookeeperNodeInfo. public let zookeeperNodeInfo: ZookeeperNodeInfo? - public init(addedToClusterTime: String? = nil, brokerNodeInfo: BrokerNodeInfo? = nil, instanceType: String? = nil, nodeARN: String? = nil, nodeType: NodeType? = nil, zookeeperNodeInfo: ZookeeperNodeInfo? = nil) { + public init(addedToClusterTime: String? = nil, brokerNodeInfo: BrokerNodeInfo? = nil, controllerNodeInfo: ControllerNodeInfo? = nil, instanceType: String? = nil, nodeARN: String? = nil, nodeType: NodeType? = nil, zookeeperNodeInfo: ZookeeperNodeInfo? 
= nil) { self.addedToClusterTime = addedToClusterTime self.brokerNodeInfo = brokerNodeInfo + self.controllerNodeInfo = controllerNodeInfo self.instanceType = instanceType self.nodeARN = nodeARN self.nodeType = nodeType @@ -2905,6 +2942,7 @@ extension Kafka { private enum CodingKeys: String, CodingKey { case addedToClusterTime = "addedToClusterTime" case brokerNodeInfo = "brokerNodeInfo" + case controllerNodeInfo = "controllerNodeInfo" case instanceType = "instanceType" case nodeARN = "nodeARN" case nodeType = "nodeType" diff --git a/Sources/Soto/Services/Kendra/Kendra_api.swift b/Sources/Soto/Services/Kendra/Kendra_api.swift index 1cbd775a4e..d54c4f945a 100644 --- a/Sources/Soto/Services/Kendra/Kendra_api.swift +++ b/Sources/Soto/Services/Kendra/Kendra_api.swift @@ -75,6 +75,7 @@ public struct Kendra: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ + "ca-central-1": "kendra-fips.ca-central-1.amazonaws.com", "us-east-1": "kendra-fips.us-east-1.amazonaws.com", "us-east-2": "kendra-fips.us-east-2.amazonaws.com", "us-gov-west-1": "kendra-fips.us-gov-west-1.amazonaws.com", diff --git a/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift b/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift index a58b497e33..d84f7d706d 100644 --- a/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift +++ b/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_api.swift @@ -127,7 +127,7 @@ public struct KinesisAnalyticsV2: AWSService { ) } - /// Adds a reference data source to an existing SQL-based Kinesis Data Analytics application. Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in an Amazon S3 object maps to columns in the resulting in-application table. + /// Adds a reference data source to an existing SQL-based Kinesis Data Analytics application. Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in an Amazon S3 object maps to columns in the resulting in-application table. @Sendable public func addApplicationReferenceDataSource(_ input: AddApplicationReferenceDataSourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AddApplicationReferenceDataSourceResponse { return try await self.client.execute( @@ -296,6 +296,19 @@ public struct KinesisAnalyticsV2: AWSService { ) } + /// Returns information about a specific operation performed on a Managed Service for Apache Flink application + @Sendable + public func describeApplicationOperation(_ input: DescribeApplicationOperationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeApplicationOperationResponse { + return try await self.client.execute( + operation: "DescribeApplicationOperation", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about a snapshot of application state data. 
@Sendable public func describeApplicationSnapshot(_ input: DescribeApplicationSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeApplicationSnapshotResponse { @@ -335,6 +348,19 @@ public struct KinesisAnalyticsV2: AWSService { ) } + /// Lists information about operations performed on a Managed Service for Apache Flink application + @Sendable + public func listApplicationOperations(_ input: ListApplicationOperationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListApplicationOperationsResponse { + return try await self.client.execute( + operation: "ListApplicationOperations", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists information about the current application snapshots. @Sendable public func listApplicationSnapshots(_ input: ListApplicationSnapshotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListApplicationSnapshotsResponse { @@ -348,7 +374,7 @@ public struct KinesisAnalyticsV2: AWSService { ) } - /// Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration associated with each version. To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation. This operation is supported only for Managed Service for Apache Flink. + /// Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration associated with each version. To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation. This operation is supported only for Managed Service for Apache Flink. @Sendable public func listApplicationVersions(_ input: ListApplicationVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListApplicationVersionsResponse { return try await self.client.execute( @@ -387,7 +413,7 @@ public struct KinesisAnalyticsV2: AWSService { ) } - /// Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status. You can roll back an application only if it is in the UPDATING or AUTOSCALING status. When you rollback an application, it loads state data from the last successful snapshot. If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request. This action is not supported for Managed Service for Apache Flink for SQL applications. + /// Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status or in the running status. You can roll back an application only if it is in the UPDATING, AUTOSCALING, or RUNNING statuses. When you rollback an application, it loads state data from the last successful snapshot. If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request. @Sendable public func rollbackApplication(_ input: RollbackApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RollbackApplicationResponse { return try await self.client.execute( @@ -413,7 +439,7 @@ public struct KinesisAnalyticsV2: AWSService { ) } - /// Stops the application from processing data. You can stop an application only if it is in the running status, unless you set the Force parameter to true. 
You can use the DescribeApplication operation to find the application status. Managed Service for Apache Flink takes a snapshot when the application is stopped, unless Force is set to true. + /// Stops the application from processing data. You can stop an application only if it is in the running status, unless you set the Force parameter to true. You can use the DescribeApplication operation to find the application status. Managed Service for Apache Flink takes a snapshot when the application is stopped, unless Force is set to true. @Sendable public func stopApplication(_ input: StopApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopApplicationResponse { return try await self.client.execute( @@ -487,3 +513,125 @@ extension KinesisAnalyticsV2 { self.config = from.config.with(patch: patch) } } + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension KinesisAnalyticsV2 { + /// Lists information about operations performed on a Managed Service for Apache Flink application + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listApplicationOperationsPaginator( + _ input: ListApplicationOperationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listApplicationOperations, + inputKey: \ListApplicationOperationsRequest.nextToken, + outputKey: \ListApplicationOperationsResponse.nextToken, + logger: logger + ) + } + + /// Lists information about the current application snapshots. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listApplicationSnapshotsPaginator( + _ input: ListApplicationSnapshotsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listApplicationSnapshots, + inputKey: \ListApplicationSnapshotsRequest.nextToken, + outputKey: \ListApplicationSnapshotsResponse.nextToken, + logger: logger + ) + } + + /// Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration associated with each version. To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation. This operation is supported only for Managed Service for Apache Flink. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listApplicationVersionsPaginator( + _ input: ListApplicationVersionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listApplicationVersions, + inputKey: \ListApplicationVersionsRequest.nextToken, + outputKey: \ListApplicationVersionsResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of Managed Service for Apache Flink applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status. If you want detailed information about a specific application, use DescribeApplication. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listApplicationsPaginator( + _ input: ListApplicationsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listApplications, + inputKey: \ListApplicationsRequest.nextToken, + outputKey: \ListApplicationsResponse.nextToken, + logger: logger + ) + } +} + +extension KinesisAnalyticsV2.ListApplicationOperationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> KinesisAnalyticsV2.ListApplicationOperationsRequest { + return .init( + applicationName: self.applicationName, + limit: self.limit, + nextToken: token, + operation: self.operation, + operationStatus: self.operationStatus + ) + } +} + +extension KinesisAnalyticsV2.ListApplicationSnapshotsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> KinesisAnalyticsV2.ListApplicationSnapshotsRequest { + return .init( + applicationName: self.applicationName, + limit: self.limit, + nextToken: token + ) + } +} + +extension KinesisAnalyticsV2.ListApplicationVersionsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> KinesisAnalyticsV2.ListApplicationVersionsRequest { + return .init( + applicationName: self.applicationName, + limit: self.limit, + nextToken: token + ) + } +} + +extension KinesisAnalyticsV2.ListApplicationsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> KinesisAnalyticsV2.ListApplicationsRequest { + return .init( + limit: self.limit, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_shapes.swift b/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_shapes.swift index 8a489bec19..86f51d42ae 100644 --- a/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_shapes.swift +++ b/Sources/Soto/Services/KinesisAnalyticsV2/KinesisAnalyticsV2_shapes.swift @@ -95,6 +95,14 @@ extension KinesisAnalyticsV2 { public var description: String { return self.rawValue } } + public enum OperationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cancelled = "CANCELLED" + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case successful = "SUCCESSFUL" + public var description: String { return self.rawValue } + } + public enum RecordFormatType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case csv = "CSV" case json = "JSON" @@ -175,17 +183,21 @@ extension KinesisAnalyticsV2 { public let applicationVersionId: Int64? /// The descriptions of the current CloudWatch logging options for the SQL-based Kinesis Data Analytics application. public let cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? + /// Operation ID for tracking AddApplicationCloudWatchLoggingOption request + public let operationId: String? - public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? = nil) { + public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? = nil, operationId: String?
= nil) { self.applicationARN = applicationARN self.applicationVersionId = applicationVersionId self.cloudWatchLoggingOptionDescriptions = cloudWatchLoggingOptionDescriptions + self.operationId = operationId } private enum CodingKeys: String, CodingKey { case applicationARN = "ApplicationARN" case applicationVersionId = "ApplicationVersionId" case cloudWatchLoggingOptionDescriptions = "CloudWatchLoggingOptionDescriptions" + case operationId = "OperationId" } } @@ -446,18 +458,22 @@ extension KinesisAnalyticsV2 { public let applicationARN: String? /// Provides the current application version. Managed Service for Apache Flink updates the ApplicationVersionId each time you update the application. public let applicationVersionId: Int64? + /// Operation ID for tracking AddApplicationVpcConfiguration request + public let operationId: String? /// The parameters of the new VPC configuration. public let vpcConfigurationDescription: VpcConfigurationDescription? - public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, vpcConfigurationDescription: VpcConfigurationDescription? = nil) { + public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, operationId: String? = nil, vpcConfigurationDescription: VpcConfigurationDescription? = nil) { self.applicationARN = applicationARN self.applicationVersionId = applicationVersionId + self.operationId = operationId self.vpcConfigurationDescription = vpcConfigurationDescription } private enum CodingKeys: String, CodingKey { case applicationARN = "ApplicationARN" case applicationVersionId = "ApplicationVersionId" + case operationId = "OperationId" case vpcConfigurationDescription = "VpcConfigurationDescription" } } @@ -526,6 +542,7 @@ extension KinesisAnalyticsV2 { public let applicationCodeConfiguration: ApplicationCodeConfiguration? /// Describes whether snapshots are enabled for a Managed Service for Apache Flink application. public let applicationSnapshotConfiguration: ApplicationSnapshotConfiguration? + public let applicationSystemRollbackConfiguration: ApplicationSystemRollbackConfiguration? /// Describes execution properties for a Managed Service for Apache Flink application. public let environmentProperties: EnvironmentProperties? /// The creation and update parameters for a Managed Service for Apache Flink application. @@ -537,9 +554,10 @@ extension KinesisAnalyticsV2 { /// The configuration parameters for a Managed Service for Apache Flink Studio notebook. public let zeppelinApplicationConfiguration: ZeppelinApplicationConfiguration? - public init(applicationCodeConfiguration: ApplicationCodeConfiguration? = nil, applicationSnapshotConfiguration: ApplicationSnapshotConfiguration? = nil, environmentProperties: EnvironmentProperties? = nil, flinkApplicationConfiguration: FlinkApplicationConfiguration? = nil, sqlApplicationConfiguration: SqlApplicationConfiguration? = nil, vpcConfigurations: [VpcConfiguration]? = nil, zeppelinApplicationConfiguration: ZeppelinApplicationConfiguration? = nil) { + public init(applicationCodeConfiguration: ApplicationCodeConfiguration? = nil, applicationSnapshotConfiguration: ApplicationSnapshotConfiguration? = nil, applicationSystemRollbackConfiguration: ApplicationSystemRollbackConfiguration? = nil, environmentProperties: EnvironmentProperties? = nil, flinkApplicationConfiguration: FlinkApplicationConfiguration? = nil, sqlApplicationConfiguration: SqlApplicationConfiguration? = nil, vpcConfigurations: [VpcConfiguration]? 
= nil, zeppelinApplicationConfiguration: ZeppelinApplicationConfiguration? = nil) { self.applicationCodeConfiguration = applicationCodeConfiguration self.applicationSnapshotConfiguration = applicationSnapshotConfiguration + self.applicationSystemRollbackConfiguration = applicationSystemRollbackConfiguration self.environmentProperties = environmentProperties self.flinkApplicationConfiguration = flinkApplicationConfiguration self.sqlApplicationConfiguration = sqlApplicationConfiguration @@ -561,6 +579,7 @@ extension KinesisAnalyticsV2 { private enum CodingKeys: String, CodingKey { case applicationCodeConfiguration = "ApplicationCodeConfiguration" case applicationSnapshotConfiguration = "ApplicationSnapshotConfiguration" + case applicationSystemRollbackConfiguration = "ApplicationSystemRollbackConfiguration" case environmentProperties = "EnvironmentProperties" case flinkApplicationConfiguration = "FlinkApplicationConfiguration" case sqlApplicationConfiguration = "SqlApplicationConfiguration" @@ -574,6 +593,7 @@ extension KinesisAnalyticsV2 { public let applicationCodeConfigurationDescription: ApplicationCodeConfigurationDescription? /// Describes whether snapshots are enabled for a Managed Service for Apache Flink application. public let applicationSnapshotConfigurationDescription: ApplicationSnapshotConfigurationDescription? + public let applicationSystemRollbackConfigurationDescription: ApplicationSystemRollbackConfigurationDescription? /// Describes execution properties for a Managed Service for Apache Flink application. public let environmentPropertyDescriptions: EnvironmentPropertyDescriptions? /// The details about a Managed Service for Apache Flink application. @@ -587,9 +607,10 @@ extension KinesisAnalyticsV2 { /// The configuration parameters for a Managed Service for Apache Flink Studio notebook. public let zeppelinApplicationConfigurationDescription: ZeppelinApplicationConfigurationDescription? - public init(applicationCodeConfigurationDescription: ApplicationCodeConfigurationDescription? = nil, applicationSnapshotConfigurationDescription: ApplicationSnapshotConfigurationDescription? = nil, environmentPropertyDescriptions: EnvironmentPropertyDescriptions? = nil, flinkApplicationConfigurationDescription: FlinkApplicationConfigurationDescription? = nil, runConfigurationDescription: RunConfigurationDescription? = nil, sqlApplicationConfigurationDescription: SqlApplicationConfigurationDescription? = nil, vpcConfigurationDescriptions: [VpcConfigurationDescription]? = nil, zeppelinApplicationConfigurationDescription: ZeppelinApplicationConfigurationDescription? = nil) { + public init(applicationCodeConfigurationDescription: ApplicationCodeConfigurationDescription? = nil, applicationSnapshotConfigurationDescription: ApplicationSnapshotConfigurationDescription? = nil, applicationSystemRollbackConfigurationDescription: ApplicationSystemRollbackConfigurationDescription? = nil, environmentPropertyDescriptions: EnvironmentPropertyDescriptions? = nil, flinkApplicationConfigurationDescription: FlinkApplicationConfigurationDescription? = nil, runConfigurationDescription: RunConfigurationDescription? = nil, sqlApplicationConfigurationDescription: SqlApplicationConfigurationDescription? = nil, vpcConfigurationDescriptions: [VpcConfigurationDescription]? = nil, zeppelinApplicationConfigurationDescription: ZeppelinApplicationConfigurationDescription? 
= nil) { self.applicationCodeConfigurationDescription = applicationCodeConfigurationDescription self.applicationSnapshotConfigurationDescription = applicationSnapshotConfigurationDescription + self.applicationSystemRollbackConfigurationDescription = applicationSystemRollbackConfigurationDescription self.environmentPropertyDescriptions = environmentPropertyDescriptions self.flinkApplicationConfigurationDescription = flinkApplicationConfigurationDescription self.runConfigurationDescription = runConfigurationDescription @@ -601,6 +622,7 @@ extension KinesisAnalyticsV2 { private enum CodingKeys: String, CodingKey { case applicationCodeConfigurationDescription = "ApplicationCodeConfigurationDescription" case applicationSnapshotConfigurationDescription = "ApplicationSnapshotConfigurationDescription" + case applicationSystemRollbackConfigurationDescription = "ApplicationSystemRollbackConfigurationDescription" case environmentPropertyDescriptions = "EnvironmentPropertyDescriptions" case flinkApplicationConfigurationDescription = "FlinkApplicationConfigurationDescription" case runConfigurationDescription = "RunConfigurationDescription" @@ -615,6 +637,7 @@ extension KinesisAnalyticsV2 { public let applicationCodeConfigurationUpdate: ApplicationCodeConfigurationUpdate? /// Describes whether snapshots are enabled for a Managed Service for Apache Flink application. public let applicationSnapshotConfigurationUpdate: ApplicationSnapshotConfigurationUpdate? + public let applicationSystemRollbackConfigurationUpdate: ApplicationSystemRollbackConfigurationUpdate? /// Describes updates to the environment properties for a Managed Service for Apache Flink application. public let environmentPropertyUpdates: EnvironmentPropertyUpdates? /// Describes updates to a Managed Service for Apache Flink application's configuration. @@ -626,9 +649,10 @@ extension KinesisAnalyticsV2 { /// Updates to the configuration of a Managed Service for Apache Flink Studio notebook. public let zeppelinApplicationConfigurationUpdate: ZeppelinApplicationConfigurationUpdate? - public init(applicationCodeConfigurationUpdate: ApplicationCodeConfigurationUpdate? = nil, applicationSnapshotConfigurationUpdate: ApplicationSnapshotConfigurationUpdate? = nil, environmentPropertyUpdates: EnvironmentPropertyUpdates? = nil, flinkApplicationConfigurationUpdate: FlinkApplicationConfigurationUpdate? = nil, sqlApplicationConfigurationUpdate: SqlApplicationConfigurationUpdate? = nil, vpcConfigurationUpdates: [VpcConfigurationUpdate]? = nil, zeppelinApplicationConfigurationUpdate: ZeppelinApplicationConfigurationUpdate? = nil) { + public init(applicationCodeConfigurationUpdate: ApplicationCodeConfigurationUpdate? = nil, applicationSnapshotConfigurationUpdate: ApplicationSnapshotConfigurationUpdate? = nil, applicationSystemRollbackConfigurationUpdate: ApplicationSystemRollbackConfigurationUpdate? = nil, environmentPropertyUpdates: EnvironmentPropertyUpdates? = nil, flinkApplicationConfigurationUpdate: FlinkApplicationConfigurationUpdate? = nil, sqlApplicationConfigurationUpdate: SqlApplicationConfigurationUpdate? = nil, vpcConfigurationUpdates: [VpcConfigurationUpdate]? = nil, zeppelinApplicationConfigurationUpdate: ZeppelinApplicationConfigurationUpdate? 
= nil) { self.applicationCodeConfigurationUpdate = applicationCodeConfigurationUpdate self.applicationSnapshotConfigurationUpdate = applicationSnapshotConfigurationUpdate + self.applicationSystemRollbackConfigurationUpdate = applicationSystemRollbackConfigurationUpdate self.environmentPropertyUpdates = environmentPropertyUpdates self.flinkApplicationConfigurationUpdate = flinkApplicationConfigurationUpdate self.sqlApplicationConfigurationUpdate = sqlApplicationConfigurationUpdate @@ -650,6 +674,7 @@ extension KinesisAnalyticsV2 { private enum CodingKeys: String, CodingKey { case applicationCodeConfigurationUpdate = "ApplicationCodeConfigurationUpdate" case applicationSnapshotConfigurationUpdate = "ApplicationSnapshotConfigurationUpdate" + case applicationSystemRollbackConfigurationUpdate = "ApplicationSystemRollbackConfigurationUpdate" case environmentPropertyUpdates = "EnvironmentPropertyUpdates" case flinkApplicationConfigurationUpdate = "FlinkApplicationConfigurationUpdate" case sqlApplicationConfigurationUpdate = "SqlApplicationConfigurationUpdate" @@ -673,6 +698,8 @@ extension KinesisAnalyticsV2 { public let applicationName: String /// The status of the application. public let applicationStatus: ApplicationStatus + /// The current timestamp when the application version was created. + public let applicationVersionCreateTimestamp: Date? /// Provides the current application version. Managed Service for Apache Flink updates the ApplicationVersionId each time you update the application. public let applicationVersionId: Int64 /// If you reverted the application using RollbackApplication, the application version when RollbackApplication was called. @@ -694,7 +721,7 @@ extension KinesisAnalyticsV2 { /// Specifies the IAM role that the application uses to access external resources. public let serviceExecutionRole: String? - public init(applicationARN: String, applicationConfigurationDescription: ApplicationConfigurationDescription? = nil, applicationDescription: String? = nil, applicationMaintenanceConfigurationDescription: ApplicationMaintenanceConfigurationDescription? = nil, applicationMode: ApplicationMode? = nil, applicationName: String, applicationStatus: ApplicationStatus, applicationVersionId: Int64, applicationVersionRolledBackFrom: Int64? = nil, applicationVersionRolledBackTo: Int64? = nil, applicationVersionUpdatedFrom: Int64? = nil, cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? = nil, conditionalToken: String? = nil, createTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, runtimeEnvironment: RuntimeEnvironment, serviceExecutionRole: String? = nil) { + public init(applicationARN: String, applicationConfigurationDescription: ApplicationConfigurationDescription? = nil, applicationDescription: String? = nil, applicationMaintenanceConfigurationDescription: ApplicationMaintenanceConfigurationDescription? = nil, applicationMode: ApplicationMode? = nil, applicationName: String, applicationStatus: ApplicationStatus, applicationVersionCreateTimestamp: Date? = nil, applicationVersionId: Int64, applicationVersionRolledBackFrom: Int64? = nil, applicationVersionRolledBackTo: Int64? = nil, applicationVersionUpdatedFrom: Int64? = nil, cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? = nil, conditionalToken: String? = nil, createTimestamp: Date? = nil, lastUpdateTimestamp: Date? = nil, runtimeEnvironment: RuntimeEnvironment, serviceExecutionRole: String? 
= nil) { self.applicationARN = applicationARN self.applicationConfigurationDescription = applicationConfigurationDescription self.applicationDescription = applicationDescription @@ -702,6 +729,7 @@ extension KinesisAnalyticsV2 { self.applicationMode = applicationMode self.applicationName = applicationName self.applicationStatus = applicationStatus + self.applicationVersionCreateTimestamp = applicationVersionCreateTimestamp self.applicationVersionId = applicationVersionId self.applicationVersionRolledBackFrom = applicationVersionRolledBackFrom self.applicationVersionRolledBackTo = applicationVersionRolledBackTo @@ -722,6 +750,7 @@ extension KinesisAnalyticsV2 { case applicationMode = "ApplicationMode" case applicationName = "ApplicationName" case applicationStatus = "ApplicationStatus" + case applicationVersionCreateTimestamp = "ApplicationVersionCreateTimestamp" case applicationVersionId = "ApplicationVersionId" case applicationVersionRolledBackFrom = "ApplicationVersionRolledBackFrom" case applicationVersionRolledBackTo = "ApplicationVersionRolledBackTo" @@ -771,6 +800,61 @@ extension KinesisAnalyticsV2 { } } + public struct ApplicationOperationInfo: AWSDecodableShape { + /// The timestamp at which the operation finished for the application + public let endTime: Date? + public let operation: String? + public let operationId: String? + public let operationStatus: OperationStatus? + /// The timestamp at which the operation was created + public let startTime: Date? + + public init(endTime: Date? = nil, operation: String? = nil, operationId: String? = nil, operationStatus: OperationStatus? = nil, startTime: Date? = nil) { + self.endTime = endTime + self.operation = operation + self.operationId = operationId + self.operationStatus = operationStatus + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case endTime = "EndTime" + case operation = "Operation" + case operationId = "OperationId" + case operationStatus = "OperationStatus" + case startTime = "StartTime" + } + } + + public struct ApplicationOperationInfoDetails: AWSDecodableShape { + public let applicationVersionChangeDetails: ApplicationVersionChangeDetails? + /// The timestamp at which the operation finished for the application + public let endTime: Date + public let operation: String + public let operationFailureDetails: OperationFailureDetails? + public let operationStatus: OperationStatus + /// The timestamp at which the operation was created + public let startTime: Date + + public init(applicationVersionChangeDetails: ApplicationVersionChangeDetails? = nil, endTime: Date, operation: String, operationFailureDetails: OperationFailureDetails? = nil, operationStatus: OperationStatus, startTime: Date) { + self.applicationVersionChangeDetails = applicationVersionChangeDetails + self.endTime = endTime + self.operation = operation + self.operationFailureDetails = operationFailureDetails + self.operationStatus = operationStatus + self.startTime = startTime + } + + private enum CodingKeys: String, CodingKey { + case applicationVersionChangeDetails = "ApplicationVersionChangeDetails" + case endTime = "EndTime" + case operation = "Operation" + case operationFailureDetails = "OperationFailureDetails" + case operationStatus = "OperationStatus" + case startTime = "StartTime" + } + } + public struct ApplicationRestoreConfiguration: AWSEncodableShape & AWSDecodableShape { /// Specifies how the application should be restored. 
public let applicationRestoreType: ApplicationRestoreType @@ -866,6 +950,62 @@ extension KinesisAnalyticsV2 { } } + public struct ApplicationSystemRollbackConfiguration: AWSEncodableShape { + /// Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application + public let rollbackEnabled: Bool + + public init(rollbackEnabled: Bool) { + self.rollbackEnabled = rollbackEnabled + } + + private enum CodingKeys: String, CodingKey { + case rollbackEnabled = "RollbackEnabled" + } + } + + public struct ApplicationSystemRollbackConfigurationDescription: AWSDecodableShape { + /// Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application + public let rollbackEnabled: Bool + + public init(rollbackEnabled: Bool) { + self.rollbackEnabled = rollbackEnabled + } + + private enum CodingKeys: String, CodingKey { + case rollbackEnabled = "RollbackEnabled" + } + } + + public struct ApplicationSystemRollbackConfigurationUpdate: AWSEncodableShape { + /// Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application + public let rollbackEnabledUpdate: Bool + + public init(rollbackEnabledUpdate: Bool) { + self.rollbackEnabledUpdate = rollbackEnabledUpdate + } + + private enum CodingKeys: String, CodingKey { + case rollbackEnabledUpdate = "RollbackEnabledUpdate" + } + } + + public struct ApplicationVersionChangeDetails: AWSDecodableShape { + /// The operation was performed on this version of the application + public let applicationVersionUpdatedFrom: Int64 + /// The operation execution resulted in the transition to the following version of the application + public let applicationVersionUpdatedTo: Int64 + + public init(applicationVersionUpdatedFrom: Int64, applicationVersionUpdatedTo: Int64) { + self.applicationVersionUpdatedFrom = applicationVersionUpdatedFrom + self.applicationVersionUpdatedTo = applicationVersionUpdatedTo + } + + private enum CodingKeys: String, CodingKey { + case applicationVersionUpdatedFrom = "ApplicationVersionUpdatedFrom" + case applicationVersionUpdatedTo = "ApplicationVersionUpdatedTo" + } + } + public struct ApplicationVersionSummary: AWSDecodableShape { /// The status of the application. public let applicationStatus: ApplicationStatus @@ -1420,17 +1560,21 @@ extension KinesisAnalyticsV2 { public let applicationVersionId: Int64? /// The descriptions of the remaining CloudWatch logging options for the application. public let cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? + /// Operation ID for tracking DeleteApplicationCloudWatchLoggingOption request + public let operationId: String? - public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? = nil) { + public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, cloudWatchLoggingOptionDescriptions: [CloudWatchLoggingOptionDescription]? = nil, operationId: String? 
= nil) { self.applicationARN = applicationARN self.applicationVersionId = applicationVersionId self.cloudWatchLoggingOptionDescriptions = cloudWatchLoggingOptionDescriptions + self.operationId = operationId } private enum CodingKeys: String, CodingKey { case applicationARN = "ApplicationARN" case applicationVersionId = "ApplicationVersionId" case cloudWatchLoggingOptionDescriptions = "CloudWatchLoggingOptionDescriptions" + case operationId = "OperationId" } } @@ -1686,15 +1830,19 @@ extension KinesisAnalyticsV2 { public let applicationARN: String? /// The updated version ID of the application. public let applicationVersionId: Int64? + /// Operation ID for tracking DeleteApplicationVpcConfiguration request + public let operationId: String? - public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil) { + public init(applicationARN: String? = nil, applicationVersionId: Int64? = nil, operationId: String? = nil) { self.applicationARN = applicationARN self.applicationVersionId = applicationVersionId + self.operationId = operationId } private enum CodingKeys: String, CodingKey { case applicationARN = "ApplicationARN" case applicationVersionId = "ApplicationVersionId" + case operationId = "OperationId" } } @@ -1745,6 +1893,41 @@ extension KinesisAnalyticsV2 { } } + public struct DescribeApplicationOperationRequest: AWSEncodableShape { + public let applicationName: String + public let operationId: String + + public init(applicationName: String, operationId: String) { + self.applicationName = applicationName + self.operationId = operationId + } + + public func validate(name: String) throws { + try self.validate(self.applicationName, name: "applicationName", parent: name, max: 128) + try self.validate(self.applicationName, name: "applicationName", parent: name, min: 1) + try self.validate(self.applicationName, name: "applicationName", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.validate(self.operationId, name: "operationId", parent: name, max: 64) + try self.validate(self.operationId, name: "operationId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case applicationName = "ApplicationName" + case operationId = "OperationId" + } + } + + public struct DescribeApplicationOperationResponse: AWSDecodableShape { + public let applicationOperationInfoDetails: ApplicationOperationInfoDetails? + + public init(applicationOperationInfoDetails: ApplicationOperationInfoDetails? = nil) { + self.applicationOperationInfoDetails = applicationOperationInfoDetails + } + + private enum CodingKeys: String, CodingKey { + case applicationOperationInfoDetails = "ApplicationOperationInfoDetails" + } + } + public struct DescribeApplicationRequest: AWSEncodableShape { /// The name of the application. public let applicationName: String @@ -1988,6 +2171,18 @@ extension KinesisAnalyticsV2 { } } + public struct ErrorInfo: AWSDecodableShape { + public let errorString: String? + + public init(errorString: String? = nil) { + self.errorString = errorString + } + + private enum CodingKeys: String, CodingKey { + case errorString = "ErrorString" + } + } + public struct FlinkApplicationConfiguration: AWSEncodableShape { /// Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance. For more information, see Checkpoints for Fault Tolerance in the Apache Flink Documentation. public let checkpointConfiguration: CheckpointConfiguration? 
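The KinesisAnalyticsV2 hunks above introduce operation tracking: an OperationStatus enum, operation IDs on responses, and the new ListApplicationOperations and DescribeApplicationOperation calls (plus a matching paginator, added further up). As a rough, hypothetical sketch only (the configured client, the helper name, and the application name are assumptions, not part of this patch), the new shapes could be combined like this:

import SotoKinesisAnalyticsV2

// Hypothetical helper: walk every recorded operation for an application and,
// for failed ones, fetch the failure details via the new operation ID.
// Assumes an already-configured KinesisAnalyticsV2 service client.
func auditApplicationOperations(analytics: KinesisAnalyticsV2, applicationName: String) async throws {
    // The paginator follows NextToken automatically because
    // ListApplicationOperationsRequest conforms to AWSPaginateToken.
    let pages = analytics.listApplicationOperationsPaginator(
        .init(applicationName: applicationName)
    )
    for try await page in pages {
        for op in page.applicationOperationInfoList ?? [] {
            print("\(op.operation ?? "unknown"): \(op.operationStatus?.rawValue ?? "unknown")")
            if op.operationStatus == .failed, let operationId = op.operationId {
                let detail = try await analytics.describeApplicationOperation(
                    .init(applicationName: applicationName, operationId: operationId)
                )
                let failure = detail.applicationOperationInfoDetails?.operationFailureDetails
                print("  error: \(failure?.errorInfo?.errorString ?? "none")")
                print("  system rollback operation: \(failure?.rollbackOperationId ?? "none")")
            }
        }
    }
}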
@@ -2390,7 +2585,7 @@ extension KinesisAnalyticsV2 { } public struct InputStartingPositionConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The starting position on the stream. NOW - Start reading just after the most recent record in the stream, and start at the request timestamp that the customer issued. TRIM_HORIZON - Start reading at the last untrimmed record in the stream, which is the oldest record available in the stream. This option is not available for an Amazon Kinesis Data Firehose delivery stream. LAST_STOPPED_POINT - Resume reading from where the application last stopped reading. + /// The starting position on the stream. NOW - Start reading just after the most recent record in the stream, and start at the request timestamp that the customer issued. TRIM_HORIZON - Start reading at the last untrimmed record in the stream, which is the oldest record available in the stream. This option is not available for an Amazon Kinesis Data Firehose delivery stream. LAST_STOPPED_POINT - Resume reading from where the application last stopped reading. public let inputStartingPosition: InputStartingPosition? public init(inputStartingPosition: InputStartingPosition? = nil) { @@ -2747,6 +2942,57 @@ extension KinesisAnalyticsV2 { } } + public struct ListApplicationOperationsRequest: AWSEncodableShape { + public let applicationName: String + public let limit: Int? + public let nextToken: String? + public let operation: String? + public let operationStatus: OperationStatus? + + public init(applicationName: String, limit: Int? = nil, nextToken: String? = nil, operation: String? = nil, operationStatus: OperationStatus? = nil) { + self.applicationName = applicationName + self.limit = limit + self.nextToken = nextToken + self.operation = operation + self.operationStatus = operationStatus + } + + public func validate(name: String) throws { + try self.validate(self.applicationName, name: "applicationName", parent: name, max: 128) + try self.validate(self.applicationName, name: "applicationName", parent: name, min: 1) + try self.validate(self.applicationName, name: "applicationName", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.validate(self.limit, name: "limit", parent: name, max: 50) + try self.validate(self.limit, name: "limit", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 512) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.operation, name: "operation", parent: name, max: 64) + try self.validate(self.operation, name: "operation", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case applicationName = "ApplicationName" + case limit = "Limit" + case nextToken = "NextToken" + case operation = "Operation" + case operationStatus = "OperationStatus" + } + } + + public struct ListApplicationOperationsResponse: AWSDecodableShape { + public let applicationOperationInfoList: [ApplicationOperationInfo]? + public let nextToken: String? + + public init(applicationOperationInfoList: [ApplicationOperationInfo]? = nil, nextToken: String? = nil) { + self.applicationOperationInfoList = applicationOperationInfoList + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case applicationOperationInfoList = "ApplicationOperationInfoList" + case nextToken = "NextToken" + } + } + public struct ListApplicationSnapshotsRequest: AWSEncodableShape { /// The name of an existing application. 
public let applicationName: String @@ -3035,6 +3281,22 @@ extension KinesisAnalyticsV2 { } } + public struct OperationFailureDetails: AWSDecodableShape { + public let errorInfo: ErrorInfo? + /// Provides the operation ID of a system-rollback operation executed due to failure in the current operation + public let rollbackOperationId: String? + + public init(errorInfo: ErrorInfo? = nil, rollbackOperationId: String? = nil) { + self.errorInfo = errorInfo + self.rollbackOperationId = rollbackOperationId + } + + private enum CodingKeys: String, CodingKey { + case errorInfo = "ErrorInfo" + case rollbackOperationId = "RollbackOperationId" + } + } + public struct Output: AWSEncodableShape { /// Describes the data format when records are written to the destination. public let destinationSchema: DestinationSchema @@ -3437,13 +3699,17 @@ extension KinesisAnalyticsV2 { public struct RollbackApplicationResponse: AWSDecodableShape { public let applicationDetail: ApplicationDetail + /// Operation ID for tracking RollbackApplication request + public let operationId: String? - public init(applicationDetail: ApplicationDetail) { + public init(applicationDetail: ApplicationDetail, operationId: String? = nil) { self.applicationDetail = applicationDetail + self.operationId = operationId } private enum CodingKeys: String, CodingKey { case applicationDetail = "ApplicationDetail" + case operationId = "OperationId" } } @@ -3955,7 +4221,16 @@ extension KinesisAnalyticsV2 { } public struct StartApplicationResponse: AWSDecodableShape { - public init() {} + /// Operation ID for tracking StartApplication request + public let operationId: String? + + public init(operationId: String? = nil) { + self.operationId = operationId + } + + private enum CodingKeys: String, CodingKey { + case operationId = "OperationId" + } } public struct StopApplicationRequest: AWSEncodableShape { @@ -3982,7 +4257,16 @@ extension KinesisAnalyticsV2 { } public struct StopApplicationResponse: AWSDecodableShape { - public init() {} + /// Operation ID for tracking StopApplication request + public let operationId: String? + + public init(operationId: String? = nil) { + self.operationId = operationId + } + + private enum CodingKeys: String, CodingKey { + case operationId = "OperationId" + } } public struct Tag: AWSEncodableShape & AWSDecodableShape { @@ -4177,13 +4461,17 @@ extension KinesisAnalyticsV2 { public struct UpdateApplicationResponse: AWSDecodableShape { /// Describes application updates. public let applicationDetail: ApplicationDetail + /// Operation ID for tracking UpdateApplication request + public let operationId: String? - public init(applicationDetail: ApplicationDetail) { + public init(applicationDetail: ApplicationDetail, operationId: String? 
= nil) { self.applicationDetail = applicationDetail + self.operationId = operationId } private enum CodingKeys: String, CodingKey { case applicationDetail = "ApplicationDetail" + case operationId = "OperationId" } } diff --git a/Sources/Soto/Services/KinesisVideo/KinesisVideo_api.swift b/Sources/Soto/Services/KinesisVideo/KinesisVideo_api.swift index e2165df4c5..fc9f243660 100644 --- a/Sources/Soto/Services/KinesisVideo/KinesisVideo_api.swift +++ b/Sources/Soto/Services/KinesisVideo/KinesisVideo_api.swift @@ -57,6 +57,8 @@ public struct KinesisVideo: AWSService { serviceProtocol: .restjson, apiVersion: "2017-09-30", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, + variantEndpoints: Self.variantEndpoints, errorType: KinesisVideoErrorType.self, xmlNamespace: "https://kinesisvideo.amazonaws.com/doc/2017-09-30/", middleware: middleware, @@ -67,8 +69,20 @@ public struct KinesisVideo: AWSService { } + /// custom endpoints for regions + static var serviceEndpoints: [String: String] {[ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]} + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/KinesisVideoArchivedMedia/KinesisVideoArchivedMedia_api.swift b/Sources/Soto/Services/KinesisVideoArchivedMedia/KinesisVideoArchivedMedia_api.swift index 7bd48665b2..7cdd287e33 100644 --- a/Sources/Soto/Services/KinesisVideoArchivedMedia/KinesisVideoArchivedMedia_api.swift +++ b/Sources/Soto/Services/KinesisVideoArchivedMedia/KinesisVideoArchivedMedia_api.swift @@ -57,6 +57,8 @@ public struct KinesisVideoArchivedMedia: AWSService { serviceProtocol: .restjson, apiVersion: "2017-09-30", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, + variantEndpoints: Self.variantEndpoints, errorType: KinesisVideoArchivedMediaErrorType.self, middleware: middleware, timeout: timeout, @@ -66,8 +68,20 @@ public struct KinesisVideoArchivedMedia: AWSService { } + /// custom endpoints for regions + static var serviceEndpoints: [String: String] {[ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]} + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/KinesisVideoMedia/KinesisVideoMedia_api.swift b/Sources/Soto/Services/KinesisVideoMedia/KinesisVideoMedia_api.swift index 7a96e43fe9..872d83b12a 100644 --- a/Sources/Soto/Services/KinesisVideoMedia/KinesisVideoMedia_api.swift +++ b/Sources/Soto/Services/KinesisVideoMedia/KinesisVideoMedia_api.swift @@ -57,6 +57,8 @@ public struct KinesisVideoMedia: AWSService { serviceProtocol: .restjson, apiVersion: "2017-09-30", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, + variantEndpoints: Self.variantEndpoints, errorType: KinesisVideoMediaErrorType.self, middleware: middleware, timeout: timeout, @@ -66,8 +68,20 @@ public struct KinesisVideoMedia: AWSService { } + /// custom endpoints for 
regions + static var serviceEndpoints: [String: String] {[ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]} + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/KinesisVideoSignaling/KinesisVideoSignaling_api.swift b/Sources/Soto/Services/KinesisVideoSignaling/KinesisVideoSignaling_api.swift index 78df5a12d1..ad5dcab0f3 100644 --- a/Sources/Soto/Services/KinesisVideoSignaling/KinesisVideoSignaling_api.swift +++ b/Sources/Soto/Services/KinesisVideoSignaling/KinesisVideoSignaling_api.swift @@ -59,6 +59,8 @@ public struct KinesisVideoSignaling: AWSService { serviceProtocol: .restjson, apiVersion: "2019-12-04", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, + variantEndpoints: Self.variantEndpoints, errorType: KinesisVideoSignalingErrorType.self, middleware: middleware, timeout: timeout, @@ -68,8 +70,20 @@ public struct KinesisVideoSignaling: AWSService { } + /// custom endpoints for regions + static var serviceEndpoints: [String: String] {[ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]} + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift b/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift index 00589da467..1069aea522 100644 --- a/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift +++ b/Sources/Soto/Services/KinesisVideoWebRTCStorage/KinesisVideoWebRTCStorage_api.swift @@ -59,6 +59,8 @@ public struct KinesisVideoWebRTCStorage: AWSService { serviceProtocol: .restjson, apiVersion: "2018-05-10", endpoint: endpoint, + serviceEndpoints: Self.serviceEndpoints, + variantEndpoints: Self.variantEndpoints, errorType: KinesisVideoWebRTCStorageErrorType.self, middleware: middleware, timeout: timeout, @@ -68,8 +70,20 @@ public struct KinesisVideoWebRTCStorage: AWSService { } + /// custom endpoints for regions + static var serviceEndpoints: [String: String] {[ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]} + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "us-gov-east-1": "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift b/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift index 6725040cf7..9e01195f37 100644 --- a/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift +++ b/Sources/Soto/Services/LakeFormation/LakeFormation_api.swift @@ -366,6 +366,19 @@ public struct LakeFormation: AWSService { ) } + /// Returns the 
identity of the invoking principal. + @Sendable + public func getDataLakePrincipal(_ input: GetDataLakePrincipalRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataLakePrincipalResponse { + return try await self.client.execute( + operation: "GetDataLakePrincipal", + path: "/GetDataLakePrincipal", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the list of the data lake administrators of a Lake Formation-managed data lake. @Sendable public func getDataLakeSettings(_ input: GetDataLakeSettingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataLakeSettingsResponse { diff --git a/Sources/Soto/Services/LakeFormation/LakeFormation_shapes.swift b/Sources/Soto/Services/LakeFormation/LakeFormation_shapes.swift index a8cdbc7e69..05a3de810c 100644 --- a/Sources/Soto/Services/LakeFormation/LakeFormation_shapes.swift +++ b/Sources/Soto/Services/LakeFormation/LakeFormation_shapes.swift @@ -1437,6 +1437,23 @@ extension LakeFormation { } } + public struct GetDataLakePrincipalRequest: AWSEncodableShape { + public init() {} + } + + public struct GetDataLakePrincipalResponse: AWSDecodableShape { + /// A unique identifier of the invoking principal. + public let identity: String? + + public init(identity: String? = nil) { + self.identity = identity + } + + private enum CodingKeys: String, CodingKey { + case identity = "Identity" + } + } + public struct GetDataLakeSettingsRequest: AWSEncodableShape { /// The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment. public let catalogId: String? diff --git a/Sources/Soto/Services/Lambda/Lambda_shapes.swift b/Sources/Soto/Services/Lambda/Lambda_shapes.swift index b7ed8ce9e8..0afd98a7ad 100644 --- a/Sources/Soto/Services/Lambda/Lambda_shapes.swift +++ b/Sources/Soto/Services/Lambda/Lambda_shapes.swift @@ -103,6 +103,7 @@ extension Lambda { } public enum LastUpdateStatusReasonCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case creating = "Creating" case disabledKMSKey = "DisabledKMSKey" case efsMountConnectivityError = "EFSMountConnectivityError" case efsMountFailure = "EFSMountFailure" diff --git a/Sources/Soto/Services/LaunchWizard/LaunchWizard_api.swift b/Sources/Soto/Services/LaunchWizard/LaunchWizard_api.swift index dc8529dd22..95b2c0a64d 100644 --- a/Sources/Soto/Services/LaunchWizard/LaunchWizard_api.swift +++ b/Sources/Soto/Services/LaunchWizard/LaunchWizard_api.swift @@ -125,6 +125,19 @@ public struct LaunchWizard: AWSService { ) } + /// Returns details for a given workload and deployment pattern, including the available specifications. You can use the ListWorkloads operation to discover the available workload names and the ListWorkloadDeploymentPatterns operation to discover the available deployment pattern names of a given workload. + @Sendable + public func getWorkloadDeploymentPattern(_ input: GetWorkloadDeploymentPatternInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetWorkloadDeploymentPatternOutput { + return try await self.client.execute( + operation: "GetWorkloadDeploymentPattern", + path: "/getWorkloadDeploymentPattern", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the events of a deployment. 
@Sendable public func listDeploymentEvents(_ input: ListDeploymentEventsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDeploymentEventsOutput { @@ -151,7 +164,20 @@ public struct LaunchWizard: AWSService { ) } - /// Lists the workload deployment patterns. + /// Lists the tags associated with a specified resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceOutput { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{resourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the workload deployment patterns for a given workload name. You can use the ListWorkloads operation to discover the available workload names. @Sendable public func listWorkloadDeploymentPatterns(_ input: ListWorkloadDeploymentPatternsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorkloadDeploymentPatternsOutput { return try await self.client.execute( @@ -164,7 +190,7 @@ public struct LaunchWizard: AWSService { ) } - /// Lists the workloads. + /// Lists the available workload names. You can use the ListWorkloadDeploymentPatterns operation to discover the available deployment patterns for a given workload. @Sendable public func listWorkloads(_ input: ListWorkloadsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorkloadsOutput { return try await self.client.execute( @@ -176,6 +202,32 @@ public struct LaunchWizard: AWSService { logger: logger ) } + + /// Adds the specified tags to the given resource. + @Sendable + public func tagResource(_ input: TagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceOutput { + return try await self.client.execute( + operation: "TagResource", + path: "/tags/{resourceArn}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Removes the specified tags from the given resource. + @Sendable + public func untagResource(_ input: UntagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceOutput { + return try await self.client.execute( + operation: "UntagResource", + path: "/tags/{resourceArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } } extension LaunchWizard { @@ -229,7 +281,7 @@ extension LaunchWizard { ) } - /// Lists the workload deployment patterns. + /// Lists the workload deployment patterns for a given workload name. You can use the ListWorkloads operation to discover the available workload names. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -248,7 +300,7 @@ extension LaunchWizard { ) } - /// Lists the workloads. + /// Lists the available workload names. You can use the ListWorkloadDeploymentPatterns operation to discover the available deployment patterns for a given workload. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/LaunchWizard/LaunchWizard_shapes.swift b/Sources/Soto/Services/LaunchWizard/LaunchWizard_shapes.swift index 576ddc1cea..307ebec8fe 100644 --- a/Sources/Soto/Services/LaunchWizard/LaunchWizard_shapes.swift +++ b/Sources/Soto/Services/LaunchWizard/LaunchWizard_shapes.swift @@ -82,26 +82,29 @@ extension LaunchWizard { public let dryRun: Bool? /// The name of the deployment. public let name: String - /// The settings specified for the deployment. 
For more information on the specifications required for creating a deployment, see Workload specifications. + /// The settings specified for the deployment. These settings define how to deploy and configure your resources created by the deployment. For more information about the specifications required for creating a deployment for a SAP workload, see SAP deployment specifications. To retrieve the specifications required to create a deployment for other workloads, use the GetWorkloadDeploymentPattern operation. public let specifications: [String: String] - /// The name of the workload. You can use the ListWorkloadDeploymentPatterns operation to discover supported values for this parameter. + /// The tags to add to the deployment. + public let tags: [String: String]? + /// The name of the workload. You can use the ListWorkloads operation to discover supported values for this parameter. public let workloadName: String - public init(deploymentPatternName: String, dryRun: Bool? = nil, name: String, specifications: [String: String], workloadName: String) { + public init(deploymentPatternName: String, dryRun: Bool? = nil, name: String, specifications: [String: String], tags: [String: String]? = nil, workloadName: String) { self.deploymentPatternName = deploymentPatternName self.dryRun = dryRun self.name = name self.specifications = specifications + self.tags = tags self.workloadName = workloadName } public func validate(name: String) throws { try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, max: 256) try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, min: 1) - try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, pattern: "^[a-zA-Z0-9-]+$") - try self.validate(self.name, name: "name", parent: name, max: 25) + try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, pattern: "^[A-Za-z0-9][a-zA-Z0-9-]*$") + try self.validate(self.name, name: "name", parent: name, max: 50) try self.validate(self.name, name: "name", parent: name, min: 1) - try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9_\\s\\.-]+$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9_\\.-]+$") try self.specifications.forEach { try validate($0.key, name: "specifications.key", parent: name, max: 256) try validate($0.key, name: "specifications.key", parent: name, min: 3) @@ -111,9 +114,17 @@ extension LaunchWizard { } try self.validate(self.specifications, name: "specifications", parent: name, max: 100) try self.validate(self.specifications, name: "specifications", parent: name, min: 1) - try self.validate(self.workloadName, name: "workloadName", parent: name, max: 256) + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) + try self.validate(self.workloadName, name: "workloadName", parent: name, max: 100) try self.validate(self.workloadName, name: "workloadName", parent: name, min: 1) - try self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[a-zA-Z0-9-]+$") + try 
self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[A-Za-z][a-zA-Z0-9-_]*$") } private enum CodingKeys: String, CodingKey { @@ -121,6 +132,7 @@ extension LaunchWizard { case dryRun = "dryRun" case name = "name" case specifications = "specifications" + case tags = "tags" case workloadName = "workloadName" } } @@ -174,11 +186,34 @@ extension LaunchWizard { } } + public struct DeploymentConditionalField: AWSDecodableShape { + /// The comparator of the condition. Valid values: Equal | NotEqual + public let comparator: String? + /// The name of the deployment condition. + public let name: String? + /// The value of the condition. + public let value: String? + + public init(comparator: String? = nil, name: String? = nil, value: String? = nil) { + self.comparator = comparator + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case comparator = "comparator" + case name = "name" + case value = "value" + } + } + public struct DeploymentData: AWSDecodableShape { /// The time the deployment was created. public let createdAt: Date? /// The time the deployment was deleted. public let deletedAt: Date? + /// The Amazon Resource Name (ARN) of the deployment. + public let deploymentArn: String? /// The ID of the deployment. public let id: String? /// The name of the deployment. @@ -187,34 +222,40 @@ extension LaunchWizard { public let patternName: String? /// The resource group of the deployment. public let resourceGroup: String? - /// The specifications of the deployment. For more information on specifications for each deployment, see Workload specifications. + /// The settings specified for the deployment. These settings define how to deploy and configure your resources created by the deployment. For more information about the specifications required for creating a deployment for a SAP workload, see SAP deployment specifications. To retrieve the specifications required to create a deployment for other workloads, use the GetWorkloadDeploymentPattern operation. public let specifications: [String: String]? /// The status of the deployment. public let status: DeploymentStatus? + /// Information about the tags attached to a deployment. + public let tags: [String: String]? /// The name of the workload. public let workloadName: String? - public init(createdAt: Date? = nil, deletedAt: Date? = nil, id: String? = nil, name: String? = nil, patternName: String? = nil, resourceGroup: String? = nil, specifications: [String: String]? = nil, status: DeploymentStatus? = nil, workloadName: String? = nil) { + public init(createdAt: Date? = nil, deletedAt: Date? = nil, deploymentArn: String? = nil, id: String? = nil, name: String? = nil, patternName: String? = nil, resourceGroup: String? = nil, specifications: [String: String]? = nil, status: DeploymentStatus? = nil, tags: [String: String]? = nil, workloadName: String? 
= nil) { self.createdAt = createdAt self.deletedAt = deletedAt + self.deploymentArn = deploymentArn self.id = id self.name = name self.patternName = patternName self.resourceGroup = resourceGroup self.specifications = specifications self.status = status + self.tags = tags self.workloadName = workloadName } private enum CodingKeys: String, CodingKey { case createdAt = "createdAt" case deletedAt = "deletedAt" + case deploymentArn = "deploymentArn" case id = "id" case name = "name" case patternName = "patternName" case resourceGroup = "resourceGroup" case specifications = "specifications" case status = "status" + case tags = "tags" case workloadName = "workloadName" } } @@ -298,6 +339,35 @@ extension LaunchWizard { } } + public struct DeploymentSpecificationsField: AWSDecodableShape { + /// The allowed values of the deployment specification. + public let allowedValues: [String]? + /// The conditionals used for the deployment specification. + public let conditionals: [DeploymentConditionalField]? + /// The description of the deployment specification. + public let description: String? + /// The name of the deployment specification. + public let name: String? + /// Indicates if the deployment specification is required. + public let required: String? + + public init(allowedValues: [String]? = nil, conditionals: [DeploymentConditionalField]? = nil, description: String? = nil, name: String? = nil, required: String? = nil) { + self.allowedValues = allowedValues + self.conditionals = conditionals + self.description = description + self.name = name + self.required = required + } + + private enum CodingKeys: String, CodingKey { + case allowedValues = "allowedValues" + case conditionals = "conditionals" + case description = "description" + case name = "name" + case required = "required" + } + } + public struct GetDeploymentInput: AWSEncodableShape { /// The ID of the deployment. public let deploymentId: String @@ -330,6 +400,45 @@ extension LaunchWizard { } } + public struct GetWorkloadDeploymentPatternInput: AWSEncodableShape { + /// The name of the deployment pattern. + public let deploymentPatternName: String + /// The name of the workload. + public let workloadName: String + + public init(deploymentPatternName: String, workloadName: String) { + self.deploymentPatternName = deploymentPatternName + self.workloadName = workloadName + } + + public func validate(name: String) throws { + try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, max: 256) + try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, min: 1) + try self.validate(self.deploymentPatternName, name: "deploymentPatternName", parent: name, pattern: "^[A-Za-z0-9][a-zA-Z0-9-]*$") + try self.validate(self.workloadName, name: "workloadName", parent: name, max: 100) + try self.validate(self.workloadName, name: "workloadName", parent: name, min: 1) + try self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[A-Za-z][a-zA-Z0-9-_]*$") + } + + private enum CodingKeys: String, CodingKey { + case deploymentPatternName = "deploymentPatternName" + case workloadName = "workloadName" + } + } + + public struct GetWorkloadDeploymentPatternOutput: AWSDecodableShape { + /// Details about the workload deployment pattern. + public let workloadDeploymentPattern: WorkloadDeploymentPatternData? + + public init(workloadDeploymentPattern: WorkloadDeploymentPatternData? 
= nil) { + self.workloadDeploymentPattern = workloadDeploymentPattern + } + + private enum CodingKeys: String, CodingKey { + case workloadDeploymentPattern = "workloadDeploymentPattern" + } + } + public struct GetWorkloadInput: AWSEncodableShape { /// The name of the workload. public let workloadName: String @@ -339,9 +448,9 @@ extension LaunchWizard { } public func validate(name: String) throws { - try self.validate(self.workloadName, name: "workloadName", parent: name, max: 256) + try self.validate(self.workloadName, name: "workloadName", parent: name, max: 100) try self.validate(self.workloadName, name: "workloadName", parent: name, min: 1) - try self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[a-zA-Z0-9-]+$") + try self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[A-Za-z][a-zA-Z0-9-_]*$") } private enum CodingKeys: String, CodingKey { @@ -411,7 +520,7 @@ extension LaunchWizard { } public struct ListDeploymentsInput: AWSEncodableShape { - /// Filters to scope the results. The following filters are supported: WORKLOAD_NAME DEPLOYMENT_STATUS + /// Filters to scope the results. The following filters are supported: WORKLOAD_NAME - The name used in deployments. DEPLOYMENT_STATUS - COMPLETED | CREATING | DELETE_IN_PROGRESS | DELETE_INITIATING | DELETE_FAILED | DELETED | FAILED | IN_PROGRESS | VALIDATING public let filters: [DeploymentFilter]? /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. public let maxResults: Int? @@ -456,6 +565,36 @@ extension LaunchWizard { } } + public struct ListTagsForResourceInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceOutput: AWSDecodableShape { + /// Information about the tags. + public let tags: [String: String]? + + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + public struct ListWorkloadDeploymentPatternsInput: AWSEncodableShape { /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. public let maxResults: Int? 
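A minimal usage sketch (not part of the generated patch) for the new GetWorkloadDeploymentPattern operation added above, assuming the usual Soto AWSClient setup; the region, workload name, and pattern name are illustrative placeholders rather than values taken from this diff.

import SotoLaunchWizard

// Sketch only: fetch a deployment pattern and print the specification fields it expects.
// "SAP" and "SapHanaSingle" below are hypothetical example names.
func printPatternSpecifications(client: AWSClient) async throws {
    let launchWizard = LaunchWizard(client: client, region: .useast1)
    let output = try await launchWizard.getWorkloadDeploymentPattern(
        .init(deploymentPatternName: "SapHanaSingle", workloadName: "SAP")
    )
    for field in output.workloadDeploymentPattern?.specifications ?? [] {
        // Each DeploymentSpecificationsField lists allowed values and whether the field is required.
        print(field.name ?? "-", field.required ?? "-", field.allowedValues ?? [])
    }
}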
@@ -475,9 +614,9 @@ extension LaunchWizard { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.workloadName, name: "workloadName", parent: name, max: 256) + try self.validate(self.workloadName, name: "workloadName", parent: name, max: 100) try self.validate(self.workloadName, name: "workloadName", parent: name, min: 1) - try self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[a-zA-Z0-9-]+$") + try self.validate(self.workloadName, name: "workloadName", parent: name, pattern: "^[A-Za-z][a-zA-Z0-9-_]*$") } private enum CodingKeys: String, CodingKey { @@ -545,6 +684,79 @@ extension LaunchWizard { } } + public struct TagResourceInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// One or more tags to attach to the resource. + public let tags: [String: String] + + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + try container.encode(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceOutput: AWSDecodableShape { + public init() {} + } + + public struct UntagResourceInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// Keys identifying the tags to remove. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + public func validate(name: String) throws { + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceOutput: AWSDecodableShape { + public init() {} + } + public struct WorkloadData: AWSDecodableShape { /// The description of a workload. public let description: String? 
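The TagResource, UntagResource, and ListTagsForResource shapes above back the new LaunchWizard tagging operations added earlier in this patch. A hedged sketch of exercising them; the deployment ARN and tag values are made-up placeholders.

import SotoLaunchWizard

// Sketch only: tag a deployment, read the tags back, then remove one key.
func manageDeploymentTags(launchWizard: LaunchWizard) async throws {
    let deploymentArn = "arn:aws:launchwizard:us-east-1:111122223333:deployment/example" // placeholder ARN

    _ = try await launchWizard.tagResource(
        .init(resourceArn: deploymentArn, tags: ["team": "sre", "env": "test"])
    )
    let listed = try await launchWizard.listTagsForResource(.init(resourceArn: deploymentArn))
    print(listed.tags ?? [:])

    _ = try await launchWizard.untagResource(.init(resourceArn: deploymentArn, tagKeys: ["env"]))
}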
@@ -599,6 +811,47 @@ extension LaunchWizard { } } + public struct WorkloadDeploymentPatternData: AWSDecodableShape { + /// The name of the deployment pattern. + public let deploymentPatternName: String? + /// The description of the deployment pattern. + public let description: String? + /// The display name of the deployment pattern. + public let displayName: String? + /// The settings specified for the deployment. These settings define how to deploy and configure your resources created by the deployment. For more information about the specifications required for creating a deployment for a SAP workload, see SAP deployment specifications. To retrieve the specifications required to create a deployment for other workloads, use the GetWorkloadDeploymentPattern operation. + public let specifications: [DeploymentSpecificationsField]? + /// The status of the deployment pattern. + public let status: WorkloadDeploymentPatternStatus? + /// The status message of the deployment pattern. + public let statusMessage: String? + /// The workload name of the deployment pattern. + public let workloadName: String? + /// The workload version name of the deployment pattern. + public let workloadVersionName: String? + + public init(deploymentPatternName: String? = nil, description: String? = nil, displayName: String? = nil, specifications: [DeploymentSpecificationsField]? = nil, status: WorkloadDeploymentPatternStatus? = nil, statusMessage: String? = nil, workloadName: String? = nil, workloadVersionName: String? = nil) { + self.deploymentPatternName = deploymentPatternName + self.description = description + self.displayName = displayName + self.specifications = specifications + self.status = status + self.statusMessage = statusMessage + self.workloadName = workloadName + self.workloadVersionName = workloadVersionName + } + + private enum CodingKeys: String, CodingKey { + case deploymentPatternName = "deploymentPatternName" + case description = "description" + case displayName = "displayName" + case specifications = "specifications" + case status = "status" + case statusMessage = "statusMessage" + case workloadName = "workloadName" + case workloadVersionName = "workloadVersionName" + } + } + public struct WorkloadDeploymentPatternDataSummary: AWSDecodableShape { /// The name of a workload deployment pattern. public let deploymentPatternName: String? diff --git a/Sources/Soto/Services/Lightsail/Lightsail_shapes.swift b/Sources/Soto/Services/Lightsail/Lightsail_shapes.swift index 2bd63c98c7..39a29536b0 100644 --- a/Sources/Soto/Services/Lightsail/Lightsail_shapes.swift +++ b/Sources/Soto/Services/Lightsail/Lightsail_shapes.swift @@ -371,6 +371,7 @@ extension Lightsail { public enum IpAddressType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case dualstack = "dualstack" case ipv4 = "ipv4" + case ipv6 = "ipv6" public var description: String { return self.rawValue } } @@ -551,6 +552,7 @@ extension Lightsail { public enum NetworkProtocol: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case all = "all" case icmp = "icmp" + case icmpv6 = "icmpv6" case tcp = "tcp" case udp = "udp" public var description: String { return self.rawValue } @@ -1360,7 +1362,7 @@ extension Lightsail { public struct Blueprint: AWSDecodableShape { /// Virtual computer blueprints that are supported by Lightsail for Research. This parameter only applies to Lightsail for Research resources. public let appCategory: AppCategory? 
- /// The ID for the virtual private server image (app_wordpress_4_4 or app_lamp_7_0). + /// The ID for the virtual private server image (app_wordpress_x_x or app_lamp_x_x). public let blueprintId: String? /// The description of the blueprint. public let description: String? @@ -1572,13 +1574,13 @@ extension Lightsail { } public struct Bundle: AWSDecodableShape { - /// The bundle ID (micro_1_0). + /// The bundle ID (micro_x_x). public let bundleId: String? /// The number of vCPUs included in the bundle (2). public let cpuCount: Int? /// The size of the SSD (30). public let diskSizeInGb: Int? - /// The Amazon EC2 instance type (t2.micro). + /// The instance type (micro). public let instanceType: String? /// A Boolean value indicating whether the bundle is active. public let isActive: Bool? @@ -3143,13 +3145,13 @@ extension Lightsail { public let attachedDiskMapping: [String: [DiskMap]]? /// The Availability Zone where you want to create your instances. Use the following formatting: us-east-2a (case sensitive). You can get a list of Availability Zones by using the get regions operation. Be sure to add the include Availability Zones parameter to your request. public let availabilityZone: String - /// The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_1_0). + /// The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_x_x). public let bundleId: String /// The names for your new instances. public let instanceNames: [String] /// The name of the instance snapshot on which you are basing your new instances. Use the get instance snapshots operation to return information about your existing snapshots. Constraint: This parameter cannot be defined together with the source instance name parameter. The instance snapshot name and source instance name parameters are mutually exclusive. public let instanceSnapshotName: String? - /// The IP address type for the instance. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. + /// The IP address type for the instance. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is dualstack. public let ipAddressType: IpAddressType? /// The name for your key pair. public let keyPairName: String? @@ -3227,15 +3229,15 @@ extension Lightsail { public let addOns: [AddOnRequest]? /// The Availability Zone in which to create your instance. Use the following format: us-east-2a (case sensitive). You can get a list of Availability Zones by using the get regions operation. Be sure to add the include Availability Zones parameter to your request. public let availabilityZone: String - /// The ID for a virtual private server image (app_wordpress_4_4 or app_lamp_7_0). Use the get blueprints operation to return a list of available images (or blueprints). Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases. + /// The ID for a virtual private server image (app_wordpress_x_x or app_lamp_x_x). Use the get blueprints operation to return a list of available images (or blueprints). Use active blueprints when creating new instances. 
Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases. public let blueprintId: String - /// The bundle of specification information for your virtual private server (or instance), including the pricing plan (micro_1_0). + /// The bundle of specification information for your virtual private server (or instance), including the pricing plan (medium_x_x). public let bundleId: String /// (Discontinued) The name for your custom image. In releases prior to June 12, 2017, this parameter was ignored by the API. It is now discontinued. public let customImageName: String? /// The names to use for your new Lightsail instances. Separate multiple values using quotation marks and commas, for example: ["MyFirstInstance","MySecondInstance"] public let instanceNames: [String] - /// The IP address type for the instance. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. + /// The IP address type for the instance. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is dualstack. public let ipAddressType: IpAddressType? /// The name of your key pair. public let keyPairName: String? @@ -3365,7 +3367,7 @@ extension Lightsail { public let healthCheckPath: String? /// The instance port where you're creating your load balancer. public let instancePort: Int - /// The IP address type for the load balancer. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. The default value is dualstack. + /// The IP address type for the load balancer. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. The default value is dualstack. public let ipAddressType: IpAddressType? /// The name of your load balancer. public let loadBalancerName: String @@ -7529,17 +7531,17 @@ extension Lightsail { public let addOns: [AddOn]? /// The Amazon Resource Name (ARN) of the instance (arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE). public let arn: String? - /// The blueprint ID (os_amlinux_2016_03). + /// The blueprint ID (amazon_linux_2023). public let blueprintId: String? - /// The friendly name of the blueprint (Amazon Linux). + /// The friendly name of the blueprint (Amazon Linux 2023). public let blueprintName: String? - /// The bundle for the instance (micro_1_0). + /// The bundle for the instance (micro_x_x). public let bundleId: String? /// The timestamp when the instance was created (1479734909.17) in Unix time format. public let createdAt: Date? /// The size of the vCPU and the amount of RAM for the instance. public let hardware: InstanceHardware? - /// The IP address type of the instance. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + /// The IP address type of the instance. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. public let ipAddressType: IpAddressType? /// The IPv6 addresses of the instance. public let ipv6Addresses: [String]? @@ -7549,7 +7551,7 @@ extension Lightsail { public let location: ResourceLocation? /// The metadata options for the Amazon Lightsail instance. public let metadataOptions: InstanceMetadataOptions? - /// The name the user gave the instance (Amazon_Linux-1GB-Ohio-1). 
+ /// The name the user gave the instance (Amazon_Linux_2023-1). public let name: String? /// Information about the public ports and monthly data transfer rates for the instance. public let networking: InstanceNetworking? @@ -7813,7 +7815,7 @@ extension Lightsail { public let fromPort: Int? /// The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. Only devices with an IPv6 address can connect to an instance through IPv6; otherwise, IPv4 should be used. The cidrs parameter lists the IPv4 addresses that are allowed to connect to an instance. For more information about CIDR block notation, see Classless Inter-Domain Routing on Wikipedia. public let ipv6Cidrs: [String]? - /// The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. + /// The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. icmp6 - Internet Control Message Protocol (ICMP) for IPv6. 
When you specify icmp6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. public let `protocol`: NetworkProtocol? /// The last port in a range of open ports on an instance. Allowed ports: TCP and UDP - 0 to 65535 ICMP - The ICMP code for IPv4 addresses. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia. ICMPv6 - The ICMP code for IPv6 addresses. For example, specify 128 as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more information, see Internet Control Message Protocol for IPv6. public let toPort: Int? @@ -7854,7 +7856,7 @@ extension Lightsail { public let fromPort: Int? /// The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. Only devices with an IPv6 address can connect to an instance through IPv6; otherwise, IPv4 should be used. The cidrs parameter lists the IPv4 addresses that are allowed to connect to an instance. For more information about CIDR block notation, see Classless Inter-Domain Routing on Wikipedia. public let ipv6Cidrs: [String]? - /// The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. + /// The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. 
icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmp6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. public let `protocol`: NetworkProtocol? /// Specifies whether the instance port is open or closed. The port state for Lightsail instances is always open. public let state: PortState? @@ -7889,9 +7891,9 @@ extension Lightsail { public let createdAt: Date? /// An array of disk objects containing information about all block storage disks. public let fromAttachedDisks: [Disk]? - /// The blueprint ID from which you created the snapshot (os_debian_8_3). A blueprint is a virtual private server (or instance) image used to create instances quickly. + /// The blueprint ID from which you created the snapshot (amazon_linux_2023). A blueprint is a virtual private server (or instance) image used to create instances quickly. public let fromBlueprintId: String? - /// The bundle ID from which you created the snapshot (micro_1_0). + /// The bundle ID from which you created the snapshot (micro_x_x). public let fromBundleId: String? /// The Amazon Resource Name (ARN) of the instance from which the snapshot was created (arn:aws:lightsail:us-east-2:123456789101:Instance/64b8404c-ccb1-430b-8daf-12345EXAMPLE). public let fromInstanceArn: String? @@ -7956,9 +7958,9 @@ extension Lightsail { } public struct InstanceSnapshotInfo: AWSDecodableShape { - /// The blueprint ID from which the source instance (os_debian_8_3). + /// The blueprint ID from which the source instance (amazon_linux_2023). public let fromBlueprintId: String? - /// The bundle ID from which the source instance was created (micro_1_0). + /// The bundle ID from which the source instance was created (micro_x_x). public let fromBundleId: String? /// A list of objects describing the disks that were attached to the source instance. public let fromDiskInfo: [DiskInfo]? @@ -8161,7 +8163,7 @@ extension Lightsail { public let instanceHealthSummary: [InstanceHealthSummary]? /// The port where the load balancer will direct traffic to your Lightsail instances. For HTTP traffic, it's port 80. For HTTPS traffic, it's port 443. public let instancePort: Int? - /// The IP address type of the load balancer. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + /// The IP address type of the load balancer. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. public let ipAddressType: IpAddressType? /// The AWS Region where your load balancer was created (us-east-2a). Lightsail automatically creates your load balancer across Availability Zones. public let location: ResourceLocation? @@ -8779,7 +8781,7 @@ extension Lightsail { public let fromPort: Int? /// The IPv6 address, or range of IPv6 addresses (in CIDR notation) that are allowed to connect to an instance through the ports, and the protocol. Only devices with an IPv6 address can connect to an instance through IPv6; otherwise, IPv4 should be used. The cidrs parameter lists the IPv4 addresses that are allowed to connect to an instance. 
For more information about CIDR block notation, see Classless Inter-Domain Routing on Wikipedia. public let ipv6Cidrs: [String]? - /// The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. + /// The IP protocol name. The name can be one of the following: tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and error-checked delivery of streamed data between applications running on hosts communicating by an IP network. If you have an application that doesn't require reliable data stream service, use UDP instead. all - All transport layer protocol types. For more general information, see Transport layer on Wikipedia. udp - With User Datagram Protocol (UDP), computer applications can send messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior communications are not required to set up transmission channels or data paths. Applications that don't require reliable data stream service can use UDP, which provides a connectionless datagram service that emphasizes reduced latency over reliability. If you do require reliable data stream service, use TCP instead. icmp - Internet Control Message Protocol (ICMP) is used to send error messages and operational information indicating success or failure when communicating with an instance. For example, an error is indicated when an instance could not be reached. When you specify icmp as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you specify icmp6 as the protocol, you must specify the ICMP type using the fromPort parameter, and ICMP code using the toPort parameter. public let `protocol`: NetworkProtocol? /// The last port in a range of open ports on an instance. Allowed ports: TCP and UDP - 0 to 65535 ICMP - The ICMP code for IPv4 addresses. For example, specify 8 as the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, see Control Messages on Wikipedia. ICMPv6 - The ICMP code for IPv6 addresses. For example, specify 128 as the fromPort (ICMPv6 type), and 0 as toPort (ICMPv6 code). For more information, see Internet Control Message Protocol for IPv6. public let toPort: Int? 
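The Lightsail hunks above add the ipv6 address type and the icmpv6 protocol. Below is a sketch of opening an ICMPv6 echo (ping) rule; it assumes the pre-existing OpenInstancePublicPorts operation and PortInfo shape, which are not shown in this patch, and the instance name is a placeholder.

import SotoLightsail

// Sketch only: allow ICMPv6 ping using the new icmpv6 case.
// Per the documentation above, fromPort carries the ICMPv6 type (128) and toPort the code (0).
func allowIPv6Ping(lightsail: Lightsail) async throws {
    _ = try await lightsail.openInstancePublicPorts(
        .init(
            instanceName: "my-instance", // placeholder
            portInfo: .init(fromPort: 128, ipv6Cidrs: ["::/0"], protocol: .icmpv6, toPort: 0)
        )
    )
}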
@@ -9727,14 +9729,17 @@ extension Lightsail { } public struct SetIpAddressTypeRequest: AWSEncodableShape { - /// The IP address type to set for the specified resource. The possible values are ipv4 for IPv4 only, and dualstack for IPv4 and IPv6. + /// Required parameter to accept the instance bundle update when changing to, and from, IPv6-only. An instance bundle will change when switching from dual-stack or ipv4, to ipv6. It also changes when switching from ipv6, to dual-stack or ipv4. You must include this parameter in the command to update the bundle. For example, if you switch from dual-stack to ipv6, the bundle will be updated, and billing for the IPv6-only instance bundle begins immediately. + public let acceptBundleUpdate: Bool? + /// The IP address type to set for the specified resource. The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only, and dualstack for IPv4 and IPv6. public let ipAddressType: IpAddressType /// The name of the resource for which to set the IP address type. public let resourceName: String /// The resource type. The resource values are Distribution, Instance, and LoadBalancer. Distribution-related APIs are available only in the N. Virginia (us-east-1) Amazon Web Services Region. Set your Amazon Web Services Region configuration to us-east-1 to create, view, or edit distributions. public let resourceType: ResourceType - public init(ipAddressType: IpAddressType, resourceName: String, resourceType: ResourceType) { + public init(acceptBundleUpdate: Bool? = nil, ipAddressType: IpAddressType, resourceName: String, resourceType: ResourceType) { + self.acceptBundleUpdate = acceptBundleUpdate self.ipAddressType = ipAddressType self.resourceName = resourceName self.resourceType = resourceType @@ -9745,6 +9750,7 @@ extension Lightsail { } private enum CodingKeys: String, CodingKey { + case acceptBundleUpdate = "acceptBundleUpdate" case ipAddressType = "ipAddressType" case resourceName = "resourceName" case resourceType = "resourceType" diff --git a/Sources/Soto/Services/Location/Location_api.swift b/Sources/Soto/Services/Location/Location_api.swift index 0343f009d4..f667bc25ff 100644 --- a/Sources/Soto/Services/Location/Location_api.swift +++ b/Sources/Soto/Services/Location/Location_api.swift @@ -472,6 +472,20 @@ public struct Location: AWSService { ) } + /// Evaluates device positions against geofence geometries from a given geofence collection. The event forecasts three states for which a device can be in relative to a geofence: ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window. EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window. IDLE: If a device is inside of a geofence, and the device is not moving. + @Sendable + public func forecastGeofenceEvents(_ input: ForecastGeofenceEventsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ForecastGeofenceEventsResponse { + return try await self.client.execute( + operation: "ForecastGeofenceEvents", + path: "/geofencing/v0/collections/{CollectionName}/forecast-geofence-events", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + hostPrefix: "geofencing.", + logger: logger + ) + } + /// Retrieves a device's most recent position according to its sample time. Device positions are deleted after 30 days. 
@Sendable public func getDevicePosition(_ input: GetDevicePositionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDevicePositionResponse { @@ -500,7 +514,7 @@ ) } - /// Retrieves the geofence details from a geofence collection. + /// Retrieves the geofence details from a geofence collection. The returned geometry will always match the geometry format used when the geofence was created. @Sendable public func getGeofence(_ input: GetGeofenceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetGeofenceResponse { return try await self.client.execute( @@ -891,6 +905,20 @@ logger: logger ) } + + /// Verifies the integrity of the device's position by determining if it was reported behind a proxy, and by comparing it to an inferred position estimated based on the device's state. + @Sendable + public func verifyDevicePosition(_ input: VerifyDevicePositionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> VerifyDevicePositionResponse { + return try await self.client.execute( + operation: "VerifyDevicePosition", + path: "/tracking/v0/trackers/{TrackerName}/positions/verify", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + hostPrefix: "tracking.", + logger: logger + ) + } } extension Location { @@ -906,6 +934,25 @@ @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension Location { + /// Evaluates device positions against geofence geometries from a given geofence collection. The event forecasts three states for which a device can be in relative to a geofence: ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window. EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window. IDLE: If a device is inside of a geofence, and the device is not moving. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func forecastGeofenceEventsPaginator( + _ input: ForecastGeofenceEventsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ForecastGeofenceEventsRequest, ForecastGeofenceEventsResponse> { + return .init( + input: input, + command: self.forecastGeofenceEvents, + inputKey: \ForecastGeofenceEventsRequest.nextToken, + outputKey: \ForecastGeofenceEventsResponse.nextToken, + logger: logger + ) + } + /// Retrieves the device position history from a tracker resource within a specified range of time. /// Return PaginatorSequence for operation. 
/// @@ -1097,6 +1144,20 @@ extension Location { } } +extension Location.ForecastGeofenceEventsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Location.ForecastGeofenceEventsRequest { + return .init( + collectionName: self.collectionName, + deviceState: self.deviceState, + distanceUnit: self.distanceUnit, + maxResults: self.maxResults, + nextToken: token, + speedUnit: self.speedUnit, + timeHorizonMinutes: self.timeHorizonMinutes + ) + } +} + extension Location.GetDevicePositionHistoryRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Location.GetDevicePositionHistoryRequest { return .init( diff --git a/Sources/Soto/Services/Location/Location_shapes.swift b/Sources/Soto/Services/Location/Location_shapes.swift index 2942fb99dc..40308c7c46 100644 --- a/Sources/Soto/Services/Location/Location_shapes.swift +++ b/Sources/Soto/Services/Location/Location_shapes.swift @@ -54,6 +54,16 @@ extension Location { public var description: String { return self.rawValue } } + public enum ForecastedGeofenceEventType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + /// This event type signifies that a device is forecasted to enter the geofence + case enter = "ENTER" + /// This event type signifies that a device is forecasted to exit the geofence + case exit = "EXIT" + /// This event type signifies that a device is stationary in the geofence and an exit/enter cannot be forecasted + case idle = "IDLE" + public var description: String { return self.rawValue } + } + public enum IntendedUse: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { /// Indicates that results of the operation are for single use, e.g., displaying results on a map or presenting options to users. case singleUse = "SingleUse" @@ -98,6 +108,12 @@ extension Location { public var description: String { return self.rawValue } } + public enum SpeedUnit: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case kilometersPerHour = "KilometersPerHour" + case milesPerHour = "MilesPerHour" + public var description: String { return self.rawValue } + } + public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { /// List all active API keys. 
case active = "Active" @@ -154,7 +170,7 @@ extension Location { try self.allowActions.forEach { try validate($0, name: "allowActions[]", parent: name, max: 200) try validate($0, name: "allowActions[]", parent: name, min: 5) - try validate($0, name: "allowActions[]", parent: name, pattern: "^geo:\\w*\\*?$") + try validate($0, name: "allowActions[]", parent: name, pattern: "^(geo|geo-routes|geo-places|geo-maps):\\w*\\*?$") } try self.allowReferers?.forEach { try validate($0, name: "allowReferers[]", parent: name, max: 253) @@ -162,7 +178,7 @@ extension Location { } try self.allowResources.forEach { try validate($0, name: "allowResources[]", parent: name, max: 1600) - try validate($0, name: "allowResources[]", parent: name, pattern: "^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$") + try validate($0, name: "allowResources[]", parent: name, pattern: "(^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$)|(^arn(:[a-z0-9]+([.-][a-z0-9]+)*):(geo-routes|geo-places|geo-maps)(:((\\*)|([a-z0-9]+([.-][a-z0-9]+)*)))::((provider[\\/][*-._\\w]+))$)") } } @@ -541,7 +557,7 @@ extension Location { public let geofenceId: String /// Associates one of more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence. Format: "key" : "value" public let geofenceProperties: [String: String]? - /// Contains the details of the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error. Each geofence polygon can have a maximum of 1,000 vertices. + /// Contains the details to specify the position of the geofence. Can be a polygon, a circle or a polygon encoded in Geobuf format. Including multiple selections will return a validation error. The geofence polygon format supports a maximum of 1,000 vertices. The Geofence geobuf format supports a maximum of 100,000 vertices. public let geometry: GeofenceGeometry public init(geofenceId: String, geofenceProperties: [String: String]? = nil, geometry: GeofenceGeometry) { @@ -987,6 +1003,25 @@ extension Location { } } + public struct CellSignals: AWSEncodableShape { + /// Information about the Long-Term Evolution (LTE) network the device is connected to. + public let lteCellDetails: [LteCellDetails] + + public init(lteCellDetails: [LteCellDetails]) { + self.lteCellDetails = lteCellDetails + } + + public func validate(name: String) throws { + try self.lteCellDetails.forEach { + try $0.validate(name: "\(name).lteCellDetails[]") + } + } + + private enum CodingKeys: String, CodingKey { + case lteCellDetails = "LteCellDetails" + } + } + public struct Circle: AWSEncodableShape & AWSDecodableShape { /// A single point geometry, specifying the center of the circle, using WGS 84 coordinates, in the form [longitude, latitude]. public let center: [Double] @@ -1014,7 +1049,7 @@ extension Location { public let collectionName: String /// An optional description for the geofence collection. public let description: String? - /// A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN. + /// A key identifier for an Amazon Web Services KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN. /// public let kmsKeyId: String? /// No longer used. If included, the only allowed value is RequestBasedUsage. 
@@ -1710,7 +1745,7 @@ extension Location { public let description: String /// The number of geofences in the geofence collection. public let geofenceCount: Int? - /// A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource + /// A key identifier for an Amazon Web Services KMS customer managed key assigned to the Amazon Location resource public let kmsKeyId: String? /// No longer used. Always returns RequestBasedUsage. public let pricingPlan: PricingPlan? @@ -2249,6 +2284,52 @@ extension Location { } } + public struct DeviceState: AWSEncodableShape { + public let accuracy: PositionalAccuracy? + /// The cellular network infrastructure that the device is connected to. + public let cellSignals: CellSignals? + /// The device identifier. + public let deviceId: String + /// The device's Ipv4 address. + public let ipv4Address: String? + /// The last known device position. + public let position: [Double] + /// The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. + @CustomCoding + public var sampleTime: Date + /// The Wi-Fi access points the device is using. + public let wiFiAccessPoints: [WiFiAccessPoint]? + + public init(accuracy: PositionalAccuracy? = nil, cellSignals: CellSignals? = nil, deviceId: String, ipv4Address: String? = nil, position: [Double], sampleTime: Date, wiFiAccessPoints: [WiFiAccessPoint]? = nil) { + self.accuracy = accuracy + self.cellSignals = cellSignals + self.deviceId = deviceId + self.ipv4Address = ipv4Address + self.position = position + self.sampleTime = sampleTime + self.wiFiAccessPoints = wiFiAccessPoints + } + + public func validate(name: String) throws { + try self.cellSignals?.validate(name: "\(name).cellSignals") + try self.validate(self.deviceId, name: "deviceId", parent: name, max: 100) + try self.validate(self.deviceId, name: "deviceId", parent: name, min: 1) + try self.validate(self.deviceId, name: "deviceId", parent: name, pattern: "^[-._\\p{L}\\p{N}]+$") + try self.validate(self.position, name: "position", parent: name, max: 2) + try self.validate(self.position, name: "position", parent: name, min: 2) + } + + private enum CodingKeys: String, CodingKey { + case accuracy = "Accuracy" + case cellSignals = "CellSignals" + case deviceId = "DeviceId" + case ipv4Address = "Ipv4Address" + case position = "Position" + case sampleTime = "SampleTime" + case wiFiAccessPoints = "WiFiAccessPoints" + } + } + public struct DisassociateTrackerConsumerRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) for the geofence collection to be disassociated from the tracker resource. Used when you need to specify a resource across all Amazon Web Services. Format example: arn:aws:geo:region:account-id:geofence-collection/ExampleGeofenceCollectionConsumer public let consumerArn: String @@ -2282,19 +2363,165 @@ extension Location { public init() {} } + public struct ForecastGeofenceEventsDeviceState: AWSEncodableShape { + /// The device's position. + public let position: [Double] + /// The device's speed. + public let speed: Double? + + public init(position: [Double], speed: Double? 
= nil) { + self.position = position + self.speed = speed + } + + public func validate(name: String) throws { + try self.validate(self.position, name: "position", parent: name, max: 2) + try self.validate(self.position, name: "position", parent: name, min: 2) + } + + private enum CodingKeys: String, CodingKey { + case position = "Position" + case speed = "Speed" + } + } + + public struct ForecastGeofenceEventsRequest: AWSEncodableShape { + /// The name of the geofence collection. + public let collectionName: String + /// The device's state, including current position and speed. + public let deviceState: ForecastGeofenceEventsDeviceState + /// The distance unit used for the NearestDistance property returned in a forecasted event. The measurement system must match for DistanceUnit and SpeedUnit; if Kilometers is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour. Default Value: Kilometers + public let distanceUnit: DistanceUnit? + /// An optional limit for the number of resources returned in a single call. Default value: 20 + public let maxResults: Int? + /// The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page. Default value: null + public let nextToken: String? + /// The speed unit for the device captured by the device state. The measurement system must match for DistanceUnit and SpeedUnit; if Kilometers is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour. Default Value: KilometersPerHour. + public let speedUnit: SpeedUnit? + /// Specifies the time horizon in minutes for the forecasted events. + public let timeHorizonMinutes: Double? + + public init(collectionName: String, deviceState: ForecastGeofenceEventsDeviceState, distanceUnit: DistanceUnit? = nil, maxResults: Int? = nil, nextToken: String? = nil, speedUnit: SpeedUnit? = nil, timeHorizonMinutes: Double? = nil) { + self.collectionName = collectionName + self.deviceState = deviceState + self.distanceUnit = distanceUnit + self.maxResults = maxResults + self.nextToken = nextToken + self.speedUnit = speedUnit + self.timeHorizonMinutes = timeHorizonMinutes + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.collectionName, key: "CollectionName") + try container.encode(self.deviceState, forKey: .deviceState) + try container.encodeIfPresent(self.distanceUnit, forKey: .distanceUnit) + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.speedUnit, forKey: .speedUnit) + try container.encodeIfPresent(self.timeHorizonMinutes, forKey: .timeHorizonMinutes) + } + + public func validate(name: String) throws { + try self.validate(self.collectionName, name: "collectionName", parent: name, max: 100) + try self.validate(self.collectionName, name: "collectionName", parent: name, min: 1) + try self.validate(self.collectionName, name: "collectionName", parent: name, pattern: "^[-._\\w]+$") + try self.deviceState.validate(name: "\(name).deviceState") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 60000) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case deviceState = "DeviceState" + case distanceUnit = "DistanceUnit" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case speedUnit = "SpeedUnit" + case timeHorizonMinutes = "TimeHorizonMinutes" + } + } + + public struct ForecastGeofenceEventsResponse: AWSDecodableShape { + /// The distance unit for the forecasted events. + public let distanceUnit: DistanceUnit + /// The list of forecasted events. + public let forecastedEvents: [ForecastedEvent] + /// The pagination token specifying which page of results to return in the response. If no token is provided, the default page is the first page. + public let nextToken: String? + /// The speed unit for the forecasted events. + public let speedUnit: SpeedUnit + + public init(distanceUnit: DistanceUnit, forecastedEvents: [ForecastedEvent], nextToken: String? = nil, speedUnit: SpeedUnit) { + self.distanceUnit = distanceUnit + self.forecastedEvents = forecastedEvents + self.nextToken = nextToken + self.speedUnit = speedUnit + } + + private enum CodingKeys: String, CodingKey { + case distanceUnit = "DistanceUnit" + case forecastedEvents = "ForecastedEvents" + case nextToken = "NextToken" + case speedUnit = "SpeedUnit" + } + } + + public struct ForecastedEvent: AWSDecodableShape { + /// The forecasted event identifier. + public let eventId: String + /// The event type, forecasting three states for which a device can be in relative to a geofence: ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window. EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within time horizon window. IDLE: If a device is inside of a geofence, and the device is not moving. + public let eventType: ForecastedGeofenceEventType + /// The forecasted time the device will breach the geofence in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ + @OptionalCustomCoding + public var forecastedBreachTime: Date? + /// The geofence identifier pertaining to the forecasted event. + public let geofenceId: String + /// The geofence properties. + public let geofenceProperties: [String: String]? + /// Indicates if the device is located within the geofence. + public let isDeviceInGeofence: Bool + /// The closest distance from the device's position to the geofence. 
+ public let nearestDistance: Double + + public init(eventId: String, eventType: ForecastedGeofenceEventType, forecastedBreachTime: Date? = nil, geofenceId: String, geofenceProperties: [String: String]? = nil, isDeviceInGeofence: Bool, nearestDistance: Double) { + self.eventId = eventId + self.eventType = eventType + self.forecastedBreachTime = forecastedBreachTime + self.geofenceId = geofenceId + self.geofenceProperties = geofenceProperties + self.isDeviceInGeofence = isDeviceInGeofence + self.nearestDistance = nearestDistance + } + + private enum CodingKeys: String, CodingKey { + case eventId = "EventId" + case eventType = "EventType" + case forecastedBreachTime = "ForecastedBreachTime" + case geofenceId = "GeofenceId" + case geofenceProperties = "GeofenceProperties" + case isDeviceInGeofence = "IsDeviceInGeofence" + case nearestDistance = "NearestDistance" + } + } + public struct GeofenceGeometry: AWSEncodableShape & AWSDecodableShape { /// A circle on the earth, as defined by a center point and a radius. public let circle: Circle? + /// Geobuf is a compact binary encoding for geographic data that provides lossless compression of GeoJSON polygons. The Geobuf must be Base64-encoded. A polygon in Geobuf format can have up to 100,000 vertices. + public let geobuf: AWSBase64Data? /// A polygon is a list of linear rings which are each made up of a list of vertices. Each vertex is a 2-dimensional point of the form: [longitude, latitude]. This is represented as an array of doubles of length 2 (so [double, double]). An array of 4 or more vertices, where the first and last vertex are the same (to form a closed boundary), is called a linear ring. The linear ring vertices must be listed in counter-clockwise order around the ring’s interior. The linear ring is represented as an array of vertices, or an array of arrays of doubles ([[double, double], ...]). A geofence consists of a single linear ring. To allow for future expansion, the Polygon parameter takes an array of linear rings, which is represented as an array of arrays of arrays of doubles ([[[double, double], ...], ...]). A linear ring for use in geofences can consist of between 4 and 1,000 vertices. public let polygon: [[[Double]]]? - public init(circle: Circle? = nil, polygon: [[[Double]]]? = nil) { + public init(circle: Circle? = nil, geobuf: AWSBase64Data? = nil, polygon: [[[Double]]]? = nil) { self.circle = circle + self.geobuf = geobuf self.polygon = polygon } public func validate(name: String) throws { try self.circle?.validate(name: "\(name).circle") + try self.validate(self.geobuf, name: "geobuf", parent: name, max: 600000) try self.polygon?.forEach { try validate($0, name: "polygon[]", parent: name, min: 4) } @@ -2303,6 +2530,7 @@ extension Location { private enum CodingKeys: String, CodingKey { case circle = "Circle" + case geobuf = "Geobuf" case polygon = "Polygon" } } @@ -2418,7 +2646,7 @@ extension Location { public let position: [Double] /// The properties associated with the position. public let positionProperties: [String: String]? - /// The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. + /// The timestamp for when the tracker resource received the device position. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. @CustomCoding public var receivedTime: Date /// The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. 
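ForecastGeofenceEventsRequest, ForecastGeofenceEventsResponse, and ForecastedEvent above back the new geofence-event forecasting operation. The sketch below shows one way the shapes fit together; the forecastGeofenceEvents client method is assumed from the corresponding Location_api.swift change, which is not part of this excerpt, and the coordinates are placeholders.

import SotoLocation

// Sketch only: forecast ENTER/EXIT/IDLE events for a device over the next 30 minutes.
func forecastBreaches(location: Location, collectionName: String) async throws {
    let deviceState = Location.ForecastGeofenceEventsDeviceState(
        position: [-122.3321, 47.6062],   // [longitude, latitude]
        speed: 12.5
    )
    let request = Location.ForecastGeofenceEventsRequest(
        collectionName: collectionName,
        deviceState: deviceState,
        timeHorizonMinutes: 30
    )
    let response = try await location.forecastGeofenceEvents(request)   // assumed method name
    for event in response.forecastedEvents {
        // nearestDistance is expressed in the unit echoed back in response.distanceUnit.
        print("\(event.geofenceId) \(event.eventType): \(event.nearestDistance) \(response.distanceUnit) away")
    }
}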
@@ -2510,7 +2738,7 @@ extension Location { } public struct GetMapGlyphsRequest: AWSEncodableShape { - /// A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode. Valid font stacks for Esri styles: VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold | Arial Unicode MS Bold | Arial Unicode MS Regular Valid font stacks for HERE Technologies styles: VectorHereContrast – Fira GO Regular | Fira GO Bold VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – Fira GO Italic | Fira GO Map | Fira GO Map Bold | Noto Sans CJK JP Bold | Noto Sans CJK JP Light | Noto Sans CJK JP Regular Valid font stacks for GrabMaps styles: VectorGrabStandardLight, VectorGrabStandardDark – Noto Sans Regular | Noto Sans Medium | Noto Sans Bold Valid font stacks for Open Data styles: VectorOpenDataStandardLight, VectorOpenDataStandardDark, VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – Amazon Ember Regular,Noto Sans Regular | Amazon Ember Bold,Noto Sans Bold | Amazon Ember Medium,Noto Sans Medium | Amazon Ember Regular Italic,Noto Sans Italic | Amazon Ember Condensed RC Regular,Noto Sans Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold | Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold | Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold | Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular | Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular | Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember. + /// A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode. 
Valid font stacks for Esri styles: VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu Medium | Ubuntu Italic | Ubuntu Regular | Ubuntu Bold VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu Regular | Ubuntu Light | Ubuntu Bold VectorEsriTopographic – Noto Sans Italic | Noto Sans Regular | Noto Sans Bold | Noto Serif Regular | Roboto Condensed Light Italic VectorEsriStreets – Arial Regular | Arial Italic | Arial Bold VectorEsriNavigation – Arial Regular | Arial Italic | Arial Bold Valid font stacks for HERE Technologies styles: VectorHereContrast – Fira GO Regular | Fira GO Bold VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – Fira GO Italic | Fira GO Map | Fira GO Map Bold | Noto Sans CJK JP Bold | Noto Sans CJK JP Light | Noto Sans CJK JP Regular Valid font stacks for GrabMaps styles: VectorGrabStandardLight, VectorGrabStandardDark – Noto Sans Regular | Noto Sans Medium | Noto Sans Bold Valid font stacks for Open Data styles: VectorOpenDataStandardLight, VectorOpenDataStandardDark, VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – Amazon Ember Regular,Noto Sans Regular | Amazon Ember Bold,Noto Sans Bold | Amazon Ember Medium,Noto Sans Medium | Amazon Ember Regular Italic,Noto Sans Italic | Amazon Ember Condensed RC Regular,Noto Sans Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold | Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular | Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold | Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold | Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular | Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular | Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember. public let fontStack: String /// A Unicode range of characters to download glyphs for. Each response will contain 256 characters. For example, 0–255 includes all characters from range U+0000 to 00FF. Must be aligned to multiples of 256. public let fontUnicodeRange: String @@ -2756,7 +2984,7 @@ extension Location { public let key: String? /// The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English. This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result. For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language parameter set to en. The city in the results will most likely be returned as Athens. If you set the language parameter to el, for Greek, then the city in the results will more likely be returned as Αθήνα. If the data provider does not have a value for Greek, the result will be in a language that the provider does support. public let language: String? - /// The identifier of the place to find. While you can use PlaceID in subsequent requests, PlaceID is not intended to be a permanent identifier and the ID can change between consecutive API calls. Please see the following PlaceID behaviour for each data provider: Esri: Place IDs will change every quarter at a minimum. The typical time period for these changes would be March, June, September, and December. 
Place IDs might also change between the typical quarterly change but that will be much less frequent. HERE: We recommend that you cache data for no longer than a week to keep your data data fresh. You can assume that less than 1% ID shifts will release over release which is approximately 1 - 2 times per week. Grab: Place IDs can expire or become invalid in the following situations. Data operations: The POI may be removed from Grab POI database by Grab Map Ops based on the ground-truth, such as being closed in the real world, being detected as a duplicate POI, or having incorrect information. Grab will synchronize data to the Waypoint environment on weekly basis. Interpolated POI: Interpolated POI is a temporary POI generated in real time when serving a request, and it will be marked as derived in the place.result_type field in the response. The information of interpolated POIs will be retained for at least 30 days, which means that within 30 days, you are able to obtain POI details by Place ID from Place Details API. After 30 days, the interpolated POIs(both Place ID and details) may expire and inaccessible from the Places Details API. + /// The identifier of the place to find. public let placeId: String public init(indexName: String, key: String? = nil, language: String? = nil, placeId: String) { @@ -2800,6 +3028,31 @@ extension Location { } } + public struct InferredState: AWSDecodableShape { + /// The level of certainty of the inferred position. + public let accuracy: PositionalAccuracy? + /// The distance between the inferred position and the device's self-reported position. + public let deviationDistance: Double? + /// The device position inferred by the provided position, IP address, cellular signals, and Wi-Fi- access points. + public let position: [Double]? + /// Indicates if a proxy was used. + public let proxyDetected: Bool + + public init(accuracy: PositionalAccuracy? = nil, deviationDistance: Double? = nil, position: [Double]? = nil, proxyDetected: Bool) { + self.accuracy = accuracy + self.deviationDistance = deviationDistance + self.position = position + self.proxyDetected = proxyDetected + } + + private enum CodingKeys: String, CodingKey { + case accuracy = "Accuracy" + case deviationDistance = "DeviationDistance" + case position = "Position" + case proxyDetected = "ProxyDetected" + } + } + public struct Leg: AWSDecodableShape { /// The distance between the leg's StartPosition and EndPosition along a calculated route. The default measurement is Kilometers unless the request specifies a DistanceUnit of Miles. public let distance: Double @@ -3080,7 +3333,7 @@ extension Location { try self.validate(self.collectionName, name: "collectionName", parent: name, max: 100) try self.validate(self.collectionName, name: "collectionName", parent: name, min: 1) try self.validate(self.collectionName, name: "collectionName", parent: name, pattern: "^[-._\\w]+$") - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2000) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 60000) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) } @@ -3609,14 +3862,141 @@ extension Location { } } + public struct LteCellDetails: AWSEncodableShape { + /// The E-UTRAN Cell Identifier (ECI). + public let cellId: Int + /// The LTE local identification information (local ID). + public let localId: LteLocalId? + /// The Mobile Country Code (MCC). 
+ public let mcc: Int + /// The Mobile Network Code (MNC) + public let mnc: Int + /// The network measurements. + public let networkMeasurements: [LteNetworkMeasurements]? + /// Indicates whether the LTE object is capable of supporting NR (new radio). + public let nrCapable: Bool? + /// Signal power of the reference signal received, measured in decibel-milliwatts (dBm). + public let rsrp: Int? + /// Signal quality of the reference Signal received, measured in decibels (dB). + public let rsrq: Float? + /// LTE Tracking Area Code (TAC). + public let tac: Int? + /// Timing Advance (TA). + public let timingAdvance: Int? + + public init(cellId: Int = 0, localId: LteLocalId? = nil, mcc: Int, mnc: Int, networkMeasurements: [LteNetworkMeasurements]? = nil, nrCapable: Bool? = nil, rsrp: Int? = nil, rsrq: Float? = nil, tac: Int? = nil, timingAdvance: Int? = nil) { + self.cellId = cellId + self.localId = localId + self.mcc = mcc + self.mnc = mnc + self.networkMeasurements = networkMeasurements + self.nrCapable = nrCapable + self.rsrp = rsrp + self.rsrq = rsrq + self.tac = tac + self.timingAdvance = timingAdvance + } + + public func validate(name: String) throws { + try self.validate(self.cellId, name: "cellId", parent: name, max: 268435455) + try self.validate(self.cellId, name: "cellId", parent: name, min: 0) + try self.localId?.validate(name: "\(name).localId") + try self.networkMeasurements?.forEach { + try $0.validate(name: "\(name).networkMeasurements[]") + } + try self.validate(self.rsrp, name: "rsrp", parent: name, max: -44) + try self.validate(self.rsrp, name: "rsrp", parent: name, min: -140) + try self.validate(self.rsrq, name: "rsrq", parent: name, max: -3.0) + try self.validate(self.rsrq, name: "rsrq", parent: name, min: -19.5) + } + + private enum CodingKeys: String, CodingKey { + case cellId = "CellId" + case localId = "LocalId" + case mcc = "Mcc" + case mnc = "Mnc" + case networkMeasurements = "NetworkMeasurements" + case nrCapable = "NrCapable" + case rsrp = "Rsrp" + case rsrq = "Rsrq" + case tac = "Tac" + case timingAdvance = "TimingAdvance" + } + } + + public struct LteLocalId: AWSEncodableShape { + /// E-UTRA (Evolved Universal Terrestrial Radio Access) absolute radio frequency channel number (EARFCN). + public let earfcn: Int + /// Physical Cell ID (PCI). + public let pci: Int + + public init(earfcn: Int = 0, pci: Int = 0) { + self.earfcn = earfcn + self.pci = pci + } + + public func validate(name: String) throws { + try self.validate(self.earfcn, name: "earfcn", parent: name, max: 262143) + try self.validate(self.earfcn, name: "earfcn", parent: name, min: 0) + try self.validate(self.pci, name: "pci", parent: name, max: 503) + try self.validate(self.pci, name: "pci", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case earfcn = "Earfcn" + case pci = "Pci" + } + } + + public struct LteNetworkMeasurements: AWSEncodableShape { + /// E-UTRAN Cell Identifier (ECI). + public let cellId: Int + /// E-UTRA (Evolved Universal Terrestrial Radio Access) absolute radio frequency channel number (EARFCN). + public let earfcn: Int + /// Physical Cell ID (PCI). + public let pci: Int + /// Signal power of the reference signal received, measured in dBm (decibel-milliwatts). + public let rsrp: Int? + /// Signal quality of the reference Signal received, measured in decibels (dB). + public let rsrq: Float? + + public init(cellId: Int = 0, earfcn: Int = 0, pci: Int = 0, rsrp: Int? = nil, rsrq: Float? 
= nil) { + self.cellId = cellId + self.earfcn = earfcn + self.pci = pci + self.rsrp = rsrp + self.rsrq = rsrq + } + + public func validate(name: String) throws { + try self.validate(self.cellId, name: "cellId", parent: name, max: 268435455) + try self.validate(self.cellId, name: "cellId", parent: name, min: 0) + try self.validate(self.earfcn, name: "earfcn", parent: name, max: 262143) + try self.validate(self.earfcn, name: "earfcn", parent: name, min: 0) + try self.validate(self.pci, name: "pci", parent: name, max: 503) + try self.validate(self.pci, name: "pci", parent: name, min: 0) + try self.validate(self.rsrp, name: "rsrp", parent: name, max: -44) + try self.validate(self.rsrp, name: "rsrp", parent: name, min: -140) + try self.validate(self.rsrq, name: "rsrq", parent: name, max: -3.0) + try self.validate(self.rsrq, name: "rsrq", parent: name, min: -19.5) + } + + private enum CodingKeys: String, CodingKey { + case cellId = "CellId" + case earfcn = "Earfcn" + case pci = "Pci" + case rsrp = "Rsrp" + case rsrq = "Rsrq" + } + } + public struct MapConfiguration: AWSEncodableShape & AWSDecodableShape { /// Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. - /// Default is unset. Currenlty only VectorEsriNavigation supports CustomLayers. - /// For more information, see Custom Layers. + /// Default is unset. Not all map resources or styles support custom layers. See Custom Layers for more information. public let customLayers: [String]? /// Specifies the political view for the style. Leave unset to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view. Default is unset. Not all map resources or styles support political view styles. See Political views for more information. public let politicalView: String? - /// Specifies the map style selected from an available data provider. Valid Esri map styles: VectorEsriNavigation – The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices. It also includes a richer set of places, such as shops, services, restaurants, attractions, and other points of interest. Enable the POI layer by setting it in CustomLayers to leverage the additional places data. RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide. VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content. VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style. VectorEsriStreets – The Esri Street Map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map. VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content. 
Valid HERE Technologies map styles: VectorHereExplore – A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan. RasterHereExploreSatellite – A global map containing high resolution satellite imagery. HybridHereExploreSatellite – A global map displaying the road network, street names, and city labels over satellite imagery. This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved. Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved. VectorHereContrast – The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering. The VectorHereContrast style has been renamed from VectorHereBerlin. VectorHereBerlin has been deprecated, but will continue to work in applications that use it. VectorHereExploreTruck – A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics. Valid GrabMaps map styles: VectorGrabStandardLight – The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia. VectorGrabStandardDark – The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia. Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered. Valid Open Data map styles: VectorOpenDataStandardLight – The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries. VectorOpenDataStandardDark – Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries. VectorOpenDataVisualizationLight – The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data. VectorOpenDataVisualizationDark – The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data. + /// Specifies the map style selected from an available data provider. Valid Esri map styles: VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A vector basemap with a dark gray, neutral background with minimal colors, labels, and features that's designed to draw attention to your thematic content. RasterEsriImagery – The Esri Imagery map style. A raster basemap that provides one meter or better satellite and aerial imagery in many parts of the world and lower resolution satellite imagery worldwide. 
VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style, which provides a detailed vector basemap with a light gray, neutral background style with minimal colors, labels, and features that's designed to draw attention to your thematic content. VectorEsriTopographic – The Esri Light map style, which provides a detailed vector basemap with a classic Esri map style. VectorEsriStreets – The Esri Street Map style, which provides a detailed vector basemap for the world symbolized with a classic Esri street map style. The vector tile layer is similar in content and style to the World Street Map raster map. VectorEsriNavigation – The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a custom navigation map style that's designed for use during the day in mobile devices. Valid HERE Technologies map styles: VectorHereContrast – The HERE Contrast (Berlin) map style is a high contrast detailed base map of the world that blends 3D and 2D rendering. The VectorHereContrast style has been renamed from VectorHereBerlin. VectorHereBerlin has been deprecated, but will continue to work in applications that use it. VectorHereExplore – A default HERE map style containing a neutral, global map and its features including roads, buildings, landmarks, and water features. It also now includes a fully designed map of Japan. VectorHereExploreTruck – A global map containing truck restrictions and attributes (e.g. width / height / HAZMAT) symbolized with highlighted segments and icons on top of HERE Explore to support use cases within transport and logistics. RasterHereExploreSatellite – A global map containing high resolution satellite imagery. HybridHereExploreSatellite – A global map displaying the road network, street names, and city labels over satellite imagery. This style will automatically retrieve both raster and vector tiles, and your charges will be based on total tiles retrieved. Hybrid styles use both vector and raster tiles when rendering the map that you see. This means that more tiles are retrieved than when using either vector or raster tiles alone. Your charges will include all tiles retrieved. Valid GrabMaps map styles: VectorGrabStandardLight – The Grab Standard Light map style provides a basemap with detailed land use coloring, area names, roads, landmarks, and points of interest covering Southeast Asia. VectorGrabStandardDark – The Grab Standard Dark map style provides a dark variation of the standard basemap covering Southeast Asia. Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered. Valid Open Data map styles: VectorOpenDataStandardLight – The Open Data Standard Light map style provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries. VectorOpenDataStandardDark – Open Data Standard Dark is a dark-themed map style that provides a detailed basemap for the world suitable for website and mobile application use. The map includes highways major roads, minor roads, railways, water features, cities, parks, landmarks, building footprints, and administrative boundaries. 
VectorOpenDataVisualizationLight – The Open Data Visualization Light map style is a light-themed style with muted colors and fewer features that aids in understanding overlaid data. VectorOpenDataVisualizationDark – The Open Data Visualization Dark map style is a dark-themed style with muted colors and fewer features that aids in understanding overlaid data. public let style: String public init(customLayers: [String]? = nil, politicalView: String? = nil, style: String) { @@ -3649,8 +4029,7 @@ extension Location { public struct MapConfigurationUpdate: AWSEncodableShape { /// Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. - /// Default is unset. Currenlty only VectorEsriNavigation supports CustomLayers. - /// For more information, see Custom Layers. + /// Default is unset. Not all map resources or styles support custom layers. See Custom Layers for more information. public let customLayers: [String]? /// Specifies the political view for the style. Set to an empty string to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view. Not all map resources or styles support political view styles. See Political views for more information. public let politicalView: String? @@ -3699,7 +4078,7 @@ extension Location { public let region: String? /// The name for a street or a road to identify a location. For example, Main Street. public let street: String? - /// An area that's part of a larger municipality. For example, Blissville is a submunicipality in the Queen County in New York. This property is only returned for a place index that uses Esri as a data provider. The property is represented as a district. For more information about data providers, see Amazon Location Service data providers. + /// An area that's part of a larger municipality. For example, Blissville is a submunicipality in Queens County in New York. This property is supported by Esri and OpenData. The Esri property is district, and the OpenData property is borough. public let subMunicipality: String? /// A county, or an area that's part of a larger region. For example, Metro Vancouver. public let subRegion: String? @@ -3707,9 +4086,9 @@ extension Location { public let supplementalCategories: [String]? /// The time zone in which the Place is located. Returned only when using HERE or Grab as the selected partner. public let timeZone: TimeZone? - /// For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123. This property is returned only for a place index that uses Esri or Grab as a data provider. It is not returned for SearchPlaceIndexForPosition. + /// For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123. Returned only for a place index that uses Esri or Grab as a data provider. It is not returned for SearchPlaceIndexForPosition. public let unitNumber: String? - /// For addresses with a UnitNumber, the type of unit. For example, Apartment. This property is returned only for a place index that uses Esri as a data provider. + /// For addresses with a UnitNumber, the type of unit. For example, Apartment. Returned only for a place index that uses Esri as a data provider. public let unitType: String? public init(addressNumber: String? = nil, categories: [String]? = nil, country: String?
= nil, geometry: PlaceGeometry, interpolated: Bool? = nil, label: String? = nil, municipality: String? = nil, neighborhood: String? = nil, postalCode: String? = nil, region: String? = nil, street: String? = nil, subMunicipality: String? = nil, subRegion: String? = nil, supplementalCategories: [String]? = nil, timeZone: TimeZone? = nil, unitNumber: String? = nil, unitType: String? = nil) { @@ -3786,7 +4165,7 @@ extension Location { public let geofenceId: String /// Associates one of more properties with the geofence. A property is a key-value pair stored with the geofence and added to any geofence event triggered with that geofence. Format: "key" : "value" public let geofenceProperties: [String: String]? - /// Contains the details to specify the position of the geofence. Can be either a polygon or a circle. Including both will return a validation error. Each geofence polygon can have a maximum of 1,000 vertices. + /// Contains the details to specify the position of the geofence. Can be a polygon, a circle or a polygon encoded in Geobuf format. Including multiple selections will return a validation error. The geofence polygon format supports a maximum of 1,000 vertices. The Geofence Geobuf format supports a maximum of 100,000 vertices. public let geometry: GeofenceGeometry public init(collectionName: String, geofenceId: String, geofenceProperties: [String: String]? = nil, geometry: GeofenceGeometry) { @@ -3907,7 +4286,7 @@ extension Location { public struct SearchForSuggestionsResult: AWSDecodableShape { /// The Amazon Location categories that describe the Place. For more information about using categories, including a list of Amazon Location categories, see Categories and filtering, in the Amazon Location Service Developer Guide. public let categories: [String]? - /// The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place. The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID. For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers. While you can use PlaceID in subsequent requests, PlaceID is not intended to be a permanent identifier and the ID can change between consecutive API calls. Please see the following PlaceID behaviour for each data provider: Esri: Place IDs will change every quarter at a minimum. The typical time period for these changes would be March, June, September, and December. Place IDs might also change between the typical quarterly change but that will be much less frequent. HERE: We recommend that you cache data for no longer than a week to keep your data data fresh. You can assume that less than 1% ID shifts will release over release which is approximately 1 - 2 times per week. Grab: Place IDs can expire or become invalid in the following situations. Data operations: The POI may be removed from Grab POI database by Grab Map Ops based on the ground-truth, such as being closed in the real world, being detected as a duplicate POI, or having incorrect information. Grab will synchronize data to the Waypoint environment on weekly basis. Interpolated POI: Interpolated POI is a temporary POI generated in real time when serving a request, and it will be marked as derived in the place.result_type field in the response. 
The information of interpolated POIs will be retained for at least 30 days, which means that within 30 days, you are able to obtain POI details by Place ID from Place Details API. After 30 days, the interpolated POIs(both Place ID and details) may expire and inaccessible from the Places Details API. + /// The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place. The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID. For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers. public let placeId: String? /// Categories from the data provider that describe the Place that are not mapped to any Amazon Location categories. public let supplementalCategories: [String]? @@ -4103,6 +4482,8 @@ extension Location { try self.validate(self.filterCategories, name: "filterCategories", parent: name, max: 5) try self.validate(self.filterCategories, name: "filterCategories", parent: name, min: 1) try self.filterCountries?.forEach { + try validate($0, name: "filterCountries[]", parent: name, max: 3) + try validate($0, name: "filterCountries[]", parent: name, min: 3) try validate($0, name: "filterCountries[]", parent: name, pattern: "^[A-Z]{3}$") } try self.validate(self.filterCountries, name: "filterCountries", parent: name, max: 100) @@ -4241,6 +4622,8 @@ extension Location { try self.validate(self.filterCategories, name: "filterCategories", parent: name, max: 5) try self.validate(self.filterCategories, name: "filterCategories", parent: name, min: 1) try self.filterCountries?.forEach { + try validate($0, name: "filterCountries[]", parent: name, max: 3) + try validate($0, name: "filterCountries[]", parent: name, min: 3) try validate($0, name: "filterCountries[]", parent: name, pattern: "^[A-Z]{3}$") } try self.validate(self.filterCountries, name: "filterCountries", parent: name, max: 100) @@ -4944,6 +5327,89 @@ extension Location { case updateTime = "UpdateTime" } } + + public struct VerifyDevicePositionRequest: AWSEncodableShape { + /// The device's state, including position, IP address, cell signals and Wi-Fi access points. + public let deviceState: DeviceState + /// The distance unit for the verification request. Default Value: Kilometers + public let distanceUnit: DistanceUnit? + /// The name of the tracker resource to be associated with verification request. + public let trackerName: String + + public init(deviceState: DeviceState, distanceUnit: DistanceUnit? = nil, trackerName: String) { + self.deviceState = deviceState + self.distanceUnit = distanceUnit + self.trackerName = trackerName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.deviceState, forKey: .deviceState) + try container.encodeIfPresent(self.distanceUnit, forKey: .distanceUnit) + request.encodePath(self.trackerName, key: "TrackerName") + } + + public func validate(name: String) throws { + try self.deviceState.validate(name: "\(name).deviceState") + try self.validate(self.trackerName, name: "trackerName", parent: name, max: 100) + try self.validate(self.trackerName, name: "trackerName", parent: name, min: 1) + try self.validate(self.trackerName, name: "trackerName", parent: name, pattern: "^[-._\\w]+$") + } + + private enum CodingKeys: String, CodingKey { + case deviceState = "DeviceState" + case distanceUnit = "DistanceUnit" + } + } + + public struct VerifyDevicePositionResponse: AWSDecodableShape { + /// The device identifier. + public let deviceId: String + /// The distance unit for the verification response. + public let distanceUnit: DistanceUnit + /// The inferred state of the device, given the provided position, IP address, cellular signals, and Wi-Fi- access points. + public let inferredState: InferredState + /// The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. + @CustomCoding + public var receivedTime: Date + /// The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ. + @CustomCoding + public var sampleTime: Date + + public init(deviceId: String, distanceUnit: DistanceUnit, inferredState: InferredState, receivedTime: Date, sampleTime: Date) { + self.deviceId = deviceId + self.distanceUnit = distanceUnit + self.inferredState = inferredState + self.receivedTime = receivedTime + self.sampleTime = sampleTime + } + + private enum CodingKeys: String, CodingKey { + case deviceId = "DeviceId" + case distanceUnit = "DistanceUnit" + case inferredState = "InferredState" + case receivedTime = "ReceivedTime" + case sampleTime = "SampleTime" + } + } + + public struct WiFiAccessPoint: AWSEncodableShape { + /// Medium access control address (Mac). + public let macAddress: String + /// Received signal strength (dBm) of the WLAN measurement data. + public let rss: Int + + public init(macAddress: String, rss: Int) { + self.macAddress = macAddress + self.rss = rss + } + + private enum CodingKeys: String, CodingKey { + case macAddress = "MacAddress" + case rss = "Rss" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/MWAA/MWAA_shapes.swift b/Sources/Soto/Services/MWAA/MWAA_shapes.swift index ce47dff4f0..b9143f049b 100644 --- a/Sources/Soto/Services/MWAA/MWAA_shapes.swift +++ b/Sources/Soto/Services/MWAA/MWAA_shapes.swift @@ -152,7 +152,7 @@ extension MWAA { public let dagS3Path: String /// Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints for your VPC. If you choose to create an environment in a shared VPC, you must set this value to CUSTOMER. In a shared VPC deployment, the environment will remain in PENDING status until you create the VPC endpoints. If you do not take action to create the endpoints within 72 hours, the status will change to CREATE_FAILED. You can delete the failed environment and create a new one. public let endpointManagement: EndpointManagement? 
- /// The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class. + /// The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. public let environmentClass: String? /// The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an Amazon Web Services Identity and Access Management (IAM) role that grants MWAA permission to access Amazon Web Services services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role. public let executionRoleArn: String @@ -160,8 +160,12 @@ extension MWAA { public let kmsKey: String? /// Defines the Apache Airflow logs to send to CloudWatch Logs. public let loggingConfiguration: LoggingConfigurationInput? + /// The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers. Valid values: Accepts between 2 and 5. Defaults to 2. + public let maxWebservers: Int? /// The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers. public let maxWorkers: Int? + /// The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers. Valid values: Accepts between 2 and 5. Defaults to 2. + public let minWebservers: Int? /// The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2. public let minWorkers: Int? /// The name of the Amazon MWAA environment. For example, MyMWAAEnvironment. @@ -191,7 +195,7 @@ extension MWAA { /// The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only. public let weeklyMaintenanceWindowStart: String?
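The new MinWebservers and MaxWebservers fields sit alongside the existing MinWorkers and MaxWorkers settings and bound how far Amazon MWAA scales the Apache Airflow web server fleet. A hedged sketch of setting them on environment creation follows; the CreateEnvironmentInput shape name and the network and bucket values are assumptions, since the struct declaration and the create call fall outside this excerpt, while the other values reuse the examples from the doc comments above.

import SotoMWAA

// Sketch only: request an environment whose web server fleet scales between 2 and 5 instances.
func environmentInput(network: MWAA.NetworkConfiguration) -> MWAA.CreateEnvironmentInput {
    return MWAA.CreateEnvironmentInput(
        dagS3Path: "dags",
        environmentClass: "mw1.xlarge",                                     // one of the newly added classes
        executionRoleArn: "arn:aws:iam::123456789:role/my-execution-role",
        maxWebservers: 5,                                                   // upper bound for web server scaling
        maxWorkers: 20,
        minWebservers: 2,                                                   // lower bound; both accept 2 to 5
        minWorkers: 2,
        name: "MyMWAAEnvironment",
        networkConfiguration: network,
        sourceBucketArn: "arn:aws:s3:::my-airflow-bucket"                   // placeholder bucket ARN
    )
}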
- public init(airflowConfigurationOptions: [String: String]? = nil, airflowVersion: String? = nil, dagS3Path: String, endpointManagement: EndpointManagement? = nil, environmentClass: String? = nil, executionRoleArn: String, kmsKey: String? = nil, loggingConfiguration: LoggingConfigurationInput? = nil, maxWorkers: Int? = nil, minWorkers: Int? = nil, name: String, networkConfiguration: NetworkConfiguration, pluginsS3ObjectVersion: String? = nil, pluginsS3Path: String? = nil, requirementsS3ObjectVersion: String? = nil, requirementsS3Path: String? = nil, schedulers: Int? = nil, sourceBucketArn: String, startupScriptS3ObjectVersion: String? = nil, startupScriptS3Path: String? = nil, tags: [String: String]? = nil, webserverAccessMode: WebserverAccessMode? = nil, weeklyMaintenanceWindowStart: String? = nil) { + public init(airflowConfigurationOptions: [String: String]? = nil, airflowVersion: String? = nil, dagS3Path: String, endpointManagement: EndpointManagement? = nil, environmentClass: String? = nil, executionRoleArn: String, kmsKey: String? = nil, loggingConfiguration: LoggingConfigurationInput? = nil, maxWebservers: Int? = nil, maxWorkers: Int? = nil, minWebservers: Int? = nil, minWorkers: Int? = nil, name: String, networkConfiguration: NetworkConfiguration, pluginsS3ObjectVersion: String? = nil, pluginsS3Path: String? = nil, requirementsS3ObjectVersion: String? = nil, requirementsS3Path: String? = nil, schedulers: Int? = nil, sourceBucketArn: String, startupScriptS3ObjectVersion: String? = nil, startupScriptS3Path: String? = nil, tags: [String: String]? = nil, webserverAccessMode: WebserverAccessMode? = nil, weeklyMaintenanceWindowStart: String? = nil) { self.airflowConfigurationOptions = airflowConfigurationOptions self.airflowVersion = airflowVersion self.dagS3Path = dagS3Path @@ -200,7 +204,9 @@ extension MWAA { self.executionRoleArn = executionRoleArn self.kmsKey = kmsKey self.loggingConfiguration = loggingConfiguration + self.maxWebservers = maxWebservers self.maxWorkers = maxWorkers + self.minWebservers = minWebservers self.minWorkers = minWorkers self.name = name self.networkConfiguration = networkConfiguration @@ -228,7 +234,9 @@ extension MWAA { try container.encode(self.executionRoleArn, forKey: .executionRoleArn) try container.encodeIfPresent(self.kmsKey, forKey: .kmsKey) try container.encodeIfPresent(self.loggingConfiguration, forKey: .loggingConfiguration) + try container.encodeIfPresent(self.maxWebservers, forKey: .maxWebservers) try container.encodeIfPresent(self.maxWorkers, forKey: .maxWorkers) + try container.encodeIfPresent(self.minWebservers, forKey: .minWebservers) try container.encodeIfPresent(self.minWorkers, forKey: .minWorkers) request.encodePath(self.name, key: "Name") try container.encode(self.networkConfiguration, forKey: .networkConfiguration) @@ -268,7 +276,9 @@ extension MWAA { try self.validate(self.kmsKey, name: "kmsKey", parent: name, max: 1224) try self.validate(self.kmsKey, name: "kmsKey", parent: name, min: 1) try self.validate(self.kmsKey, name: "kmsKey", parent: name, pattern: "^(((arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?key\\/)?[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}|(arn:aws(-[a-z]+)?:kms:[a-z]{2}-[a-z]+-\\d:\\d+:)?alias/.+)$") + try self.validate(self.maxWebservers, name: "maxWebservers", parent: name, min: 2) try self.validate(self.maxWorkers, name: "maxWorkers", parent: name, min: 1) + try self.validate(self.minWebservers, name: "minWebservers", parent: name, min: 2) try self.validate(self.minWorkers, name: 
"minWorkers", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 80) try self.validate(self.name, name: "name", parent: name, min: 1) @@ -317,7 +327,9 @@ extension MWAA { case executionRoleArn = "ExecutionRoleArn" case kmsKey = "KmsKey" case loggingConfiguration = "LoggingConfiguration" + case maxWebservers = "MaxWebservers" case maxWorkers = "MaxWorkers" + case minWebservers = "MinWebservers" case minWorkers = "MinWorkers" case networkConfiguration = "NetworkConfiguration" case pluginsS3ObjectVersion = "PluginsS3ObjectVersion" @@ -442,7 +454,7 @@ extension MWAA { public struct Environment: AWSDecodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options attached to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2. + /// The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1. public let airflowVersion: String? /// The Amazon Resource Name (ARN) of the Amazon MWAA environment. public let arn: String? @@ -456,7 +468,7 @@ extension MWAA { public let databaseVpcEndpointService: String? /// Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints in your VPC. public let endpointManagement: EndpointManagement? - /// The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class. + /// The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. public let environmentClass: String? /// The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role. public let executionRoleArn: String? @@ -466,8 +478,12 @@ extension MWAA { public let lastUpdate: LastUpdate? /// The Apache Airflow logs published to CloudWatch Logs. public let loggingConfiguration: LoggingConfiguration? + /// The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebserers. As TPS rates decrease Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinxWebserers. Valid values: Accepts between 2 and 5. Defaults to 2. + public let maxWebservers: Int? /// The maximum number of workers that run in your environment. For example, 20. public let maxWorkers: Int? + /// The minimum number of web servers that you want to run in your environment. 
Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinxWebserers. Valid values: Accepts between 2 and 5. Defaults to 2. + public let minWebservers: Int? /// The minimum number of workers that run in your environment. For example, 2. public let minWorkers: Int? /// The name of the Amazon MWAA environment. For example, MyMWAAEnvironment. @@ -505,7 +521,7 @@ extension MWAA { /// The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30. public let weeklyMaintenanceWindowStart: String? - public init(airflowConfigurationOptions: [String: String]? = nil, airflowVersion: String? = nil, arn: String? = nil, celeryExecutorQueue: String? = nil, createdAt: Date? = nil, dagS3Path: String? = nil, databaseVpcEndpointService: String? = nil, endpointManagement: EndpointManagement? = nil, environmentClass: String? = nil, executionRoleArn: String? = nil, kmsKey: String? = nil, lastUpdate: LastUpdate? = nil, loggingConfiguration: LoggingConfiguration? = nil, maxWorkers: Int? = nil, minWorkers: Int? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, pluginsS3ObjectVersion: String? = nil, pluginsS3Path: String? = nil, requirementsS3ObjectVersion: String? = nil, requirementsS3Path: String? = nil, schedulers: Int? = nil, serviceRoleArn: String? = nil, sourceBucketArn: String? = nil, startupScriptS3ObjectVersion: String? = nil, startupScriptS3Path: String? = nil, status: EnvironmentStatus? = nil, tags: [String: String]? = nil, webserverAccessMode: WebserverAccessMode? = nil, webserverUrl: String? = nil, webserverVpcEndpointService: String? = nil, weeklyMaintenanceWindowStart: String? = nil) { + public init(airflowConfigurationOptions: [String: String]? = nil, airflowVersion: String? = nil, arn: String? = nil, celeryExecutorQueue: String? = nil, createdAt: Date? = nil, dagS3Path: String? = nil, databaseVpcEndpointService: String? = nil, endpointManagement: EndpointManagement? = nil, environmentClass: String? = nil, executionRoleArn: String? = nil, kmsKey: String? = nil, lastUpdate: LastUpdate? = nil, loggingConfiguration: LoggingConfiguration? = nil, maxWebservers: Int? = nil, maxWorkers: Int? = nil, minWebservers: Int? = nil, minWorkers: Int? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, pluginsS3ObjectVersion: String? = nil, pluginsS3Path: String? = nil, requirementsS3ObjectVersion: String? = nil, requirementsS3Path: String? = nil, schedulers: Int? = nil, serviceRoleArn: String? = nil, sourceBucketArn: String? = nil, startupScriptS3ObjectVersion: String? = nil, startupScriptS3Path: String? = nil, status: EnvironmentStatus? = nil, tags: [String: String]? = nil, webserverAccessMode: WebserverAccessMode? = nil, webserverUrl: String? = nil, webserverVpcEndpointService: String? = nil, weeklyMaintenanceWindowStart: String? 
= nil) { self.airflowConfigurationOptions = airflowConfigurationOptions self.airflowVersion = airflowVersion self.arn = arn @@ -519,7 +535,9 @@ extension MWAA { self.kmsKey = kmsKey self.lastUpdate = lastUpdate self.loggingConfiguration = loggingConfiguration + self.maxWebservers = maxWebservers self.maxWorkers = maxWorkers + self.minWebservers = minWebservers self.minWorkers = minWorkers self.name = name self.networkConfiguration = networkConfiguration @@ -554,7 +572,9 @@ extension MWAA { case kmsKey = "KmsKey" case lastUpdate = "LastUpdate" case loggingConfiguration = "LoggingConfiguration" + case maxWebservers = "MaxWebservers" case maxWorkers = "MaxWorkers" + case minWebservers = "MinWebservers" case minWorkers = "MinWorkers" case name = "Name" case networkConfiguration = "NetworkConfiguration" @@ -1019,18 +1039,22 @@ extension MWAA { public struct UpdateEnvironmentInput: AWSEncodableShape { /// A list of key-value pairs containing the Apache Airflow configuration options you want to attach to your environment. For more information, see Apache Airflow configuration options. public let airflowConfigurationOptions: [String: String]? - /// The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2. + /// The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1. public let airflowVersion: String? /// The relative path to the DAGs folder on your Amazon S3 bucket. For example, dags. For more information, see Adding or updating DAGs. public let dagS3Path: String? - /// The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class. + /// The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class. public let environmentClass: String? /// The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. For more information, see Amazon MWAA Execution role. public let executionRoleArn: String? /// The Apache Airflow log types to send to CloudWatch Logs. public let loggingConfiguration: LoggingConfigurationInput? + /// The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. 
For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers. Valid values: Accepts between 2 and 5. Defaults to 2. + public let maxWebservers: Int? /// The maximum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. For example, 20. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the one worker that is included with your environment, or the number you specify in MinWorkers. public let maxWorkers: Int? + /// The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using Apache Airflow REST API, or the Apache Airflow CLI. As the transaction-per-second rate, and the network load, decrease, Amazon MWAA disposes of the additional web servers, and scales down to the number set in MinWebservers. Valid values: Accepts between 2 and 5. Defaults to 2. + public let minWebservers: Int? /// The minimum number of workers that you want to run in your environment. MWAA scales the number of Apache Airflow workers up to the number you specify in the MaxWorkers field. When there are no more tasks running, and no more in the queue, MWAA disposes of the extra workers leaving the worker count you specify in the MinWorkers field. For example, 2. public let minWorkers: Int? /// The name of your Amazon MWAA environment. For example, MyMWAAEnvironment. @@ -1058,14 +1082,16 @@ extension MWAA { /// The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only. public let weeklyMaintenanceWindowStart: String? - public init(airflowConfigurationOptions: [String: String]? = nil, airflowVersion: String? = nil, dagS3Path: String? = nil, environmentClass: String? = nil, executionRoleArn: String? = nil, loggingConfiguration: LoggingConfigurationInput? = nil, maxWorkers: Int? = nil, minWorkers: Int? = nil, name: String, networkConfiguration: UpdateNetworkConfigurationInput? = nil, pluginsS3ObjectVersion: String? = nil, pluginsS3Path: String? = nil, requirementsS3ObjectVersion: String? = nil, requirementsS3Path: String? = nil, schedulers: Int? = nil, sourceBucketArn: String? = nil, startupScriptS3ObjectVersion: String? = nil, startupScriptS3Path: String? = nil, webserverAccessMode: WebserverAccessMode? = nil, weeklyMaintenanceWindowStart: String? = nil) { + public init(airflowConfigurationOptions: [String: String]? = nil, airflowVersion: String? = nil, dagS3Path: String? = nil, environmentClass: String? = nil, executionRoleArn: String? = nil, loggingConfiguration: LoggingConfigurationInput? = nil, maxWebservers: Int? = nil, maxWorkers: Int? = nil, minWebservers: Int? = nil, minWorkers: Int? = nil, name: String, networkConfiguration: UpdateNetworkConfigurationInput? = nil, pluginsS3ObjectVersion: String? = nil, pluginsS3Path: String? = nil, requirementsS3ObjectVersion: String?
= nil, requirementsS3Path: String? = nil, schedulers: Int? = nil, sourceBucketArn: String? = nil, startupScriptS3ObjectVersion: String? = nil, startupScriptS3Path: String? = nil, webserverAccessMode: WebserverAccessMode? = nil, weeklyMaintenanceWindowStart: String? = nil) { self.airflowConfigurationOptions = airflowConfigurationOptions self.airflowVersion = airflowVersion self.dagS3Path = dagS3Path self.environmentClass = environmentClass self.executionRoleArn = executionRoleArn self.loggingConfiguration = loggingConfiguration + self.maxWebservers = maxWebservers self.maxWorkers = maxWorkers + self.minWebservers = minWebservers self.minWorkers = minWorkers self.name = name self.networkConfiguration = networkConfiguration @@ -1090,7 +1116,9 @@ extension MWAA { try container.encodeIfPresent(self.environmentClass, forKey: .environmentClass) try container.encodeIfPresent(self.executionRoleArn, forKey: .executionRoleArn) try container.encodeIfPresent(self.loggingConfiguration, forKey: .loggingConfiguration) + try container.encodeIfPresent(self.maxWebservers, forKey: .maxWebservers) try container.encodeIfPresent(self.maxWorkers, forKey: .maxWorkers) + try container.encodeIfPresent(self.minWebservers, forKey: .minWebservers) try container.encodeIfPresent(self.minWorkers, forKey: .minWorkers) request.encodePath(self.name, key: "Name") try container.encodeIfPresent(self.networkConfiguration, forKey: .networkConfiguration) @@ -1126,7 +1154,9 @@ extension MWAA { try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 1224) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 1) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:aws(-[a-z]+)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.maxWebservers, name: "maxWebservers", parent: name, min: 2) try self.validate(self.maxWorkers, name: "maxWorkers", parent: name, min: 1) + try self.validate(self.minWebservers, name: "minWebservers", parent: name, min: 2) try self.validate(self.minWorkers, name: "minWorkers", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 80) try self.validate(self.name, name: "name", parent: name, min: 1) @@ -1163,7 +1193,9 @@ extension MWAA { case environmentClass = "EnvironmentClass" case executionRoleArn = "ExecutionRoleArn" case loggingConfiguration = "LoggingConfiguration" + case maxWebservers = "MaxWebservers" case maxWorkers = "MaxWorkers" + case minWebservers = "MinWebservers" case minWorkers = "MinWorkers" case networkConfiguration = "NetworkConfiguration" case pluginsS3ObjectVersion = "PluginsS3ObjectVersion" diff --git a/Sources/Soto/Services/Macie2/Macie2_api.swift b/Sources/Soto/Services/Macie2/Macie2_api.swift index 1c8086e26c..45a5a0e06f 100644 --- a/Sources/Soto/Services/Macie2/Macie2_api.swift +++ b/Sources/Soto/Services/Macie2/Macie2_api.swift @@ -109,6 +109,19 @@ public struct Macie2: AWSService { ) } + /// Changes the status of automated sensitive data discovery for one or more accounts. 
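As a quick illustration of the new web-server scaling fields added to MWAA.UpdateEnvironmentInput above (placed here, ahead of the Macie2 changes that follow), a minimal usage sketch: the AWSClient setup is assumed, and the environment name and target counts are placeholders rather than values taken from this patch.

import SotoMWAA

// Sketch (assumed setup): let an existing environment scale its Apache Airflow
// web servers between 2 and 5 instances. "MyMWAAEnvironment" is a placeholder.
func scaleWebservers(client: AWSClient) async throws {
    let mwaa = MWAA(client: client, region: .useast1)
    let input = MWAA.UpdateEnvironmentInput(
        maxWebservers: 5,   // accepts 2...5, defaults to 2
        minWebservers: 2,
        name: "MyMWAAEnvironment"
    )
    let response = try await mwaa.updateEnvironment(input)
    print(response.arn ?? "no ARN returned")
}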
+ @Sendable + public func batchUpdateAutomatedDiscoveryAccounts(_ input: BatchUpdateAutomatedDiscoveryAccountsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchUpdateAutomatedDiscoveryAccountsResponse { + return try await self.client.execute( + operation: "BatchUpdateAutomatedDiscoveryAccounts", + path: "/automated-discovery/accounts", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates and defines the settings for an allow list. @Sendable public func createAllowList(_ input: CreateAllowListRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAllowListResponse { @@ -434,7 +447,7 @@ public struct Macie2: AWSService { ) } - /// Retrieves the configuration settings and status of automated sensitive data discovery for an account. + /// Retrieves the configuration settings and status of automated sensitive data discovery for an organization or standalone account. @Sendable public func getAutomatedDiscoveryConfiguration(_ input: GetAutomatedDiscoveryConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAutomatedDiscoveryConfigurationResponse { return try await self.client.execute( @@ -707,6 +720,19 @@ public struct Macie2: AWSService { ) } + /// Retrieves the status of automated sensitive data discovery for one or more accounts. + @Sendable + public func listAutomatedDiscoveryAccounts(_ input: ListAutomatedDiscoveryAccountsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAutomatedDiscoveryAccountsResponse { + return try await self.client.execute( + operation: "ListAutomatedDiscoveryAccounts", + path: "/automated-discovery/accounts", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves a subset of information about one or more classification jobs. @Sendable public func listClassificationJobs(_ input: ListClassificationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListClassificationJobsResponse { @@ -772,7 +798,7 @@ public struct Macie2: AWSService { ) } - /// Retrieves information about the Amazon Macie membership invitations that were received by an account. + /// Retrieves information about Amazon Macie membership invitations that were received by an account. @Sendable public func listInvitations(_ input: ListInvitationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListInvitationsResponse { return try await self.client.execute( @@ -824,7 +850,7 @@ public struct Macie2: AWSService { ) } - /// Retrieves information about objects that were selected from an S3 bucket for automated sensitive data discovery. + /// Retrieves information about objects that Amazon Macie selected from an S3 bucket for automated sensitive data discovery. @Sendable public func listResourceProfileArtifacts(_ input: ListResourceProfileArtifactsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListResourceProfileArtifactsResponse { return try await self.client.execute( @@ -876,7 +902,7 @@ public struct Macie2: AWSService { ) } - /// Creates or updates the configuration settings for storing data classification results. + /// Adds or updates the configuration settings for storing data classification results. 
@Sendable public func putClassificationExportConfiguration(_ input: PutClassificationExportConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutClassificationExportConfigurationResponse { return try await self.client.execute( operation: "PutClassificationExportConfiguration", path: "/classification-export-configuration", httpMethod: .PUT, serviceConfig: self.config, input: input, logger: logger ) } @@ -928,7 +954,7 @@ extension Macie2 { ) } - /// Tests a custom data identifier. + /// Tests criteria for a custom data identifier. @Sendable public func testCustomDataIdentifier(_ input: TestCustomDataIdentifierRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TestCustomDataIdentifierResponse { return try await self.client.execute( @@ -967,7 +993,7 @@ extension Macie2 { ) } - /// Enables or disables automated sensitive data discovery for an account. + /// Changes the configuration settings and status of automated sensitive data discovery for an organization or standalone account. @Sendable public func updateAutomatedDiscoveryConfiguration(_ input: UpdateAutomatedDiscoveryConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAutomatedDiscoveryConfigurationResponse { return try await self.client.execute( @@ -1181,6 +1207,25 @@ extension Macie2 { ) } + /// Retrieves the status of automated sensitive data discovery for one or more accounts. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listAutomatedDiscoveryAccountsPaginator( + _ input: ListAutomatedDiscoveryAccountsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListAutomatedDiscoveryAccountsRequest, ListAutomatedDiscoveryAccountsResponse> { + return .init( + input: input, + command: self.listAutomatedDiscoveryAccounts, + inputKey: \ListAutomatedDiscoveryAccountsRequest.nextToken, + outputKey: \ListAutomatedDiscoveryAccountsResponse.nextToken, + logger: logger + ) + } + /// Retrieves a subset of information about one or more classification jobs. /// Return PaginatorSequence for operation. /// @@ -1276,7 +1321,7 @@ extension Macie2 { ) } - /// Retrieves information about the Amazon Macie membership invitations that were received by an account. + /// Retrieves information about Amazon Macie membership invitations that were received by an account. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -1352,7 +1397,7 @@ extension Macie2 { ) } - /// Retrieves information about objects that were selected from an S3 bucket for automated sensitive data discovery. + /// Retrieves information about objects that Amazon Macie selected from an S3 bucket for automated sensitive data discovery. /// Return PaginatorSequence for operation.
/// /// - Parameters: @@ -1461,6 +1506,16 @@ extension Macie2.ListAllowListsRequest: AWSPaginateToken { } } +extension Macie2.ListAutomatedDiscoveryAccountsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Macie2.ListAutomatedDiscoveryAccountsRequest { + return .init( + accountIds: self.accountIds, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Macie2.ListClassificationJobsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Macie2.ListClassificationJobsRequest { return .init( diff --git a/Sources/Soto/Services/Macie2/Macie2_shapes.swift b/Sources/Soto/Services/Macie2/Macie2_shapes.swift index 3d2de7734b..86a67b770c 100644 --- a/Sources/Soto/Services/Macie2/Macie2_shapes.swift +++ b/Sources/Soto/Services/Macie2/Macie2_shapes.swift @@ -51,6 +51,31 @@ extension Macie2 { public var description: String { return self.rawValue } } + public enum AutoEnableMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case all = "ALL" + case new = "NEW" + case none = "NONE" + public var description: String { return self.rawValue } + } + + public enum AutomatedDiscoveryAccountStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + + public enum AutomatedDiscoveryAccountUpdateErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case accountNotFound = "ACCOUNT_NOT_FOUND" + case accountPaused = "ACCOUNT_PAUSED" + public var description: String { return self.rawValue } + } + + public enum AutomatedDiscoveryMonitoringStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case monitored = "MONITORED" + case notMonitored = "NOT_MONITORED" + public var description: String { return self.rawValue } + } + public enum AutomatedDiscoveryStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -319,6 +344,7 @@ extension Macie2 { public enum SearchResourcesSimpleCriterionKey: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accountId = "ACCOUNT_ID" + case automatedDiscoveryMonitoringStatus = "AUTOMATED_DISCOVERY_MONITORING_STATUS" case s3BucketEffectivePermission = "S3_BUCKET_EFFECTIVE_PERMISSION" case s3BucketName = "S3_BUCKET_NAME" case s3BucketSharedAccess = "S3_BUCKET_SHARED_ACCESS" @@ -684,6 +710,57 @@ extension Macie2 { } } + public struct AutomatedDiscoveryAccount: AWSDecodableShape { + /// The Amazon Web Services account ID for the account. + public let accountId: String? + /// The current status of automated sensitive data discovery for the account. Possible values are: ENABLED, perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account. + public let status: AutomatedDiscoveryAccountStatus? + + public init(accountId: String? = nil, status: AutomatedDiscoveryAccountStatus? = nil) { + self.accountId = accountId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case status = "status" + } + } + + public struct AutomatedDiscoveryAccountUpdate: AWSEncodableShape { + /// The Amazon Web Services account ID for the account. + public let accountId: String? 
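The new BatchUpdateAutomatedDiscoveryAccounts operation and the account-update shapes below can be driven roughly as follows. This is a sketch that uses only types appearing in this patch; the client setup is assumed and the account ID is a placeholder.

import SotoMacie2

// Sketch (assumed setup): enable automated sensitive data discovery for one
// member account and report any per-account failures.
func enableDiscovery(client: AWSClient) async throws {
    let macie = Macie2(client: client, region: .useast1)
    let request = Macie2.BatchUpdateAutomatedDiscoveryAccountsRequest(
        accounts: [.init(accountId: "111122223333", status: .enabled)]
    )
    let response = try await macie.batchUpdateAutomatedDiscoveryAccounts(request)
    for failure in response.errors ?? [] {
        print("account \(failure.accountId ?? "?") not updated: \(failure.errorCode?.rawValue ?? "unknown")")
    }
}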
+ /// The new status of automated sensitive data discovery for the account. Valid values are: ENABLED, perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account. + public let status: AutomatedDiscoveryAccountStatus? + + public init(accountId: String? = nil, status: AutomatedDiscoveryAccountStatus? = nil) { + self.accountId = accountId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case status = "status" + } + } + + public struct AutomatedDiscoveryAccountUpdateError: AWSDecodableShape { + /// The Amazon Web Services account ID for the account that the request applied to. + public let accountId: String? + /// The error code for the error that caused the request to fail for the account (accountId). Possible values are: ACCOUNT_NOT_FOUND, the account doesn’t exist or you're not the Amazon Macie administrator for the account; and, ACCOUNT_PAUSED, Macie isn’t enabled for the account in the current Amazon Web Services Region. + public let errorCode: AutomatedDiscoveryAccountUpdateErrorCode? + + public init(accountId: String? = nil, errorCode: AutomatedDiscoveryAccountUpdateErrorCode? = nil) { + self.accountId = accountId + self.errorCode = errorCode + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case errorCode = "errorCode" + } + } + public struct AwsAccount: AWSDecodableShape { /// The unique identifier for the Amazon Web Services account. public let accountId: String? @@ -778,6 +855,32 @@ extension Macie2 { } } + public struct BatchUpdateAutomatedDiscoveryAccountsRequest: AWSEncodableShape { + /// An array of objects, one for each account to change the status of automated sensitive data discovery for. Each object specifies the Amazon Web Services account ID for an account and a new status for that account. + public let accounts: [AutomatedDiscoveryAccountUpdate]? + + public init(accounts: [AutomatedDiscoveryAccountUpdate]? = nil) { + self.accounts = accounts + } + + private enum CodingKeys: String, CodingKey { + case accounts = "accounts" + } + } + + public struct BatchUpdateAutomatedDiscoveryAccountsResponse: AWSDecodableShape { + /// An array of objects, one for each account whose status wasn’t changed. Each object identifies the account and explains why the status of automated sensitive data discovery wasn’t changed for the account. This value is null if the request succeeded for all specified accounts. + public let errors: [AutomatedDiscoveryAccountUpdateError]? + + public init(errors: [AutomatedDiscoveryAccountUpdateError]? = nil) { + self.errors = errors + } + + private enum CodingKeys: String, CodingKey { + case errors = "errors" + } + } + public struct BlockPublicAccess: AWSDecodableShape { /// Specifies whether Amazon S3 blocks public access control lists (ACLs) for the bucket and objects in the bucket. public let blockPublicAcls: Bool? @@ -962,6 +1065,8 @@ extension Macie2 { public let accountId: String? /// Specifies whether the bucket policy for the bucket requires server-side encryption of objects when objects are added to the bucket. Possible values are: FALSE - The bucket policy requires server-side encryption of new objects. PutObject requests must include a valid server-side encryption header. TRUE - The bucket doesn't have a bucket policy or it has a bucket policy that doesn't require server-side encryption of new objects. 
If a bucket policy exists, it doesn't require PutObject requests to include a valid server-side encryption header. UNKNOWN - Amazon Macie can't determine whether the bucket policy requires server-side encryption of new objects. Valid server-side encryption headers are: x-amz-server-side-encryption with a value of AES256 or aws:kms, and x-amz-server-side-encryption-customer-algorithm with a value of AES256. public let allowsUnencryptedObjectUploads: AllowsUnencryptedObjectUploads? + /// Specifies whether automated sensitive data discovery is currently configured to analyze objects in the bucket. Possible values are: MONITORED, the bucket is included in analyses; and, NOT_MONITORED, the bucket is excluded from analyses. If automated sensitive data discovery is disabled for your account, this value is NOT_MONITORED. + public let automatedDiscoveryMonitoringStatus: AutomatedDiscoveryMonitoringStatus? /// The Amazon Resource Name (ARN) of the bucket. public let bucketArn: String? /// The date and time, in UTC and extended ISO 8601 format, when the bucket was created. This value can also indicate when changes such as edits to the bucket's policy were most recently made to the bucket. @@ -977,9 +1082,9 @@ extension Macie2 { public let errorCode: BucketMetadataErrorCode? /// A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information. public let errorMessage: String? - /// Specifies whether any one-time or recurring classification jobs are configured to analyze data in the bucket, and, if so, the details of the job that ran most recently. + /// Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently. public let jobDetails: JobDetails? - /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed data in the bucket while performing automated sensitive data discovery for your account. This value is null if automated sensitive data discovery is currently disabled for your account. + /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account. @OptionalCustomCoding public var lastAutomatedDiscoveryTime: Date? /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently retrieved bucket or object metadata from Amazon S3 for the bucket. @@ -995,7 +1100,7 @@ extension Macie2 { public let region: String? /// Specifies whether the bucket is configured to replicate one or more objects to buckets for other Amazon Web Services accounts and, if so, which accounts. public let replicationDetails: ReplicationDetails? - /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). This value is null if automated sensitive data discovery is currently disabled for your account. 
+ /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses. public let sensitivityScore: Int? /// The default server-side encryption settings for the bucket. public let serverSideEncryption: BucketServerSideEncryption? @@ -1014,9 +1119,10 @@ extension Macie2 { /// Specifies whether versioning is enabled for the bucket. public let versioning: Bool? - public init(accountId: String? = nil, allowsUnencryptedObjectUploads: AllowsUnencryptedObjectUploads? = nil, bucketArn: String? = nil, bucketCreatedAt: Date? = nil, bucketName: String? = nil, classifiableObjectCount: Int64? = nil, classifiableSizeInBytes: Int64? = nil, errorCode: BucketMetadataErrorCode? = nil, errorMessage: String? = nil, jobDetails: JobDetails? = nil, lastAutomatedDiscoveryTime: Date? = nil, lastUpdated: Date? = nil, objectCount: Int64? = nil, objectCountByEncryptionType: ObjectCountByEncryptionType? = nil, publicAccess: BucketPublicAccess? = nil, region: String? = nil, replicationDetails: ReplicationDetails? = nil, sensitivityScore: Int? = nil, serverSideEncryption: BucketServerSideEncryption? = nil, sharedAccess: SharedAccess? = nil, sizeInBytes: Int64? = nil, sizeInBytesCompressed: Int64? = nil, tags: [KeyValuePair]? = nil, unclassifiableObjectCount: ObjectLevelStatistics? = nil, unclassifiableObjectSizeInBytes: ObjectLevelStatistics? = nil, versioning: Bool? = nil) { + public init(accountId: String? = nil, allowsUnencryptedObjectUploads: AllowsUnencryptedObjectUploads? = nil, automatedDiscoveryMonitoringStatus: AutomatedDiscoveryMonitoringStatus? = nil, bucketArn: String? = nil, bucketCreatedAt: Date? = nil, bucketName: String? = nil, classifiableObjectCount: Int64? = nil, classifiableSizeInBytes: Int64? = nil, errorCode: BucketMetadataErrorCode? = nil, errorMessage: String? = nil, jobDetails: JobDetails? = nil, lastAutomatedDiscoveryTime: Date? = nil, lastUpdated: Date? = nil, objectCount: Int64? = nil, objectCountByEncryptionType: ObjectCountByEncryptionType? = nil, publicAccess: BucketPublicAccess? = nil, region: String? = nil, replicationDetails: ReplicationDetails? = nil, sensitivityScore: Int? = nil, serverSideEncryption: BucketServerSideEncryption? = nil, sharedAccess: SharedAccess? = nil, sizeInBytes: Int64? = nil, sizeInBytesCompressed: Int64? = nil, tags: [KeyValuePair]? = nil, unclassifiableObjectCount: ObjectLevelStatistics? = nil, unclassifiableObjectSizeInBytes: ObjectLevelStatistics? = nil, versioning: Bool? = nil) { self.accountId = accountId self.allowsUnencryptedObjectUploads = allowsUnencryptedObjectUploads + self.automatedDiscoveryMonitoringStatus = automatedDiscoveryMonitoringStatus self.bucketArn = bucketArn self.bucketCreatedAt = bucketCreatedAt self.bucketName = bucketName @@ -1046,6 +1152,7 @@ extension Macie2 { private enum CodingKeys: String, CodingKey { case accountId = "accountId" case allowsUnencryptedObjectUploads = "allowsUnencryptedObjectUploads" + case automatedDiscoveryMonitoringStatus = "automatedDiscoveryMonitoringStatus" case bucketArn = "bucketArn" case bucketCreatedAt = "bucketCreatedAt" case bucketName = "bucketName" @@ -1384,11 +1491,11 @@ extension Macie2 { public let description: String?
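The AUTOMATED_DISCOVERY_MONITORING_STATUS criterion key added earlier pairs with the automatedDiscoveryMonitoringStatus bucket property above. A minimal sketch of building the matching SearchResources criterion follows; the .eq comparator and the placement inside a SearchResourcesRequest's bucket criteria are assumptions based on existing Macie2 shapes that this hunk does not show.

import SotoMacie2

// Sketch: a simple criterion matching buckets that automated sensitive data
// discovery currently monitors. Criterion values are case sensitive.
let monitoredOnly = Macie2.SearchResourcesSimpleCriterion(
    comparator: .eq,                              // assumed comparator case
    key: .automatedDiscoveryMonitoringStatus,
    values: ["MONITORED"]
)
// monitoredOnly would then be supplied through the bucketCriteria of a
// SearchResourcesRequest (wiring omitted here).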
/// For a recurring job, specifies whether to analyze all existing, eligible objects immediately after the job is created (true). To analyze only those objects that are created or changed after you create the job and before the job's first scheduled run, set this value to false. If you configure the job to run only once, don't specify a value for this property. public let initialRun: Bool? - /// The schedule for running the job. Valid values are: ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property. SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to define the recurrence pattern for the job. + /// The schedule for running the job. Valid values are: ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property. SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to specify the recurrence pattern for the job. public let jobType: JobType? /// An array of unique identifiers, one for each managed data identifier for the job to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type that you specify for the job (managedDataIdentifierSelector). To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation. public let managedDataIdentifierIds: [String]? - /// The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are: ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property. EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property. INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property. NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property. RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property. If you don't specify a value for this property, the job uses the recommended set of managed data identifiers. If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you don't specify a value for this property or you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts. For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide. + /// The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are: ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property. EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property. 
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property. NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property. RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property. If you don't specify a value for this property, the job uses the recommended set of managed data identifiers. If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you don't specify a value for this property or you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts. To learn about individual managed data identifiers or determine which ones are in the recommended set, see Using managed data identifiers or Recommended managed data identifiers in the Amazon Macie User Guide. public let managedDataIdentifierSelector: ManagedDataIdentifierSelector? /// A custom name for the job. The name can contain as many as 500 characters. public let name: String? @@ -2008,14 +2115,14 @@ extension Macie2 { } public struct DescribeClassificationJobResponse: AWSDecodableShape { - /// An array of unique identifiers, one for each allow list that the job uses when it analyzes data. + /// An array of unique identifiers, one for each allow list that the job is configured to use when it analyzes data. public let allowListIds: [String]? /// The token that was provided to ensure the idempotency of the request to create the job. public let clientToken: String? /// The date and time, in UTC and extended ISO 8601 format, when the job was created. @OptionalCustomCoding public var createdAt: Date? - /// An array of unique identifiers, one for each custom data identifier that the job uses when it analyzes data. This value is null if the job uses only managed data identifiers to analyze data. + /// An array of unique identifiers, one for each custom data identifier that the job is configured to use when it analyzes data. This value is null if the job is configured to use only managed data identifiers to analyze data. public let customDataIdentifierIds: [String]? /// The custom description of the job. public let description: String? @@ -2036,7 +2143,7 @@ extension Macie2 { public var lastRunTime: Date? /// An array of unique identifiers, one for each managed data identifier that the job is explicitly configured to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type specified for the job (managedDataIdentifierSelector).This value is null if the job's managed data identifier selection type is ALL, NONE, or RECOMMENDED. public let managedDataIdentifierIds: [String]? - /// The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are: ALL - Use all managed data identifiers. EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property. INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property. NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds). 
RECOMMENDED (default) - Use the recommended set of managed data identifiers. If this value is null, the job uses the recommended set of managed data identifiers. If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts. For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide. + /// The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are: ALL - Use all managed data identifiers. EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property. INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property. NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds). RECOMMENDED (default) - Use the recommended set of managed data identifiers. If this value is null, the job uses the recommended set of managed data identifiers. If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts. To learn about individual managed data identifiers or determine which ones are in the recommended set, see Using managed data identifiers or Recommended managed data identifiers in the Amazon Macie User Guide. public let managedDataIdentifierSelector: ManagedDataIdentifierSelector? /// The custom name of the job. public let name: String? @@ -2048,7 +2155,7 @@ extension Macie2 { public let scheduleFrequency: JobScheduleFrequency? /// The number of times that the job has run and processing statistics for the job's current run. public let statistics: Statistics? - /// A map of key-value pairs that specifies which tags (keys and values) are associated with the classification job. + /// A map of key-value pairs that specifies which tags (keys and values) are associated with the job. public let tags: [String: String]? /// If the current status of the job is USER_PAUSED, specifies when the job was paused and when the job or job run will expire and be cancelled if it isn't resumed. This value is present only if the value for jobStatus is USER_PAUSED. public let userPausedDetails: UserPausedDetails? @@ -2426,9 +2533,9 @@ extension Macie2 { public struct FindingActor: AWSDecodableShape { /// The domain name of the device that the entity used to perform the action on the affected resource. public let domainDetails: DomainDetails? - /// The IP address of the device that the entity used to perform the action on the affected resource. This object also provides information such as the owner and geographic location for the IP address. + /// The IP address and related details about the device that the entity used to perform the action on the affected resource. The details can include information such as the owner and geographic location of the IP address. public let ipAddressDetails: IpAddressDetails? 
- /// The type and other characteristics of the entity that performed the action on the affected resource. + /// The type and other characteristics of the entity that performed the action on the affected resource. This value is null if the action was performed by an anonymous (unauthenticated) entity. public let userIdentity: UserIdentity? public init(domainDetails: DomainDetails? = nil, ipAddressDetails: IpAddressDetails? = nil, userIdentity: UserIdentity? = nil) { @@ -2589,23 +2696,26 @@ extension Macie2 { } public struct GetAutomatedDiscoveryConfigurationResponse: AWSDecodableShape { - /// The unique identifier for the classification scope that's used when performing automated sensitive data discovery for the account. The classification scope specifies S3 buckets to exclude from automated sensitive data discovery. + /// Specifies whether automated sensitive data discovery is enabled automatically for accounts in the organization. Possible values are: ALL, enable it for all existing accounts and new member accounts; NEW, enable it only for new member accounts; and, NONE, don't enable it for any accounts. + public let autoEnableOrganizationMembers: AutoEnableMode? + /// The unique identifier for the classification scope that's used when performing automated sensitive data discovery. The classification scope specifies S3 buckets to exclude from analyses. public let classificationScopeId: String? - /// The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently disabled for the account. This value is null if automated sensitive data discovery wasn't enabled and subsequently disabled for the account. + /// The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently disabled. This value is null if automated sensitive data discovery is currently enabled. @OptionalCustomCoding public var disabledAt: Date? - /// The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was initially enabled for the account. This value is null if automated sensitive data discovery has never been enabled for the account. + /// The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was initially enabled. This value is null if automated sensitive data discovery has never been enabled. @OptionalCustomCoding public var firstEnabledAt: Date? - /// The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently enabled or disabled for the account. + /// The date and time, in UTC and extended ISO 8601 format, when the configuration settings or status of automated sensitive data discovery was most recently changed. @OptionalCustomCoding public var lastUpdatedAt: Date? - /// The unique identifier for the sensitivity inspection template that's used when performing automated sensitive data discovery for the account. The template specifies which allow lists, custom data identifiers, and managed data identifiers to use when analyzing data. + /// The unique identifier for the sensitivity inspection template that's used when performing automated sensitive data discovery. The template specifies which allow lists, custom data identifiers, and managed data identifiers to use when analyzing data. public let sensitivityInspectionTemplateId: String? - /// The current status of the automated sensitive data discovery configuration for the account. 
Possible values are: ENABLED, use the specified settings to perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account. + /// The current status of automated sensitive data discovery for the organization or account. Possible values are: ENABLED, use the specified settings to perform automated sensitive data discovery activities; and, DISABLED, don't perform automated sensitive data discovery activities. public let status: AutomatedDiscoveryStatus? - public init(classificationScopeId: String? = nil, disabledAt: Date? = nil, firstEnabledAt: Date? = nil, lastUpdatedAt: Date? = nil, sensitivityInspectionTemplateId: String? = nil, status: AutomatedDiscoveryStatus? = nil) { + public init(autoEnableOrganizationMembers: AutoEnableMode? = nil, classificationScopeId: String? = nil, disabledAt: Date? = nil, firstEnabledAt: Date? = nil, lastUpdatedAt: Date? = nil, sensitivityInspectionTemplateId: String? = nil, status: AutomatedDiscoveryStatus? = nil) { + self.autoEnableOrganizationMembers = autoEnableOrganizationMembers self.classificationScopeId = classificationScopeId self.disabledAt = disabledAt self.firstEnabledAt = firstEnabledAt @@ -2615,6 +2725,7 @@ extension Macie2 { } private enum CodingKeys: String, CodingKey { + case autoEnableOrganizationMembers = "autoEnableOrganizationMembers" case classificationScopeId = "classificationScopeId" case disabledAt = "disabledAt" case firstEnabledAt = "firstEnabledAt" @@ -3265,9 +3376,9 @@ extension Macie2 { public struct GetSensitivityInspectionTemplateResponse: AWSDecodableShape { /// The custom description of the template. public let description: String? - /// The managed data identifiers that are explicitly excluded (not used) when analyzing data. + /// The managed data identifiers that are explicitly excluded (not used) when performing automated sensitive data discovery. public let excludes: SensitivityInspectionTemplateExcludes? - /// The allow lists, custom data identifiers, and managed data identifiers that are explicitly included (used) when analyzing data. + /// The allow lists, custom data identifiers, and managed data identifiers that are explicitly included (used) when performing automated sensitive data discovery. public let includes: SensitivityInspectionTemplateIncludes? /// The name of the template: automated-sensitive-data-discovery. public let name: String? @@ -3545,11 +3656,11 @@ extension Macie2 { } public struct JobDetails: AWSDecodableShape { - /// Specifies whether any one-time or recurring jobs are configured to analyze data in the bucket. Possible values are: TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more jobs and at least one of those jobs has a status other than CANCELLED. Or the bucket matched the bucket criteria (S3BucketCriteriaForJob) for at least one job that previously ran. FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any jobs, all the jobs that explicitly include the bucket in their bucket definitions have a status of CANCELLED, or the bucket didn't match the bucket criteria (S3BucketCriteriaForJob) for any jobs that previously ran. UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket. + /// Specifies whether any one-time or recurring jobs are configured to analyze objects in the bucket. 
Possible values are: TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more jobs and at least one of those jobs has a status other than CANCELLED. Or the bucket matched the bucket criteria (S3BucketCriteriaForJob) for at least one job that previously ran. FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any jobs, all the jobs that explicitly include the bucket in their bucket definitions have a status of CANCELLED, or the bucket didn't match the bucket criteria (S3BucketCriteriaForJob) for any jobs that previously ran. UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket. public let isDefinedInJob: IsDefinedInJob? - /// Specifies whether any recurring jobs are configured to analyze data in the bucket. Possible values are: TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more recurring jobs or the bucket matches the bucket criteria (S3BucketCriteriaForJob) for one or more recurring jobs. At least one of those jobs has a status other than CANCELLED. FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any recurring jobs, the bucket doesn't match the bucket criteria (S3BucketCriteriaForJob) for any recurring jobs, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED. UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket. + /// Specifies whether any recurring jobs are configured to analyze objects in the bucket. Possible values are: TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more recurring jobs or the bucket matches the bucket criteria (S3BucketCriteriaForJob) for one or more recurring jobs. At least one of those jobs has a status other than CANCELLED. FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any recurring jobs, the bucket doesn't match the bucket criteria (S3BucketCriteriaForJob) for any recurring jobs, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED. UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket. public let isMonitoredByJob: IsMonitoredByJob? - /// The unique identifier for the job that ran most recently and is configured to analyze data in the bucket, either the latest run of a recurring job or the only run of a one-time job. This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN. + /// The unique identifier for the job that ran most recently and is configured to analyze objects in the bucket, either the latest run of a recurring job or the only run of a one-time job. This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN. public let lastJobId: String? /// The date and time, in UTC and extended ISO 8601 format, when the job (lastJobId) started. If the job is a recurring job, this value indicates when the most recent run started. This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN. @OptionalCustomCoding @@ -3740,6 +3851,53 @@ extension Macie2 { } } + public struct ListAutomatedDiscoveryAccountsRequest: AWSEncodableShape { + /// The Amazon Web Services account ID for each account, for as many as 50 accounts. 
To retrieve the status for multiple accounts, append the accountIds parameter and argument for each account, separated by an ampersand (&). To retrieve the status for all the accounts in an organization, omit this parameter. + public let accountIds: [String]? + /// The maximum number of items to include in each page of a paginated response. + public let maxResults: Int? + /// The nextToken string that specifies which page of results to return in a paginated response. + public let nextToken: String? + + public init(accountIds: [String]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.accountIds = accountIds + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.accountIds, key: "accountIds") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAutomatedDiscoveryAccountsResponse: AWSDecodableShape { + /// An array of objects, one for each account specified in the request. Each object specifies the Amazon Web Services account ID for an account and the current status of automated sensitive data discovery for that account. + public let items: [AutomatedDiscoveryAccount]? + /// The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages. + public let nextToken: String? + + public init(items: [AutomatedDiscoveryAccount]? = nil, nextToken: String? = nil) { + self.items = items + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case items = "items" + case nextToken = "nextToken" + } + } + public struct ListClassificationJobsRequest: AWSEncodableShape { /// The criteria to use to filter the results. public let filterCriteria: ListJobsFilterCriteria? @@ -4335,6 +4493,8 @@ extension Macie2 { public struct MatchingBucket: AWSDecodableShape { /// The unique identifier for the Amazon Web Services account that owns the bucket. public let accountId: String? + /// Specifies whether automated sensitive data discovery is currently configured to analyze objects in the bucket. Possible values are: MONITORED, the bucket is included in analyses; and, NOT_MONITORED, the bucket is excluded from analyses. If automated sensitive data discovery is disabled for your account, this value is NOT_MONITORED. + public let automatedDiscoveryMonitoringStatus: AutomatedDiscoveryMonitoringStatus? /// The name of the bucket. public let bucketName: String? /// The total number of objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format. @@ -4347,14 +4507,14 @@ extension Macie2 { public let errorMessage: String? /// Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently. public let jobDetails: JobDetails? 
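Given the ListAutomatedDiscoveryAccounts request and response shapes above, the paginator added in Macie2_api.swift can be consumed as an async sequence. A usage sketch, assuming an existing AWSClient; the page size of 25 mirrors the maxResults upper bound in the request validation.

import SotoMacie2

// Sketch (assumed setup): page through the automated sensitive data discovery
// status of every account visible to the administrator account.
func printDiscoveryStatuses(client: AWSClient) async throws {
    let macie = Macie2(client: client, region: .useast1)
    let request = Macie2.ListAutomatedDiscoveryAccountsRequest(maxResults: 25)
    for try await page in macie.listAutomatedDiscoveryAccountsPaginator(request) {
        for account in page.items ?? [] {
            print("\(account.accountId ?? "?"): \(account.status?.rawValue ?? "unknown")")
        }
    }
}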
- /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed data in the bucket while performing automated sensitive data discovery for your account. This value is null if automated sensitive data discovery is currently disabled for your account. + /// The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account. @OptionalCustomCoding public var lastAutomatedDiscoveryTime: Date? /// The total number of objects in the bucket. public let objectCount: Int64? /// The total number of objects in the bucket, grouped by server-side encryption type. This includes a grouping that reports the total number of objects that aren't encrypted or use client-side encryption. public let objectCountByEncryptionType: ObjectCountByEncryptionType? - /// The current sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). This value is null if automated sensitive data discovery is currently disabled for your account. + /// The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses. public let sensitivityScore: Int? /// The total storage size, in bytes, of the bucket. If versioning is enabled for the bucket, Amazon Macie calculates this value based on the size of the latest version of each object in the bucket. This value doesn't reflect the storage size of all versions of each object in the bucket. public let sizeInBytes: Int64? @@ -4365,8 +4525,9 @@ extension Macie2 { /// The total storage size, in bytes, of the objects that Amazon Macie can't analyze in the bucket. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format. public let unclassifiableObjectSizeInBytes: ObjectLevelStatistics? - public init(accountId: String? = nil, bucketName: String? = nil, classifiableObjectCount: Int64? = nil, classifiableSizeInBytes: Int64? = nil, errorCode: BucketMetadataErrorCode? = nil, errorMessage: String? = nil, jobDetails: JobDetails? = nil, lastAutomatedDiscoveryTime: Date? = nil, objectCount: Int64? = nil, objectCountByEncryptionType: ObjectCountByEncryptionType? = nil, sensitivityScore: Int? = nil, sizeInBytes: Int64? = nil, sizeInBytesCompressed: Int64? = nil, unclassifiableObjectCount: ObjectLevelStatistics? = nil, unclassifiableObjectSizeInBytes: ObjectLevelStatistics? = nil) { + public init(accountId: String? = nil, automatedDiscoveryMonitoringStatus: AutomatedDiscoveryMonitoringStatus? = nil, bucketName: String? = nil, classifiableObjectCount: Int64? = nil, classifiableSizeInBytes: Int64? = nil, errorCode: BucketMetadataErrorCode? = nil, errorMessage: String? = nil, jobDetails: JobDetails? = nil, lastAutomatedDiscoveryTime: Date? = nil, objectCount: Int64? = nil, objectCountByEncryptionType: ObjectCountByEncryptionType? = nil, sensitivityScore: Int? = nil, sizeInBytes: Int64? = nil, sizeInBytesCompressed: Int64? = nil, unclassifiableObjectCount: ObjectLevelStatistics? = nil, unclassifiableObjectSizeInBytes: ObjectLevelStatistics?
= nil) { self.accountId = accountId + self.automatedDiscoveryMonitoringStatus = automatedDiscoveryMonitoringStatus self.bucketName = bucketName self.classifiableObjectCount = classifiableObjectCount self.classifiableSizeInBytes = classifiableSizeInBytes @@ -4385,6 +4546,7 @@ extension Macie2 { private enum CodingKeys: String, CodingKey { case accountId = "accountId" + case automatedDiscoveryMonitoringStatus = "automatedDiscoveryMonitoringStatus" case bucketName = "bucketName" case classifiableObjectCount = "classifiableObjectCount" case classifiableSizeInBytes = "classifiableSizeInBytes" @@ -4984,7 +5146,7 @@ extension Macie2 { } public struct S3Destination: AWSEncodableShape & AWSDecodableShape { - /// The name of the bucket. + /// The name of the bucket. This must be the name of an existing general purpose bucket. public let bucketName: String? /// The path prefix to use in the path to the location in the bucket. This prefix specifies where to store classification results in the bucket. public let keyPrefix: String? @@ -5220,7 +5382,7 @@ extension Macie2 { public let comparator: SearchResourcesComparator? /// The property to use in the condition. public let key: SearchResourcesSimpleCriterionKey? - /// An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are: ACCOUNT_ID - A string that represents the unique identifier for the Amazon Web Services account that owns the resource. S3_BUCKET_EFFECTIVE_PERMISSION - A string that represents an enumerated value that Macie defines for the BucketPublicAccess.effectivePermission property of an S3 bucket. S3_BUCKET_NAME - A string that represents the name of an S3 bucket. S3_BUCKET_SHARED_ACCESS - A string that represents an enumerated value that Macie defines for the BucketMetadata.sharedAccess property of an S3 bucket. Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values. + /// An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are: ACCOUNT_ID - A string that represents the unique identifier for the Amazon Web Services account that owns the resource. AUTOMATED_DISCOVERY_MONITORING_STATUS - A string that represents an enumerated value that Macie defines for the BucketMetadata.automatedDiscoveryMonitoringStatus property of an S3 bucket. S3_BUCKET_EFFECTIVE_PERMISSION - A string that represents an enumerated value that Macie defines for the BucketPublicAccess.effectivePermission property of an S3 bucket. S3_BUCKET_NAME - A string that represents the name of an S3 bucket. S3_BUCKET_SHARED_ACCESS - A string that represents an enumerated value that Macie defines for the BucketMetadata.sharedAccess property of an S3 bucket. Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values. public let values: [String]? public init(comparator: SearchResourcesComparator? = nil, key: SearchResourcesSimpleCriterionKey? = nil, values: [String]? = nil) { @@ -5882,14 +6044,18 @@ extension Macie2 { } public struct UpdateAutomatedDiscoveryConfigurationRequest: AWSEncodableShape { - /// The new status of automated sensitive data discovery for the account. 
Valid values are: ENABLED, start or resume automated sensitive data discovery activities for the account; and, DISABLED, stop performing automated sensitive data discovery activities for the account. When you enable automated sensitive data discovery for the first time, Amazon Macie uses default configuration settings to determine which data sources to analyze and which managed data identifiers to use. To change these settings, use the UpdateClassificationScope and UpdateSensitivityInspectionTemplate operations, respectively. If you change the settings and subsequently disable the configuration, Amazon Macie retains your changes. + /// Specifies whether to automatically enable automated sensitive data discovery for accounts in the organization. Valid values are: ALL (default), enable it for all existing accounts and new member accounts; NEW, enable it only for new member accounts; and, NONE, don't enable it for any accounts. If you specify NEW or NONE, automated sensitive data discovery continues to be enabled for any existing accounts that it's currently enabled for. To enable or disable it for individual member accounts, specify NEW or NONE, and then enable or disable it for each account by using the BatchUpdateAutomatedDiscoveryAccounts operation. + public let autoEnableOrganizationMembers: AutoEnableMode? + /// The new status of automated sensitive data discovery for the organization or account. Valid values are: ENABLED, start or resume all automated sensitive data discovery activities; and, DISABLED, stop performing all automated sensitive data discovery activities. If you specify DISABLED for an administrator account, you also disable automated sensitive data discovery for all member accounts in the organization. public let status: AutomatedDiscoveryStatus? - public init(status: AutomatedDiscoveryStatus? = nil) { + public init(autoEnableOrganizationMembers: AutoEnableMode? = nil, status: AutomatedDiscoveryStatus? = nil) { + self.autoEnableOrganizationMembers = autoEnableOrganizationMembers self.status = status } private enum CodingKeys: String, CodingKey { + case autoEnableOrganizationMembers = "autoEnableOrganizationMembers" case status = "status" } } @@ -6070,7 +6236,7 @@ extension Macie2 { } public struct UpdateOrganizationConfigurationRequest: AWSEncodableShape { - /// Specifies whether to enable Amazon Macie automatically for an account when the account is added to the organization in Organizations. + /// Specifies whether to enable Amazon Macie automatically for accounts that are added to the organization in Organizations. public let autoEnable: Bool? public init(autoEnable: Bool? = nil) { @@ -6205,11 +6371,11 @@ extension Macie2 { public struct UpdateSensitivityInspectionTemplateRequest: AWSEncodableShape { /// A custom description of the template. The description can contain as many as 200 characters. public let description: String? - /// The managed data identifiers to explicitly exclude (not use) when analyzing data. To exclude an allow list or custom data identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively. + /// The managed data identifiers to explicitly exclude (not use) when performing automated sensitive data discovery. 
To exclude an allow list or custom data identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively. public let excludes: SensitivityInspectionTemplateExcludes? /// The unique identifier for the Amazon Macie resource that the request applies to. public let id: String - /// The allow lists, custom data identifiers, and managed data identifiers to explicitly include (use) when analyzing data. + /// The allow lists, custom data identifiers, and managed data identifiers to explicitly include (use) when performing automated sensitive data discovery. public let includes: SensitivityInspectionTemplateIncludes? public init(description: String? = nil, excludes: SensitivityInspectionTemplateExcludes? = nil, id: String, includes: SensitivityInspectionTemplateIncludes? = nil) { @@ -6267,7 +6433,7 @@ extension Macie2 { public struct UsageRecord: AWSDecodableShape { /// The unique identifier for the Amazon Web Services account that the data applies to. public let accountId: String? - /// The date and time, in UTC and extended ISO 8601 format, when the free trial of automated sensitive data discovery started for the account. If the account is a member account in an organization, this value is the same as the value for the organization's Amazon Macie administrator account. + /// The date and time, in UTC and extended ISO 8601 format, when the free trial of automated sensitive data discovery started for the account. This value is null if automated sensitive data discovery hasn't been enabled for the account. @OptionalCustomCoding public var automatedDiscoveryFreeTrialStartDate: Date? /// The date and time, in UTC and extended ISO 8601 format, when the Amazon Macie free trial started for the account. diff --git a/Sources/Soto/Services/MailManager/MailManager_api.swift b/Sources/Soto/Services/MailManager/MailManager_api.swift new file mode 100644 index 0000000000..c504ebdf9a --- /dev/null +++ b/Sources/Soto/Services/MailManager/MailManager_api.swift @@ -0,0 +1,957 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS MailManager service. +/// +/// AWS SES Mail Manager API AWS SES Mail Manager API contains operations and data types that comprise the Mail Manager feature of Amazon Simple Email Service. Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen your organization's email infrastructure, simplify email workflow management, and streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer Guide. 
+public struct MailManager: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the MailManager client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + amzTarget: "MailManagerSvc", + serviceName: "MailManager", + serviceIdentifier: "mail-manager", + signingName: "ses", + serviceProtocol: .json(version: "1.0"), + apiVersion: "2023-10-17", + endpoint: endpoint, + errorType: MailManagerErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Creates an Add On instance for the subscription indicated in the request. The resulting Amazon Resource Name (ARN) can be used in a conditional statement for a rule set or traffic policy. + @Sendable + public func createAddonInstance(_ input: CreateAddonInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAddonInstanceResponse { + return try await self.client.execute( + operation: "CreateAddonInstance", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a subscription for an Add On representing the acceptance of its terms of use and additional pricing. The subscription can then be used to create an instance for use in rule sets or traffic policies. + @Sendable + public func createAddonSubscription(_ input: CreateAddonSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAddonSubscriptionResponse { + return try await self.client.execute( + operation: "CreateAddonSubscription", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a new email archive resource for storing and retaining emails. + @Sendable + public func createArchive(_ input: CreateArchiveRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateArchiveResponse { + return try await self.client.execute( + operation: "CreateArchive", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Provision a new ingress endpoint resource.
+ @Sendable + public func createIngressPoint(_ input: CreateIngressPointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIngressPointResponse { + return try await self.client.execute( + operation: "CreateIngressPoint", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a relay resource which can be used in rules to relay incoming emails to defined relay destinations. + @Sendable + public func createRelay(_ input: CreateRelayRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateRelayResponse { + return try await self.client.execute( + operation: "CreateRelay", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Provision a new rule set. + @Sendable + public func createRuleSet(_ input: CreateRuleSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateRuleSetResponse { + return try await self.client.execute( + operation: "CreateRuleSet", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Provision a new traffic policy resource. + @Sendable + public func createTrafficPolicy(_ input: CreateTrafficPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTrafficPolicyResponse { + return try await self.client.execute( + operation: "CreateTrafficPolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an Add On instance. + @Sendable + public func deleteAddonInstance(_ input: DeleteAddonInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAddonInstanceResponse { + return try await self.client.execute( + operation: "DeleteAddonInstance", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an Add On subscription. + @Sendable + public func deleteAddonSubscription(_ input: DeleteAddonSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAddonSubscriptionResponse { + return try await self.client.execute( + operation: "DeleteAddonSubscription", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Initiates deletion of an email archive. This changes the archive state to pending deletion. In this state, no new emails can be added, and existing archived emails become inaccessible (search, export, download). The archive and all of its contents will be permanently deleted 30 days after entering the pending deletion state, regardless of the configured retention period. + @Sendable + public func deleteArchive(_ input: DeleteArchiveRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteArchiveResponse { + return try await self.client.execute( + operation: "DeleteArchive", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Delete an ingress endpoint resource. + @Sendable + public func deleteIngressPoint(_ input: DeleteIngressPointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteIngressPointResponse { + return try await self.client.execute( + operation: "DeleteIngressPoint", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes an existing relay resource. 
+ @Sendable + public func deleteRelay(_ input: DeleteRelayRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteRelayResponse { + return try await self.client.execute( + operation: "DeleteRelay", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Delete a rule set. + @Sendable + public func deleteRuleSet(_ input: DeleteRuleSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteRuleSetResponse { + return try await self.client.execute( + operation: "DeleteRuleSet", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Delete a traffic policy resource. + @Sendable + public func deleteTrafficPolicy(_ input: DeleteTrafficPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTrafficPolicyResponse { + return try await self.client.execute( + operation: "DeleteTrafficPolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets detailed information about an Add On instance. + @Sendable + public func getAddonInstance(_ input: GetAddonInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAddonInstanceResponse { + return try await self.client.execute( + operation: "GetAddonInstance", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Gets detailed information about an Add On subscription. + @Sendable + public func getAddonSubscription(_ input: GetAddonSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAddonSubscriptionResponse { + return try await self.client.execute( + operation: "GetAddonSubscription", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the full details and current state of a specified email archive. + @Sendable + public func getArchive(_ input: GetArchiveRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetArchiveResponse { + return try await self.client.execute( + operation: "GetArchive", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the details and current status of a specific email archive export job. + @Sendable + public func getArchiveExport(_ input: GetArchiveExportRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetArchiveExportResponse { + return try await self.client.execute( + operation: "GetArchiveExport", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a pre-signed URL that provides temporary download access to the specific email message stored in the archive. + @Sendable + public func getArchiveMessage(_ input: GetArchiveMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetArchiveMessageResponse { + return try await self.client.execute( + operation: "GetArchiveMessage", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns the textual content of a specific email message stored in the archive. Attachments are not included. 
+ @Sendable + public func getArchiveMessageContent(_ input: GetArchiveMessageContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetArchiveMessageContentResponse { + return try await self.client.execute( + operation: "GetArchiveMessageContent", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the details and current status of a specific email archive search job. + @Sendable + public func getArchiveSearch(_ input: GetArchiveSearchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetArchiveSearchResponse { + return try await self.client.execute( + operation: "GetArchiveSearch", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns the results of a completed email archive search job. + @Sendable + public func getArchiveSearchResults(_ input: GetArchiveSearchResultsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetArchiveSearchResultsResponse { + return try await self.client.execute( + operation: "GetArchiveSearchResults", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Fetch ingress endpoint resource attributes. + @Sendable + public func getIngressPoint(_ input: GetIngressPointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetIngressPointResponse { + return try await self.client.execute( + operation: "GetIngressPoint", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Fetch the relay resource and it's attributes. + @Sendable + public func getRelay(_ input: GetRelayRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRelayResponse { + return try await self.client.execute( + operation: "GetRelay", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Fetch attributes of a rule set. + @Sendable + public func getRuleSet(_ input: GetRuleSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetRuleSetResponse { + return try await self.client.execute( + operation: "GetRuleSet", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Fetch attributes of a traffic policy resource. + @Sendable + public func getTrafficPolicy(_ input: GetTrafficPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTrafficPolicyResponse { + return try await self.client.execute( + operation: "GetTrafficPolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists all Add On instances in your account. + @Sendable + public func listAddonInstances(_ input: ListAddonInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAddonInstancesResponse { + return try await self.client.execute( + operation: "ListAddonInstances", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists all Add On subscriptions in your account. 
+ @Sendable + public func listAddonSubscriptions(_ input: ListAddonSubscriptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAddonSubscriptionsResponse { + return try await self.client.execute( + operation: "ListAddonSubscriptions", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of email archive export jobs. + @Sendable + public func listArchiveExports(_ input: ListArchiveExportsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListArchiveExportsResponse { + return try await self.client.execute( + operation: "ListArchiveExports", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of email archive search jobs. + @Sendable + public func listArchiveSearches(_ input: ListArchiveSearchesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListArchiveSearchesResponse { + return try await self.client.execute( + operation: "ListArchiveSearches", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Returns a list of all email archives in your account. + @Sendable + public func listArchives(_ input: ListArchivesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListArchivesResponse { + return try await self.client.execute( + operation: "ListArchives", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// List all ingress endpoint resources. + @Sendable + public func listIngressPoints(_ input: ListIngressPointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListIngressPointsResponse { + return try await self.client.execute( + operation: "ListIngressPoints", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists all the existing relay resources. + @Sendable + public func listRelays(_ input: ListRelaysRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRelaysResponse { + return try await self.client.execute( + operation: "ListRelays", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// List rule sets for this account. + @Sendable + public func listRuleSets(_ input: ListRuleSetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRuleSetsResponse { + return try await self.client.execute( + operation: "ListRuleSets", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the list of tags (keys and values) assigned to the resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// List traffic policy resources. 
+ @Sendable + public func listTrafficPolicies(_ input: ListTrafficPoliciesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTrafficPoliciesResponse { + return try await self.client.execute( + operation: "ListTrafficPolicies", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Initiates an export of emails from the specified archive. + @Sendable + public func startArchiveExport(_ input: StartArchiveExportRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartArchiveExportResponse { + return try await self.client.execute( + operation: "StartArchiveExport", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Initiates a search across emails in the specified archive. + @Sendable + public func startArchiveSearch(_ input: StartArchiveSearchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartArchiveSearchResponse { + return try await self.client.execute( + operation: "StartArchiveSearch", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Stops an in-progress export of emails from an archive. + @Sendable + public func stopArchiveExport(_ input: StopArchiveExportRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopArchiveExportResponse { + return try await self.client.execute( + operation: "StopArchiveExport", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Stops an in-progress archive search job. + @Sendable + public func stopArchiveSearch(_ input: StopArchiveSearchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopArchiveSearchResponse { + return try await self.client.execute( + operation: "StopArchiveSearch", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Adds one or more tags (keys and values) to a specified resource. + @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + return try await self.client.execute( + operation: "TagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Remove one or more tags (keys and values) from a specified resource. + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + return try await self.client.execute( + operation: "UntagResource", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates the attributes of an existing email archive. + @Sendable + public func updateArchive(_ input: UpdateArchiveRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateArchiveResponse { + return try await self.client.execute( + operation: "UpdateArchive", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Update attributes of a provisioned ingress endpoint resource. 
+ @Sendable + public func updateIngressPoint(_ input: UpdateIngressPointRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateIngressPointResponse { + return try await self.client.execute( + operation: "UpdateIngressPoint", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Updates the attributes of an existing relay resource. + @Sendable + public func updateRelay(_ input: UpdateRelayRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRelayResponse { + return try await self.client.execute( + operation: "UpdateRelay", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Update attributes of an already provisioned rule set. + @Sendable + public func updateRuleSet(_ input: UpdateRuleSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRuleSetResponse { + return try await self.client.execute( + operation: "UpdateRuleSet", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Update attributes of an already provisioned traffic policy resource. + @Sendable + public func updateTrafficPolicy(_ input: UpdateTrafficPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTrafficPolicyResponse { + return try await self.client.execute( + operation: "UpdateTrafficPolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +} + +extension MailManager { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You cannot use this initializer directly as there are no public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: MailManager, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension MailManager { + /// Lists all Add On instances in your account. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listAddonInstancesPaginator( + _ input: ListAddonInstancesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListAddonInstancesRequest, ListAddonInstancesResponse> { + return .init( + input: input, + command: self.listAddonInstances, + inputKey: \ListAddonInstancesRequest.nextToken, + outputKey: \ListAddonInstancesResponse.nextToken, + logger: logger + ) + } + + /// Lists all Add On subscriptions in your account. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listAddonSubscriptionsPaginator( + _ input: ListAddonSubscriptionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListAddonSubscriptionsRequest, ListAddonSubscriptionsResponse> { + return .init( + input: input, + command: self.listAddonSubscriptions, + inputKey: \ListAddonSubscriptionsRequest.nextToken, + outputKey: \ListAddonSubscriptionsResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of email archive export jobs. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listArchiveExportsPaginator( + _ input: ListArchiveExportsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListArchiveExportsRequest, ListArchiveExportsResponse> { + return .init( + input: input, + command: self.listArchiveExports, + inputKey: \ListArchiveExportsRequest.nextToken, + outputKey: \ListArchiveExportsResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of email archive search jobs. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listArchiveSearchesPaginator( + _ input: ListArchiveSearchesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListArchiveSearchesRequest, ListArchiveSearchesResponse> { + return .init( + input: input, + command: self.listArchiveSearches, + inputKey: \ListArchiveSearchesRequest.nextToken, + outputKey: \ListArchiveSearchesResponse.nextToken, + logger: logger + ) + } + + /// Returns a list of all email archives in your account. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listArchivesPaginator( + _ input: ListArchivesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListArchivesRequest, ListArchivesResponse> { + return .init( + input: input, + command: self.listArchives, + inputKey: \ListArchivesRequest.nextToken, + outputKey: \ListArchivesResponse.nextToken, + logger: logger + ) + } + + /// List all ingress endpoint resources. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listIngressPointsPaginator( + _ input: ListIngressPointsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListIngressPointsRequest, ListIngressPointsResponse> { + return .init( + input: input, + command: self.listIngressPoints, + inputKey: \ListIngressPointsRequest.nextToken, + outputKey: \ListIngressPointsResponse.nextToken, + logger: logger + ) + } + + /// Lists all the existing relay resources. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listRelaysPaginator( + _ input: ListRelaysRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListRelaysRequest, ListRelaysResponse> { + return .init( + input: input, + command: self.listRelays, + inputKey: \ListRelaysRequest.nextToken, + outputKey: \ListRelaysResponse.nextToken, + logger: logger + ) + } + + /// List rule sets for this account. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listRuleSetsPaginator( + _ input: ListRuleSetsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListRuleSetsRequest, ListRuleSetsResponse> { + return .init( + input: input, + command: self.listRuleSets, + inputKey: \ListRuleSetsRequest.nextToken, + outputKey: \ListRuleSetsResponse.nextToken, + logger: logger + ) + } + + /// List traffic policy resources. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listTrafficPoliciesPaginator( + _ input: ListTrafficPoliciesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListTrafficPoliciesRequest, ListTrafficPoliciesResponse> { + return .init( + input: input, + command: self.listTrafficPolicies, + inputKey: \ListTrafficPoliciesRequest.nextToken, + outputKey: \ListTrafficPoliciesResponse.nextToken, + logger: logger + ) + } +} + +extension MailManager.ListAddonInstancesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListAddonInstancesRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListAddonSubscriptionsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListAddonSubscriptionsRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListArchiveExportsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListArchiveExportsRequest { + return .init( + archiveId: self.archiveId, + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListArchiveSearchesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListArchiveSearchesRequest { + return .init( + archiveId: self.archiveId, + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListArchivesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListArchivesRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListIngressPointsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListIngressPointsRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListRelaysRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListRelaysRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListRuleSetsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListRuleSetsRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} + +extension MailManager.ListTrafficPoliciesRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MailManager.ListTrafficPoliciesRequest { + return .init( + nextToken: token, + pageSize: self.pageSize + ) + } +} diff --git a/Sources/Soto/Services/MailManager/MailManager_shapes.swift b/Sources/Soto/Services/MailManager/MailManager_shapes.swift new file mode 100644 index 0000000000..ff1a5eb51f --- /dev/null +++ b/Sources/Soto/Services/MailManager/MailManager_shapes.swift @@ -0,0 +1,3958 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT.
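A minimal usage sketch, not part of the generated sources: the paginators defined in MailManager_api.swift above handle the nextToken plumbing through usingPaginationToken(_:), so callers only iterate the returned AsyncSequence. The SotoMailManager product name is assumed, as is a default-initialized ListArchivesRequest (its shape is defined later in this file).

import SotoCore
import SotoMailManager  // product name assumed

func archivePageCount(using client: AWSClient) async throws -> Int {
    // Service object built with the initializer shown in MailManager_api.swift.
    let mailManager = MailManager(client: client, region: .useast1)
    var pages = 0
    // listArchivesPaginator yields one ListArchivesResponse per page.
    for try await _ in mailManager.listArchivesPaginator(MailManager.ListArchivesRequest()) {
        pages += 1
    }
    return pages
}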
+ +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension MailManager { + // MARK: Enums + + public enum AcceptAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allow = "ALLOW" + case deny = "DENY" + public var description: String { return self.rawValue } + } + + public enum ActionFailurePolicy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `continue` = "CONTINUE" + case drop = "DROP" + public var description: String { return self.rawValue } + } + + public enum ArchiveBooleanEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hasAttachments = "HAS_ATTACHMENTS" + public var description: String { return self.rawValue } + } + + public enum ArchiveBooleanOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case isFalse = "IS_FALSE" + case isTrue = "IS_TRUE" + public var description: String { return self.rawValue } + } + + public enum ArchiveState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case pendingDeletion = "PENDING_DELETION" + public var description: String { return self.rawValue } + } + + public enum ArchiveStringEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cc = "CC" + case from = "FROM" + case subject = "SUBJECT" + case to = "TO" + public var description: String { return self.rawValue } + } + + public enum ArchiveStringOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case contains = "CONTAINS" + public var description: String { return self.rawValue } + } + + public enum ExportState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cancelled = "CANCELLED" + case completed = "COMPLETED" + case failed = "FAILED" + case preprocessing = "PREPROCESSING" + case processing = "PROCESSING" + case queued = "QUEUED" + public var description: String { return self.rawValue } + } + + public enum IngressBooleanOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case isFalse = "IS_FALSE" + case isTrue = "IS_TRUE" + public var description: String { return self.rawValue } + } + + public enum IngressIpOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cidrMatches = "CIDR_MATCHES" + case notCidrMatches = "NOT_CIDR_MATCHES" + public var description: String { return self.rawValue } + } + + public enum IngressIpv4Attribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case senderIp = "SENDER_IP" + public var description: String { return self.rawValue } + } + + public enum IngressPointStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case closed = "CLOSED" + case deprovisioning = "DEPROVISIONING" + case failed = "FAILED" + case provisioning = "PROVISIONING" + case updating = "UPDATING" + public var description: String { return self.rawValue } + } + + public enum IngressPointStatusToUpdate: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case closed = "CLOSED" + public var description: String { return self.rawValue } + } + + public enum IngressPointType: String, 
CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case auth = "AUTH" + case open = "OPEN" + public var description: String { return self.rawValue } + } + + public enum IngressStringEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case recipient = "RECIPIENT" + public var description: String { return self.rawValue } + } + + public enum IngressStringOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case contains = "CONTAINS" + case endsWith = "ENDS_WITH" + case equals = "EQUALS" + case notEquals = "NOT_EQUALS" + case startsWith = "STARTS_WITH" + public var description: String { return self.rawValue } + } + + public enum IngressTlsAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case tlsProtocol = "TLS_PROTOCOL" + public var description: String { return self.rawValue } + } + + public enum IngressTlsProtocolAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case tls12 = "TLS1_2" + case tls13 = "TLS1_3" + public var description: String { return self.rawValue } + } + + public enum IngressTlsProtocolOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `is` = "IS" + case minimumTlsVersion = "MINIMUM_TLS_VERSION" + public var description: String { return self.rawValue } + } + + public enum MailFrom: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case preserve = "PRESERVE" + case replace = "REPLACE" + public var description: String { return self.rawValue } + } + + public enum RetentionPeriod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case eightYears = "EIGHT_YEARS" + case eighteenMonths = "EIGHTEEN_MONTHS" + case fiveYears = "FIVE_YEARS" + case fourYears = "FOUR_YEARS" + case nineMonths = "NINE_MONTHS" + case nineYears = "NINE_YEARS" + case oneYear = "ONE_YEAR" + case permanent = "PERMANENT" + case sevenYears = "SEVEN_YEARS" + case sixMonths = "SIX_MONTHS" + case sixYears = "SIX_YEARS" + case tenYears = "TEN_YEARS" + case thirtyMonths = "THIRTY_MONTHS" + case threeMonths = "THREE_MONTHS" + case threeYears = "THREE_YEARS" + case twoYears = "TWO_YEARS" + public var description: String { return self.rawValue } + } + + public enum RuleBooleanEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case readReceiptRequested = "READ_RECEIPT_REQUESTED" + case tls = "TLS" + case tlsWrapped = "TLS_WRAPPED" + public var description: String { return self.rawValue } + } + + public enum RuleBooleanOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case isFalse = "IS_FALSE" + case isTrue = "IS_TRUE" + public var description: String { return self.rawValue } + } + + public enum RuleDmarcOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equals = "EQUALS" + case notEquals = "NOT_EQUALS" + public var description: String { return self.rawValue } + } + + public enum RuleDmarcPolicy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case none = "NONE" + case quarantine = "QUARANTINE" + case reject = "REJECT" + public var description: String { return self.rawValue } + } + + public enum RuleIpEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case sourceIp = "SOURCE_IP" + public var description: String { return self.rawValue } + } + + public 
enum RuleIpOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cidrMatches = "CIDR_MATCHES" + case notCidrMatches = "NOT_CIDR_MATCHES" + public var description: String { return self.rawValue } + } + + public enum RuleNumberEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case messageSize = "MESSAGE_SIZE" + public var description: String { return self.rawValue } + } + + public enum RuleNumberOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equals = "EQUALS" + case greaterThan = "GREATER_THAN" + case greaterThanOrEqual = "GREATER_THAN_OR_EQUAL" + case lessThan = "LESS_THAN" + case lessThanOrEqual = "LESS_THAN_OR_EQUAL" + case notEquals = "NOT_EQUALS" + public var description: String { return self.rawValue } + } + + public enum RuleStringEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cc = "CC" + case from = "FROM" + case helo = "HELO" + case mailFrom = "MAIL_FROM" + case recipient = "RECIPIENT" + case sender = "SENDER" + case subject = "SUBJECT" + case to = "TO" + public var description: String { return self.rawValue } + } + + public enum RuleStringOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case contains = "CONTAINS" + case endsWith = "ENDS_WITH" + case equals = "EQUALS" + case notEquals = "NOT_EQUALS" + case startsWith = "STARTS_WITH" + public var description: String { return self.rawValue } + } + + public enum RuleVerdict: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fail = "FAIL" + case gray = "GRAY" + case pass = "PASS" + case processingFailed = "PROCESSING_FAILED" + public var description: String { return self.rawValue } + } + + public enum RuleVerdictAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dkim = "DKIM" + case spf = "SPF" + public var description: String { return self.rawValue } + } + + public enum RuleVerdictOperator: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case equals = "EQUALS" + case notEquals = "NOT_EQUALS" + public var description: String { return self.rawValue } + } + + public enum SearchState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cancelled = "CANCELLED" + case completed = "COMPLETED" + case failed = "FAILED" + case queued = "QUEUED" + case running = "RUNNING" + public var description: String { return self.rawValue } + } + + public enum ArchiveFilterCondition: AWSEncodableShape & AWSDecodableShape, Sendable { + /// A boolean expression to evaluate against email attributes. + case booleanExpression(ArchiveBooleanExpression) + /// A string expression to evaluate against email attributes. 
+ case stringExpression(ArchiveStringExpression) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .booleanExpression: + let value = try container.decode(ArchiveBooleanExpression.self, forKey: .booleanExpression) + self = .booleanExpression(value) + case .stringExpression: + let value = try container.decode(ArchiveStringExpression.self, forKey: .stringExpression) + self = .stringExpression(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .booleanExpression(let value): + try container.encode(value, forKey: .booleanExpression) + case .stringExpression(let value): + try container.encode(value, forKey: .stringExpression) + } + } + + public func validate(name: String) throws { + switch self { + case .stringExpression(let value): + try value.validate(name: "\(name).stringExpression") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case booleanExpression = "BooleanExpression" + case stringExpression = "StringExpression" + } + } + + public enum IngressPointConfiguration: AWSEncodableShape, Sendable { + /// The SecretsManager::Secret ARN of the ingress endpoint resource. + case secretArn(String) + /// The password of the ingress endpoint resource. + case smtpPassword(String) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .secretArn(let value): + try container.encode(value, forKey: .secretArn) + case .smtpPassword(let value): + try container.encode(value, forKey: .smtpPassword) + } + } + + public func validate(name: String) throws { + switch self { + case .secretArn(let value): + try self.validate(value, name: "secretArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):secretsmanager:[a-z0-9-]+:\\d{12}:secret:[a-zA-Z0-9/_+=,.@-]+$") + case .smtpPassword(let value): + try self.validate(value, name: "smtpPassword", parent: name, max: 64) + try self.validate(value, name: "smtpPassword", parent: name, min: 8) + try self.validate(value, name: "smtpPassword", parent: name, pattern: "^[A-Za-z0-9!@#$%^&*()_+\\-=\\[\\]{}|.,?]+$") + } + } + + private enum CodingKeys: String, CodingKey { + case secretArn = "SecretArn" + case smtpPassword = "SmtpPassword" + } + } + + public enum PolicyCondition: AWSEncodableShape & AWSDecodableShape, Sendable { + /// This represents a boolean type condition matching on the incoming mail. It performs the boolean operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'. + case booleanExpression(IngressBooleanExpression) + /// This represents an IP based condition matching on the incoming mail. It performs the operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'. + case ipExpression(IngressIpv4Expression) + /// This represents a string based condition matching on the incoming mail. It performs the string operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'. 
+ case stringExpression(IngressStringExpression) + /// This represents a TLS based condition matching on the incoming mail. It performs the operation configured in 'Operator' and evaluates the 'Protocol' object against the 'Value'. + case tlsExpression(IngressTlsProtocolExpression) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .booleanExpression: + let value = try container.decode(IngressBooleanExpression.self, forKey: .booleanExpression) + self = .booleanExpression(value) + case .ipExpression: + let value = try container.decode(IngressIpv4Expression.self, forKey: .ipExpression) + self = .ipExpression(value) + case .stringExpression: + let value = try container.decode(IngressStringExpression.self, forKey: .stringExpression) + self = .stringExpression(value) + case .tlsExpression: + let value = try container.decode(IngressTlsProtocolExpression.self, forKey: .tlsExpression) + self = .tlsExpression(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .booleanExpression(let value): + try container.encode(value, forKey: .booleanExpression) + case .ipExpression(let value): + try container.encode(value, forKey: .ipExpression) + case .stringExpression(let value): + try container.encode(value, forKey: .stringExpression) + case .tlsExpression(let value): + try container.encode(value, forKey: .tlsExpression) + } + } + + public func validate(name: String) throws { + switch self { + case .booleanExpression(let value): + try value.validate(name: "\(name).booleanExpression") + case .ipExpression(let value): + try value.validate(name: "\(name).ipExpression") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case booleanExpression = "BooleanExpression" + case ipExpression = "IpExpression" + case stringExpression = "StringExpression" + case tlsExpression = "TlsExpression" + } + } + + public enum RelayAuthentication: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Keep an empty structure if the relay destination server does not require SMTP credential authentication. + case noAuthentication(NoAuthentication) + /// The ARN of the secret created in secrets manager where the relay server's SMTP credentials are stored. 
+ case secretArn(String) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .noAuthentication: + let value = try container.decode(NoAuthentication.self, forKey: .noAuthentication) + self = .noAuthentication(value) + case .secretArn: + let value = try container.decode(String.self, forKey: .secretArn) + self = .secretArn(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .noAuthentication(let value): + try container.encode(value, forKey: .noAuthentication) + case .secretArn(let value): + try container.encode(value, forKey: .secretArn) + } + } + + public func validate(name: String) throws { + switch self { + case .secretArn(let value): + try self.validate(value, name: "secretArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov):secretsmanager:[a-z0-9-]+:\\d{12}:secret:[a-zA-Z0-9/_+=,.@-]+$") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case noAuthentication = "NoAuthentication" + case secretArn = "SecretArn" + } + } + + public enum RuleAction: AWSEncodableShape & AWSDecodableShape, Sendable { + /// This action adds a header. This can be used to add arbitrary email headers. + case addHeader(AddHeaderAction) + /// This action archives the email. This can be used to deliver an email to an archive. + case archive(ArchiveAction) + /// This action delivers an email to a WorkMail mailbox. + case deliverToMailbox(DeliverToMailboxAction) + /// This action terminates the evaluation of rules in the rule set. + case drop(DropAction) + /// This action relays the email to another SMTP server. + case relay(RelayAction) + /// The action replaces certain or all recipients with a different set of recipients. + case replaceRecipient(ReplaceRecipientAction) + /// This action sends the email to the internet. + case send(SendAction) + /// This action writes the MIME content of the email to an S3 bucket. 
+ case writeToS3(S3Action) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .addHeader: + let value = try container.decode(AddHeaderAction.self, forKey: .addHeader) + self = .addHeader(value) + case .archive: + let value = try container.decode(ArchiveAction.self, forKey: .archive) + self = .archive(value) + case .deliverToMailbox: + let value = try container.decode(DeliverToMailboxAction.self, forKey: .deliverToMailbox) + self = .deliverToMailbox(value) + case .drop: + let value = try container.decode(DropAction.self, forKey: .drop) + self = .drop(value) + case .relay: + let value = try container.decode(RelayAction.self, forKey: .relay) + self = .relay(value) + case .replaceRecipient: + let value = try container.decode(ReplaceRecipientAction.self, forKey: .replaceRecipient) + self = .replaceRecipient(value) + case .send: + let value = try container.decode(SendAction.self, forKey: .send) + self = .send(value) + case .writeToS3: + let value = try container.decode(S3Action.self, forKey: .writeToS3) + self = .writeToS3(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .addHeader(let value): + try container.encode(value, forKey: .addHeader) + case .archive(let value): + try container.encode(value, forKey: .archive) + case .deliverToMailbox(let value): + try container.encode(value, forKey: .deliverToMailbox) + case .drop(let value): + try container.encode(value, forKey: .drop) + case .relay(let value): + try container.encode(value, forKey: .relay) + case .replaceRecipient(let value): + try container.encode(value, forKey: .replaceRecipient) + case .send(let value): + try container.encode(value, forKey: .send) + case .writeToS3(let value): + try container.encode(value, forKey: .writeToS3) + } + } + + public func validate(name: String) throws { + switch self { + case .addHeader(let value): + try value.validate(name: "\(name).addHeader") + case .archive(let value): + try value.validate(name: "\(name).archive") + case .deliverToMailbox(let value): + try value.validate(name: "\(name).deliverToMailbox") + case .relay(let value): + try value.validate(name: "\(name).relay") + case .replaceRecipient(let value): + try value.validate(name: "\(name).replaceRecipient") + case .send(let value): + try value.validate(name: "\(name).send") + case .writeToS3(let value): + try value.validate(name: "\(name).writeToS3") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case addHeader = "AddHeader" + case archive = "Archive" + case deliverToMailbox = "DeliverToMailbox" + case drop = "Drop" + case relay = "Relay" + case replaceRecipient = "ReplaceRecipient" + case send = "Send" + case writeToS3 = "WriteToS3" + } + } + + public enum RuleCondition: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The condition applies to a boolean expression passed in this field. + case booleanExpression(RuleBooleanExpression) + /// The condition applies to a DMARC policy expression passed in this field. + case dmarcExpression(RuleDmarcExpression) + /// The condition applies to an IP address expression passed in this field. 
+ case ipExpression(RuleIpExpression) + /// The condition applies to a number expression passed in this field. + case numberExpression(RuleNumberExpression) + /// The condition applies to a string expression passed in this field. + case stringExpression(RuleStringExpression) + /// The condition applies to a verdict expression passed in this field. + case verdictExpression(RuleVerdictExpression) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .booleanExpression: + let value = try container.decode(RuleBooleanExpression.self, forKey: .booleanExpression) + self = .booleanExpression(value) + case .dmarcExpression: + let value = try container.decode(RuleDmarcExpression.self, forKey: .dmarcExpression) + self = .dmarcExpression(value) + case .ipExpression: + let value = try container.decode(RuleIpExpression.self, forKey: .ipExpression) + self = .ipExpression(value) + case .numberExpression: + let value = try container.decode(RuleNumberExpression.self, forKey: .numberExpression) + self = .numberExpression(value) + case .stringExpression: + let value = try container.decode(RuleStringExpression.self, forKey: .stringExpression) + self = .stringExpression(value) + case .verdictExpression: + let value = try container.decode(RuleVerdictExpression.self, forKey: .verdictExpression) + self = .verdictExpression(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .booleanExpression(let value): + try container.encode(value, forKey: .booleanExpression) + case .dmarcExpression(let value): + try container.encode(value, forKey: .dmarcExpression) + case .ipExpression(let value): + try container.encode(value, forKey: .ipExpression) + case .numberExpression(let value): + try container.encode(value, forKey: .numberExpression) + case .stringExpression(let value): + try container.encode(value, forKey: .stringExpression) + case .verdictExpression(let value): + try container.encode(value, forKey: .verdictExpression) + } + } + + public func validate(name: String) throws { + switch self { + case .dmarcExpression(let value): + try value.validate(name: "\(name).dmarcExpression") + case .ipExpression(let value): + try value.validate(name: "\(name).ipExpression") + case .stringExpression(let value): + try value.validate(name: "\(name).stringExpression") + case .verdictExpression(let value): + try value.validate(name: "\(name).verdictExpression") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case booleanExpression = "BooleanExpression" + case dmarcExpression = "DmarcExpression" + case ipExpression = "IpExpression" + case numberExpression = "NumberExpression" + case stringExpression = "StringExpression" + case verdictExpression = "VerdictExpression" + } + } + + public enum RuleVerdictToEvaluate: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The Add On ARN and its returned value to evaluate in a verdict condition expression. + case analysis(Analysis) + /// The email verdict attribute to evaluate in a string verdict expression. 
+ case attribute(RuleVerdictAttribute) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .analysis: + let value = try container.decode(Analysis.self, forKey: .analysis) + self = .analysis(value) + case .attribute: + let value = try container.decode(RuleVerdictAttribute.self, forKey: .attribute) + self = .attribute(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .analysis(let value): + try container.encode(value, forKey: .analysis) + case .attribute(let value): + try container.encode(value, forKey: .attribute) + } + } + + public func validate(name: String) throws { + switch self { + case .analysis(let value): + try value.validate(name: "\(name).analysis") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case analysis = "Analysis" + case attribute = "Attribute" + } + } + + // MARK: Shapes + + public struct AddHeaderAction: AWSEncodableShape & AWSDecodableShape { + /// The name of the header to add to an email. The header must be prefixed with "X-". Headers are added regardless of whether the header name pre-existed in the email. + public let headerName: String + /// The value of the header to add to the email. + public let headerValue: String + + public init(headerName: String, headerValue: String) { + self.headerName = headerName + self.headerValue = headerValue + } + + public func validate(name: String) throws { + try self.validate(self.headerName, name: "headerName", parent: name, max: 64) + try self.validate(self.headerName, name: "headerName", parent: name, min: 1) + try self.validate(self.headerName, name: "headerName", parent: name, pattern: "^[xX]\\-[a-zA-Z0-9\\-]+$") + try self.validate(self.headerValue, name: "headerValue", parent: name, max: 128) + try self.validate(self.headerValue, name: "headerValue", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case headerName = "HeaderName" + case headerValue = "HeaderValue" + } + } + + public struct AddonInstance: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Add On instance. + public let addonInstanceArn: String? + /// The unique ID of the Add On instance. + public let addonInstanceId: String? + /// The name of the Add On for the instance. + public let addonName: String? + /// The subscription ID for the instance. + public let addonSubscriptionId: String? + /// The timestamp of when the Add On instance was created. + public let createdTimestamp: Date? + + public init(addonInstanceArn: String? = nil, addonInstanceId: String? = nil, addonName: String? = nil, addonSubscriptionId: String? = nil, createdTimestamp: Date? 
= nil) { + self.addonInstanceArn = addonInstanceArn + self.addonInstanceId = addonInstanceId + self.addonName = addonName + self.addonSubscriptionId = addonSubscriptionId + self.createdTimestamp = createdTimestamp + } + + private enum CodingKeys: String, CodingKey { + case addonInstanceArn = "AddonInstanceArn" + case addonInstanceId = "AddonInstanceId" + case addonName = "AddonName" + case addonSubscriptionId = "AddonSubscriptionId" + case createdTimestamp = "CreatedTimestamp" + } + } + + public struct AddonSubscription: AWSDecodableShape { + /// The name of the Add On. + public let addonName: String? + /// The Amazon Resource Name (ARN) of the Add On subscription. + public let addonSubscriptionArn: String? + /// The unique ID of the Add On subscription. + public let addonSubscriptionId: String? + /// The timestamp of when the Add On subscription was created. + public let createdTimestamp: Date? + + public init(addonName: String? = nil, addonSubscriptionArn: String? = nil, addonSubscriptionId: String? = nil, createdTimestamp: Date? = nil) { + self.addonName = addonName + self.addonSubscriptionArn = addonSubscriptionArn + self.addonSubscriptionId = addonSubscriptionId + self.createdTimestamp = createdTimestamp + } + + private enum CodingKeys: String, CodingKey { + case addonName = "AddonName" + case addonSubscriptionArn = "AddonSubscriptionArn" + case addonSubscriptionId = "AddonSubscriptionId" + case createdTimestamp = "CreatedTimestamp" + } + } + + public struct Analysis: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of an Add On. + public let analyzer: String + /// The returned value from an Add On. + public let resultField: String + + public init(analyzer: String, resultField: String) { + self.analyzer = analyzer + self.resultField = resultField + } + + public func validate(name: String) throws { + try self.validate(self.analyzer, name: "analyzer", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + try self.validate(self.resultField, name: "resultField", parent: name, max: 256) + try self.validate(self.resultField, name: "resultField", parent: name, min: 1) + try self.validate(self.resultField, name: "resultField", parent: name, pattern: "^[\\sa-zA-Z0-9_]+$") + } + + private enum CodingKeys: String, CodingKey { + case analyzer = "Analyzer" + case resultField = "ResultField" + } + } + + public struct Archive: AWSDecodableShape { + /// The unique identifier of the archive. + public let archiveId: String + /// The unique name assigned to the archive. + public let archiveName: String? + /// The current state of the archive: ACTIVE – The archive is ready and available for use. PENDING_DELETION – The archive has been marked for deletion and will be permanently deleted in 30 days. No further modifications can be made in this state. + public let archiveState: ArchiveState? + /// The timestamp of when the archive was last updated. + public let lastUpdatedTimestamp: Date? + + public init(archiveId: String, archiveName: String? = nil, archiveState: ArchiveState? = nil, lastUpdatedTimestamp: Date? 
= nil) { + self.archiveId = archiveId + self.archiveName = archiveName + self.archiveState = archiveState + self.lastUpdatedTimestamp = lastUpdatedTimestamp + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case archiveName = "ArchiveName" + case archiveState = "ArchiveState" + case lastUpdatedTimestamp = "LastUpdatedTimestamp" + } + } + + public struct ArchiveAction: AWSEncodableShape & AWSDecodableShape { + /// A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified archive has been deleted. + public let actionFailurePolicy: ActionFailurePolicy? + /// The identifier of the archive to send the email to. + public let targetArchive: String + + public init(actionFailurePolicy: ActionFailurePolicy? = nil, targetArchive: String) { + self.actionFailurePolicy = actionFailurePolicy + self.targetArchive = targetArchive + } + + public func validate(name: String) throws { + try self.validate(self.targetArchive, name: "targetArchive", parent: name, max: 2048) + try self.validate(self.targetArchive, name: "targetArchive", parent: name, min: 1) + try self.validate(self.targetArchive, name: "targetArchive", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + } + + private enum CodingKeys: String, CodingKey { + case actionFailurePolicy = "ActionFailurePolicy" + case targetArchive = "TargetArchive" + } + } + + public struct ArchiveBooleanExpression: AWSEncodableShape & AWSDecodableShape { + /// The email attribute value to evaluate. + public let evaluate: ArchiveBooleanToEvaluate + /// The boolean operator to use for evaluation. + public let `operator`: ArchiveBooleanOperator + + public init(evaluate: ArchiveBooleanToEvaluate, operator: ArchiveBooleanOperator) { + self.evaluate = evaluate + self.`operator` = `operator` + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + } + } + + public struct ArchiveFilters: AWSEncodableShape & AWSDecodableShape { + /// The filter conditions for emails to include. + public let include: [ArchiveFilterCondition]? + /// The filter conditions for emails to exclude. + public let unless: [ArchiveFilterCondition]? + + public init(include: [ArchiveFilterCondition]? = nil, unless: [ArchiveFilterCondition]? = nil) { + self.include = include + self.unless = unless + } + + public func validate(name: String) throws { + try self.include?.forEach { + try $0.validate(name: "\(name).include[]") + } + try self.validate(self.include, name: "include", parent: name, max: 10) + try self.unless?.forEach { + try $0.validate(name: "\(name).unless[]") + } + try self.validate(self.unless, name: "unless", parent: name, max: 10) + } + + private enum CodingKeys: String, CodingKey { + case include = "Include" + case unless = "Unless" + } + } + + public struct ArchiveStringExpression: AWSEncodableShape & AWSDecodableShape { + /// The attribute of the email to evaluate. + public let evaluate: ArchiveStringToEvaluate + /// The operator to use when evaluating the string values. + public let `operator`: ArchiveStringOperator + /// The list of string values to evaluate the email attribute against. 
+ public let values: [String] + + public init(evaluate: ArchiveStringToEvaluate, operator: ArchiveStringOperator, values: [String]) { + self.evaluate = evaluate + self.`operator` = `operator` + self.values = values + } + + public func validate(name: String) throws { + try self.validate(self.values, name: "values", parent: name, max: 10) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case values = "Values" + } + } + + public struct CreateAddonInstanceRequest: AWSEncodableShape { + /// The unique ID of a previously created subscription that an Add On instance is created for. You can only have one instance per subscription. + public let addonSubscriptionId: String + /// A unique token that Amazon SES uses to recognize subsequent retries of the same request. + public let clientToken: String? + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + + public init(addonSubscriptionId: String, clientToken: String? = CreateAddonInstanceRequest.idempotencyToken(), tags: [Tag]? = nil) { + self.addonSubscriptionId = addonSubscriptionId + self.clientToken = clientToken + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, max: 67) + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, min: 4) + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, pattern: "^as-[a-zA-Z0-9]{1,64}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case addonSubscriptionId = "AddonSubscriptionId" + case clientToken = "ClientToken" + case tags = "Tags" + } + } + + public struct CreateAddonInstanceResponse: AWSDecodableShape { + /// The unique ID of the Add On instance created by this API. + public let addonInstanceId: String + + public init(addonInstanceId: String) { + self.addonInstanceId = addonInstanceId + } + + private enum CodingKeys: String, CodingKey { + case addonInstanceId = "AddonInstanceId" + } + } + + public struct CreateAddonSubscriptionRequest: AWSEncodableShape { + /// The name of the Add On to subscribe to. You can only have one subscription for each Add On name. + public let addonName: String + /// A unique token that Amazon SES uses to recognize subsequent retries of the same request. + public let clientToken: String? + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + + public init(addonName: String, clientToken: String? = CreateAddonSubscriptionRequest.idempotencyToken(), tags: [Tag]? 
= nil) { + self.addonName = addonName + self.clientToken = clientToken + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case addonName = "AddonName" + case clientToken = "ClientToken" + case tags = "Tags" + } + } + + public struct CreateAddonSubscriptionResponse: AWSDecodableShape { + /// The unique ID of the Add On subscription created by this API. + public let addonSubscriptionId: String + + public init(addonSubscriptionId: String) { + self.addonSubscriptionId = addonSubscriptionId + } + + private enum CodingKeys: String, CodingKey { + case addonSubscriptionId = "AddonSubscriptionId" + } + } + + public struct CreateArchiveRequest: AWSEncodableShape { + /// A unique name for the new archive. + public let archiveName: String + /// A unique token Amazon SES uses to recognize retries of this request. + public let clientToken: String? + /// The Amazon Resource Name (ARN) of the KMS key for encrypting emails in the archive. + public let kmsKeyArn: String? + /// The period for retaining emails in the archive before automatic deletion. + public let retention: ArchiveRetention? + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + + public init(archiveName: String, clientToken: String? = CreateArchiveRequest.idempotencyToken(), kmsKeyArn: String? = nil, retention: ArchiveRetention? = nil, tags: [Tag]? = nil) { + self.archiveName = archiveName + self.clientToken = clientToken + self.kmsKeyArn = kmsKeyArn + self.retention = retention + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.archiveName, name: "archiveName", parent: name, max: 64) + try self.validate(self.archiveName, name: "archiveName", parent: name, min: 1) + try self.validate(self.archiveName, name: "archiveName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9]$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-z0-9-]{1,20}:[0-9]{12}:(key|alias)/.+$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case archiveName = "ArchiveName" + case clientToken = "ClientToken" + case kmsKeyArn = "KmsKeyArn" + case retention = "Retention" + case tags = "Tags" + } + } + + public struct CreateArchiveResponse: AWSDecodableShape { + /// The unique identifier for the newly created archive. + public let archiveId: String + + public init(archiveId: String) { + self.archiveId = archiveId + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + } + } + + public struct CreateIngressPointRequest: AWSEncodableShape { + /// A unique token that Amazon SES uses to recognize subsequent retries of the same request. + public let clientToken: String? 
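+    // NOTE (illustrative sketch, not part of the generated model): every Create* request shape in
+    // this file defaults `clientToken` to a freshly generated idempotency token, so callers only
+    // need to supply it when deliberately retrying a request with the same token. Assuming plain
+    // construction of one of the shapes defined above, a hypothetical caller might write:
+    //
+    //     let request = CreateArchiveRequest(archiveName: "inbound-mail-archive")   // clientToken auto-generated
+    //     try request.validate(name: "CreateArchiveRequest")                        // enforces the length/pattern rules above
+    //
+    // The archive name here is hypothetical; `validate(name:)` would throw if it broke the documented pattern.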
+ /// If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret ARN. + public let ingressPointConfiguration: IngressPointConfiguration? + /// A user friendly name for an ingress endpoint resource. + public let ingressPointName: String + /// The identifier of an existing rule set that you attach to an ingress endpoint resource. + public let ruleSetId: String + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + /// The identifier of an existing traffic policy that you attach to an ingress endpoint resource. + public let trafficPolicyId: String + /// The type of the ingress endpoint to create. + public let type: IngressPointType + + public init(clientToken: String? = CreateIngressPointRequest.idempotencyToken(), ingressPointConfiguration: IngressPointConfiguration? = nil, ingressPointName: String, ruleSetId: String, tags: [Tag]? = nil, trafficPolicyId: String, type: IngressPointType) { + self.clientToken = clientToken + self.ingressPointConfiguration = ingressPointConfiguration + self.ingressPointName = ingressPointName + self.ruleSetId = ruleSetId + self.tags = tags + self.trafficPolicyId = trafficPolicyId + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.ingressPointConfiguration?.validate(name: "\(name).ingressPointConfiguration") + try self.validate(self.ingressPointName, name: "ingressPointName", parent: name, max: 63) + try self.validate(self.ingressPointName, name: "ingressPointName", parent: name, min: 3) + try self.validate(self.ingressPointName, name: "ingressPointName", parent: name, pattern: "^[A-Za-z0-9_\\-]+$") + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, max: 100) + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, min: 1) + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, max: 100) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case ingressPointConfiguration = "IngressPointConfiguration" + case ingressPointName = "IngressPointName" + case ruleSetId = "RuleSetId" + case tags = "Tags" + case trafficPolicyId = "TrafficPolicyId" + case type = "Type" + } + } + + public struct CreateIngressPointResponse: AWSDecodableShape { + /// The unique identifier for a previously created ingress endpoint. + public let ingressPointId: String + + public init(ingressPointId: String) { + self.ingressPointId = ingressPointId + } + + private enum CodingKeys: String, CodingKey { + case ingressPointId = "IngressPointId" + } + } + + public struct CreateRelayRequest: AWSEncodableShape { + /// Authentication for the relay destination server—specify the secretARN where the SMTP credentials are stored. + public let authentication: RelayAuthentication + /// A unique token that Amazon SES uses to recognize subsequent retries of the same request. + public let clientToken: String? + /// The unique name of the relay resource. + public let relayName: String + /// The destination relay server address. 
+ public let serverName: String + /// The destination relay server port. + public let serverPort: Int + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + + public init(authentication: RelayAuthentication, clientToken: String? = CreateRelayRequest.idempotencyToken(), relayName: String, serverName: String, serverPort: Int, tags: [Tag]? = nil) { + self.authentication = authentication + self.clientToken = clientToken + self.relayName = relayName + self.serverName = serverName + self.serverPort = serverPort + self.tags = tags + } + + public func validate(name: String) throws { + try self.authentication.validate(name: "\(name).authentication") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.relayName, name: "relayName", parent: name, max: 100) + try self.validate(self.relayName, name: "relayName", parent: name, min: 1) + try self.validate(self.relayName, name: "relayName", parent: name, pattern: "^[a-zA-Z0-9-_]+$") + try self.validate(self.serverName, name: "serverName", parent: name, max: 100) + try self.validate(self.serverName, name: "serverName", parent: name, min: 1) + try self.validate(self.serverName, name: "serverName", parent: name, pattern: "^[a-zA-Z0-9-\\.]+$") + try self.validate(self.serverPort, name: "serverPort", parent: name, max: 65535) + try self.validate(self.serverPort, name: "serverPort", parent: name, min: 1) + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case authentication = "Authentication" + case clientToken = "ClientToken" + case relayName = "RelayName" + case serverName = "ServerName" + case serverPort = "ServerPort" + case tags = "Tags" + } + } + + public struct CreateRelayResponse: AWSDecodableShape { + /// A unique identifier of the created relay resource. + public let relayId: String + + public init(relayId: String) { + self.relayId = relayId + } + + private enum CodingKeys: String, CodingKey { + case relayId = "RelayId" + } + } + + public struct CreateRuleSetRequest: AWSEncodableShape { + /// A unique token that Amazon SES uses to recognize subsequent retries of the same request. + public let clientToken: String? + /// Conditional rules that are evaluated for determining actions on email. + public let rules: [Rule] + /// A user-friendly name for the rule set. + public let ruleSetName: String + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + + public init(clientToken: String? = CreateRuleSetRequest.idempotencyToken(), rules: [Rule], ruleSetName: String, tags: [Tag]? 
= nil) { + self.clientToken = clientToken + self.rules = rules + self.ruleSetName = ruleSetName + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.rules.forEach { + try $0.validate(name: "\(name).rules[]") + } + try self.validate(self.rules, name: "rules", parent: name, max: 40) + try self.validate(self.ruleSetName, name: "ruleSetName", parent: name, max: 100) + try self.validate(self.ruleSetName, name: "ruleSetName", parent: name, min: 1) + try self.validate(self.ruleSetName, name: "ruleSetName", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case rules = "Rules" + case ruleSetName = "RuleSetName" + case tags = "Tags" + } + } + + public struct CreateRuleSetResponse: AWSDecodableShape { + /// The identifier of the created rule set. + public let ruleSetId: String + + public init(ruleSetId: String) { + self.ruleSetId = ruleSetId + } + + private enum CodingKeys: String, CodingKey { + case ruleSetId = "RuleSetId" + } + } + + public struct CreateTrafficPolicyRequest: AWSEncodableShape { + /// A unique token that Amazon SES uses to recognize subsequent retries of the same request. + public let clientToken: String? + /// Default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or not addressed by) the conditions of your policy statements + public let defaultAction: AcceptAction + /// The maximum message size in bytes of email which is allowed in by this traffic policy—anything larger will be blocked. + public let maxMessageSizeBytes: Int? + /// Conditional statements for filtering email traffic. + public let policyStatements: [PolicyStatement] + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag]? + /// A user-friendly name for the traffic policy resource. + public let trafficPolicyName: String + + public init(clientToken: String? = CreateTrafficPolicyRequest.idempotencyToken(), defaultAction: AcceptAction, maxMessageSizeBytes: Int? = nil, policyStatements: [PolicyStatement], tags: [Tag]? 
= nil, trafficPolicyName: String) { + self.clientToken = clientToken + self.defaultAction = defaultAction + self.maxMessageSizeBytes = maxMessageSizeBytes + self.policyStatements = policyStatements + self.tags = tags + self.trafficPolicyName = trafficPolicyName + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.maxMessageSizeBytes, name: "maxMessageSizeBytes", parent: name, min: 1) + try self.policyStatements.forEach { + try $0.validate(name: "\(name).policyStatements[]") + } + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.trafficPolicyName, name: "trafficPolicyName", parent: name, max: 63) + try self.validate(self.trafficPolicyName, name: "trafficPolicyName", parent: name, min: 3) + try self.validate(self.trafficPolicyName, name: "trafficPolicyName", parent: name, pattern: "^[A-Za-z0-9_\\-]+$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case defaultAction = "DefaultAction" + case maxMessageSizeBytes = "MaxMessageSizeBytes" + case policyStatements = "PolicyStatements" + case tags = "Tags" + case trafficPolicyName = "TrafficPolicyName" + } + } + + public struct CreateTrafficPolicyResponse: AWSDecodableShape { + /// The identifier of the traffic policy resource. + public let trafficPolicyId: String + + public init(trafficPolicyId: String) { + self.trafficPolicyId = trafficPolicyId + } + + private enum CodingKeys: String, CodingKey { + case trafficPolicyId = "TrafficPolicyId" + } + } + + public struct DeleteAddonInstanceRequest: AWSEncodableShape { + /// The Add On instance ID to delete. + public let addonInstanceId: String + + public init(addonInstanceId: String) { + self.addonInstanceId = addonInstanceId + } + + public func validate(name: String) throws { + try self.validate(self.addonInstanceId, name: "addonInstanceId", parent: name, max: 67) + try self.validate(self.addonInstanceId, name: "addonInstanceId", parent: name, min: 4) + try self.validate(self.addonInstanceId, name: "addonInstanceId", parent: name, pattern: "^ai-[a-zA-Z0-9]{1,64}$") + } + + private enum CodingKeys: String, CodingKey { + case addonInstanceId = "AddonInstanceId" + } + } + + public struct DeleteAddonInstanceResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteAddonSubscriptionRequest: AWSEncodableShape { + /// The Add On subscription ID to delete. + public let addonSubscriptionId: String + + public init(addonSubscriptionId: String) { + self.addonSubscriptionId = addonSubscriptionId + } + + public func validate(name: String) throws { + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, max: 67) + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, min: 4) + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, pattern: "^as-[a-zA-Z0-9]{1,64}$") + } + + private enum CodingKeys: String, CodingKey { + case addonSubscriptionId = "AddonSubscriptionId" + } + } + + public struct DeleteAddonSubscriptionResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteArchiveRequest: AWSEncodableShape { + /// The identifier of the archive to delete. 
+ public let archiveId: String + + public init(archiveId: String) { + self.archiveId = archiveId + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + } + } + + public struct DeleteArchiveResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteIngressPointRequest: AWSEncodableShape { + /// The identifier of the ingress endpoint resource that you want to delete. + public let ingressPointId: String + + public init(ingressPointId: String) { + self.ingressPointId = ingressPointId + } + + public func validate(name: String) throws { + try self.validate(self.ingressPointId, name: "ingressPointId", parent: name, max: 100) + try self.validate(self.ingressPointId, name: "ingressPointId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case ingressPointId = "IngressPointId" + } + } + + public struct DeleteIngressPointResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteRelayRequest: AWSEncodableShape { + /// The unique relay identifier. + public let relayId: String + + public init(relayId: String) { + self.relayId = relayId + } + + public func validate(name: String) throws { + try self.validate(self.relayId, name: "relayId", parent: name, max: 100) + try self.validate(self.relayId, name: "relayId", parent: name, min: 1) + try self.validate(self.relayId, name: "relayId", parent: name, pattern: "^[a-zA-Z0-9-]+$") + } + + private enum CodingKeys: String, CodingKey { + case relayId = "RelayId" + } + } + + public struct DeleteRelayResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteRuleSetRequest: AWSEncodableShape { + /// The identifier of an existing rule set resource to delete. + public let ruleSetId: String + + public init(ruleSetId: String) { + self.ruleSetId = ruleSetId + } + + public func validate(name: String) throws { + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, max: 100) + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case ruleSetId = "RuleSetId" + } + } + + public struct DeleteRuleSetResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteTrafficPolicyRequest: AWSEncodableShape { + /// The identifier of the traffic policy that you want to delete. + public let trafficPolicyId: String + + public init(trafficPolicyId: String) { + self.trafficPolicyId = trafficPolicyId + } + + public func validate(name: String) throws { + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, max: 100) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case trafficPolicyId = "TrafficPolicyId" + } + } + + public struct DeleteTrafficPolicyResponse: AWSDecodableShape { + public init() {} + } + + public struct DeliverToMailboxAction: AWSEncodableShape & AWSDecodableShape { + /// A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the mailbox ARN is no longer valid. + public let actionFailurePolicy: ActionFailurePolicy? + /// The Amazon Resource Name (ARN) of a WorkMail organization to deliver the email to. 
+ public let mailboxArn: String + /// The Amazon Resource Name (ARN) of an IAM role to use to execute this action. The role must have access to the workmail:DeliverToMailbox API. + public let roleArn: String + + public init(actionFailurePolicy: ActionFailurePolicy? = nil, mailboxArn: String, roleArn: String) { + self.actionFailurePolicy = actionFailurePolicy + self.mailboxArn = mailboxArn + self.roleArn = roleArn + } + + public func validate(name: String) throws { + try self.validate(self.mailboxArn, name: "mailboxArn", parent: name, max: 2048) + try self.validate(self.mailboxArn, name: "mailboxArn", parent: name, min: 1) + try self.validate(self.mailboxArn, name: "mailboxArn", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + } + + private enum CodingKeys: String, CodingKey { + case actionFailurePolicy = "ActionFailurePolicy" + case mailboxArn = "MailboxArn" + case roleArn = "RoleArn" + } + } + + public struct DropAction: AWSEncodableShape & AWSDecodableShape { + public init() {} + } + + public struct ExportStatus: AWSDecodableShape { + /// The timestamp of when the export job completed (if finished). + public let completionTimestamp: Date? + /// An error message if the export job failed. + public let errorMessage: String? + /// The current state of the export job. + public let state: ExportState? + /// The timestamp of when the export job was submitted. + public let submissionTimestamp: Date? + + public init(completionTimestamp: Date? = nil, errorMessage: String? = nil, state: ExportState? = nil, submissionTimestamp: Date? = nil) { + self.completionTimestamp = completionTimestamp + self.errorMessage = errorMessage + self.state = state + self.submissionTimestamp = submissionTimestamp + } + + private enum CodingKeys: String, CodingKey { + case completionTimestamp = "CompletionTimestamp" + case errorMessage = "ErrorMessage" + case state = "State" + case submissionTimestamp = "SubmissionTimestamp" + } + } + + public struct ExportSummary: AWSDecodableShape { + /// The unique identifier of the export job. + public let exportId: String? + /// The current status of the export job. + public let status: ExportStatus? + + public init(exportId: String? = nil, status: ExportStatus? = nil) { + self.exportId = exportId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case exportId = "ExportId" + case status = "Status" + } + } + + public struct GetAddonInstanceRequest: AWSEncodableShape { + /// The Add On instance ID to retrieve information for. + public let addonInstanceId: String + + public init(addonInstanceId: String) { + self.addonInstanceId = addonInstanceId + } + + public func validate(name: String) throws { + try self.validate(self.addonInstanceId, name: "addonInstanceId", parent: name, max: 67) + try self.validate(self.addonInstanceId, name: "addonInstanceId", parent: name, min: 4) + try self.validate(self.addonInstanceId, name: "addonInstanceId", parent: name, pattern: "^ai-[a-zA-Z0-9]{1,64}$") + } + + private enum CodingKeys: String, CodingKey { + case addonInstanceId = "AddonInstanceId" + } + } + + public struct GetAddonInstanceResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Add On instance. + public let addonInstanceArn: String? 
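+    // NOTE (illustrative sketch, not part of the generated model): the union-style enums earlier in
+    // this file (RuleAction, RuleCondition, RelayAuthentication, ...) encode as single-key JSON
+    // objects. Assuming a plain Foundation JSONEncoder/JSONDecoder outside the Soto request
+    // pipeline, a hypothetical round trip looks like:
+    //
+    //     let action = RuleAction.addHeader(AddHeaderAction(headerName: "X-Processed", headerValue: "true"))
+    //     let data = try JSONEncoder().encode(action)
+    //     // produces JSON like {"AddHeader":{"HeaderName":"X-Processed","HeaderValue":"true"}} (key order may vary)
+    //     let decoded = try JSONDecoder().decode(RuleAction.self, from: data)
+    //
+    // Decoding an object with zero or multiple top-level keys throws DecodingError.dataCorrupted,
+    // as implemented in each enum's init(from:) above.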
+ /// The name of the Add On provider associated to the subscription of the instance. + public let addonName: String? + /// The subscription ID associated to the instance. + public let addonSubscriptionId: String? + /// The timestamp of when the Add On instance was created. + public let createdTimestamp: Date? + + public init(addonInstanceArn: String? = nil, addonName: String? = nil, addonSubscriptionId: String? = nil, createdTimestamp: Date? = nil) { + self.addonInstanceArn = addonInstanceArn + self.addonName = addonName + self.addonSubscriptionId = addonSubscriptionId + self.createdTimestamp = createdTimestamp + } + + private enum CodingKeys: String, CodingKey { + case addonInstanceArn = "AddonInstanceArn" + case addonName = "AddonName" + case addonSubscriptionId = "AddonSubscriptionId" + case createdTimestamp = "CreatedTimestamp" + } + } + + public struct GetAddonSubscriptionRequest: AWSEncodableShape { + /// The Add On subscription ID to retrieve information for. + public let addonSubscriptionId: String + + public init(addonSubscriptionId: String) { + self.addonSubscriptionId = addonSubscriptionId + } + + public func validate(name: String) throws { + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, max: 67) + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, min: 4) + try self.validate(self.addonSubscriptionId, name: "addonSubscriptionId", parent: name, pattern: "^as-[a-zA-Z0-9]{1,64}$") + } + + private enum CodingKeys: String, CodingKey { + case addonSubscriptionId = "AddonSubscriptionId" + } + } + + public struct GetAddonSubscriptionResponse: AWSDecodableShape { + /// The name of the Add On for the subscription. + public let addonName: String? + /// Amazon Resource Name (ARN) for the subscription. + public let addonSubscriptionArn: String? + /// The timestamp of when the Add On subscription was created. + public let createdTimestamp: Date? + + public init(addonName: String? = nil, addonSubscriptionArn: String? = nil, createdTimestamp: Date? = nil) { + self.addonName = addonName + self.addonSubscriptionArn = addonSubscriptionArn + self.createdTimestamp = createdTimestamp + } + + private enum CodingKeys: String, CodingKey { + case addonName = "AddonName" + case addonSubscriptionArn = "AddonSubscriptionArn" + case createdTimestamp = "CreatedTimestamp" + } + } + + public struct GetArchiveExportRequest: AWSEncodableShape { + /// The identifier of the export job to get details for. + public let exportId: String + + public init(exportId: String) { + self.exportId = exportId + } + + public func validate(name: String) throws { + try self.validate(self.exportId, name: "exportId", parent: name, max: 64) + try self.validate(self.exportId, name: "exportId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case exportId = "ExportId" + } + } + + public struct GetArchiveExportResponse: AWSDecodableShape { + /// The identifier of the archive the email export was performed from. + public let archiveId: String? + /// Where the exported emails are being delivered. + public let exportDestinationConfiguration: ExportDestinationConfiguration? + /// The criteria used to filter emails included in the export. + public let filters: ArchiveFilters? + /// The start of the timestamp range the exported emails cover. + public let fromTimestamp: Date? + /// The maximum number of email items included in the export. + public let maxResults: Int? + /// The current status of the export job. 
+ public let status: ExportStatus? + /// The end of the date range the exported emails cover. + public let toTimestamp: Date? + + public init(archiveId: String? = nil, exportDestinationConfiguration: ExportDestinationConfiguration? = nil, filters: ArchiveFilters? = nil, fromTimestamp: Date? = nil, maxResults: Int? = nil, status: ExportStatus? = nil, toTimestamp: Date? = nil) { + self.archiveId = archiveId + self.exportDestinationConfiguration = exportDestinationConfiguration + self.filters = filters + self.fromTimestamp = fromTimestamp + self.maxResults = maxResults + self.status = status + self.toTimestamp = toTimestamp + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case exportDestinationConfiguration = "ExportDestinationConfiguration" + case filters = "Filters" + case fromTimestamp = "FromTimestamp" + case maxResults = "MaxResults" + case status = "Status" + case toTimestamp = "ToTimestamp" + } + } + + public struct GetArchiveMessageContentRequest: AWSEncodableShape { + /// The unique identifier of the archived email message. + public let archivedMessageId: String + + public init(archivedMessageId: String) { + self.archivedMessageId = archivedMessageId + } + + private enum CodingKeys: String, CodingKey { + case archivedMessageId = "ArchivedMessageId" + } + } + + public struct GetArchiveMessageContentResponse: AWSDecodableShape { + /// The textual body content of the email message. + public let body: MessageBody? + + public init(body: MessageBody? = nil) { + self.body = body + } + + private enum CodingKeys: String, CodingKey { + case body = "Body" + } + } + + public struct GetArchiveMessageRequest: AWSEncodableShape { + /// The unique identifier of the archived email message. + public let archivedMessageId: String + + public init(archivedMessageId: String) { + self.archivedMessageId = archivedMessageId + } + + private enum CodingKeys: String, CodingKey { + case archivedMessageId = "ArchivedMessageId" + } + } + + public struct GetArchiveMessageResponse: AWSDecodableShape { + /// A pre-signed URL to temporarily download the full message content. + public let messageDownloadLink: String? + + public init(messageDownloadLink: String? = nil) { + self.messageDownloadLink = messageDownloadLink + } + + private enum CodingKeys: String, CodingKey { + case messageDownloadLink = "MessageDownloadLink" + } + } + + public struct GetArchiveRequest: AWSEncodableShape { + /// The identifier of the archive to retrieve. + public let archiveId: String + + public init(archiveId: String) { + self.archiveId = archiveId + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + } + } + + public struct GetArchiveResponse: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the archive. + public let archiveArn: String + /// The unique identifier of the archive. + public let archiveId: String + /// The unique name assigned to the archive. + public let archiveName: String + /// The current state of the archive: ACTIVE – The archive is ready and available for use. PENDING_DELETION – The archive has been marked for deletion and will be permanently deleted in 30 days. No further modifications can be made in this state. + public let archiveState: ArchiveState + /// The timestamp of when the archive was created. 
+ public let createdTimestamp: Date? + /// The Amazon Resource Name (ARN) of the KMS key used to encrypt the archive. + public let kmsKeyArn: String? + /// The timestamp of when the archive was modified. + public let lastUpdatedTimestamp: Date? + /// The retention period for emails in this archive. + public let retention: ArchiveRetention + + public init(archiveArn: String, archiveId: String, archiveName: String, archiveState: ArchiveState, createdTimestamp: Date? = nil, kmsKeyArn: String? = nil, lastUpdatedTimestamp: Date? = nil, retention: ArchiveRetention) { + self.archiveArn = archiveArn + self.archiveId = archiveId + self.archiveName = archiveName + self.archiveState = archiveState + self.createdTimestamp = createdTimestamp + self.kmsKeyArn = kmsKeyArn + self.lastUpdatedTimestamp = lastUpdatedTimestamp + self.retention = retention + } + + private enum CodingKeys: String, CodingKey { + case archiveArn = "ArchiveArn" + case archiveId = "ArchiveId" + case archiveName = "ArchiveName" + case archiveState = "ArchiveState" + case createdTimestamp = "CreatedTimestamp" + case kmsKeyArn = "KmsKeyArn" + case lastUpdatedTimestamp = "LastUpdatedTimestamp" + case retention = "Retention" + } + } + + public struct GetArchiveSearchRequest: AWSEncodableShape { + /// The identifier of the search job to get details for. + public let searchId: String + + public init(searchId: String) { + self.searchId = searchId + } + + public func validate(name: String) throws { + try self.validate(self.searchId, name: "searchId", parent: name, max: 64) + try self.validate(self.searchId, name: "searchId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case searchId = "SearchId" + } + } + + public struct GetArchiveSearchResponse: AWSDecodableShape { + /// The identifier of the archive the email search was performed in. + public let archiveId: String? + /// The criteria used to filter emails included in the search. + public let filters: ArchiveFilters? + /// The start timestamp of the range the searched emails cover. + public let fromTimestamp: Date? + /// The maximum number of search results to return. + public let maxResults: Int? + /// The current status of the search job. + public let status: SearchStatus? + /// The end timestamp of the range the searched emails cover. + public let toTimestamp: Date? + + public init(archiveId: String? = nil, filters: ArchiveFilters? = nil, fromTimestamp: Date? = nil, maxResults: Int? = nil, status: SearchStatus? = nil, toTimestamp: Date? = nil) { + self.archiveId = archiveId + self.filters = filters + self.fromTimestamp = fromTimestamp + self.maxResults = maxResults + self.status = status + self.toTimestamp = toTimestamp + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case filters = "Filters" + case fromTimestamp = "FromTimestamp" + case maxResults = "MaxResults" + case status = "Status" + case toTimestamp = "ToTimestamp" + } + } + + public struct GetArchiveSearchResultsRequest: AWSEncodableShape { + /// The identifier of the completed search job. 
+ public let searchId: String + + public init(searchId: String) { + self.searchId = searchId + } + + public func validate(name: String) throws { + try self.validate(self.searchId, name: "searchId", parent: name, max: 64) + try self.validate(self.searchId, name: "searchId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case searchId = "SearchId" + } + } + + public struct GetArchiveSearchResultsResponse: AWSDecodableShape { + /// The list of email result objects matching the search criteria. + public let rows: [Row]? + + public init(rows: [Row]? = nil) { + self.rows = rows + } + + private enum CodingKeys: String, CodingKey { + case rows = "Rows" + } + } + + public struct GetIngressPointRequest: AWSEncodableShape { + /// The identifier of an ingress endpoint. + public let ingressPointId: String + + public init(ingressPointId: String) { + self.ingressPointId = ingressPointId + } + + public func validate(name: String) throws { + try self.validate(self.ingressPointId, name: "ingressPointId", parent: name, max: 100) + try self.validate(self.ingressPointId, name: "ingressPointId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case ingressPointId = "IngressPointId" + } + } + + public struct GetIngressPointResponse: AWSDecodableShape { + /// The DNS A Record that identifies your ingress endpoint. Configure your DNS Mail Exchange (MX) record with this value to route emails to Mail Manager. + public let aRecord: String? + /// The timestamp of when the ingress endpoint was created. + public let createdTimestamp: Date? + /// The Amazon Resource Name (ARN) of the ingress endpoint resource. + public let ingressPointArn: String? + /// The authentication configuration of the ingress endpoint resource. + public let ingressPointAuthConfiguration: IngressPointAuthConfiguration? + /// The identifier of an ingress endpoint resource. + public let ingressPointId: String + /// A user friendly name for the ingress endpoint. + public let ingressPointName: String + /// The timestamp of when the ingress endpoint was last updated. + public let lastUpdatedTimestamp: Date? + /// The identifier of a rule set resource associated with the ingress endpoint. + public let ruleSetId: String? + /// The status of the ingress endpoint resource. + public let status: IngressPointStatus? + /// The identifier of the traffic policy resource associated with the ingress endpoint. + public let trafficPolicyId: String? + /// The type of ingress endpoint. + public let type: IngressPointType? + + public init(aRecord: String? = nil, createdTimestamp: Date? = nil, ingressPointArn: String? = nil, ingressPointAuthConfiguration: IngressPointAuthConfiguration? = nil, ingressPointId: String, ingressPointName: String, lastUpdatedTimestamp: Date? = nil, ruleSetId: String? = nil, status: IngressPointStatus? = nil, trafficPolicyId: String? = nil, type: IngressPointType? 
= nil) { + self.aRecord = aRecord + self.createdTimestamp = createdTimestamp + self.ingressPointArn = ingressPointArn + self.ingressPointAuthConfiguration = ingressPointAuthConfiguration + self.ingressPointId = ingressPointId + self.ingressPointName = ingressPointName + self.lastUpdatedTimestamp = lastUpdatedTimestamp + self.ruleSetId = ruleSetId + self.status = status + self.trafficPolicyId = trafficPolicyId + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case aRecord = "ARecord" + case createdTimestamp = "CreatedTimestamp" + case ingressPointArn = "IngressPointArn" + case ingressPointAuthConfiguration = "IngressPointAuthConfiguration" + case ingressPointId = "IngressPointId" + case ingressPointName = "IngressPointName" + case lastUpdatedTimestamp = "LastUpdatedTimestamp" + case ruleSetId = "RuleSetId" + case status = "Status" + case trafficPolicyId = "TrafficPolicyId" + case type = "Type" + } + } + + public struct GetRelayRequest: AWSEncodableShape { + /// A unique relay identifier. + public let relayId: String + + public init(relayId: String) { + self.relayId = relayId + } + + public func validate(name: String) throws { + try self.validate(self.relayId, name: "relayId", parent: name, max: 100) + try self.validate(self.relayId, name: "relayId", parent: name, min: 1) + try self.validate(self.relayId, name: "relayId", parent: name, pattern: "^[a-zA-Z0-9-]+$") + } + + private enum CodingKeys: String, CodingKey { + case relayId = "RelayId" + } + } + + public struct GetRelayResponse: AWSDecodableShape { + /// The authentication attribute—contains the secret ARN where the customer relay server credentials are stored. + public let authentication: RelayAuthentication? + /// The timestamp of when the relay was created. + public let createdTimestamp: Date? + /// The timestamp of when relay was last updated. + public let lastModifiedTimestamp: Date? + /// The Amazon Resource Name (ARN) of the relay. + public let relayArn: String? + /// The unique relay identifier. + public let relayId: String + /// The unique name of the relay. + public let relayName: String? + /// The destination relay server address. + public let serverName: String? + /// The destination relay server port. + public let serverPort: Int? + + public init(authentication: RelayAuthentication? = nil, createdTimestamp: Date? = nil, lastModifiedTimestamp: Date? = nil, relayArn: String? = nil, relayId: String, relayName: String? = nil, serverName: String? = nil, serverPort: Int? = nil) { + self.authentication = authentication + self.createdTimestamp = createdTimestamp + self.lastModifiedTimestamp = lastModifiedTimestamp + self.relayArn = relayArn + self.relayId = relayId + self.relayName = relayName + self.serverName = serverName + self.serverPort = serverPort + } + + private enum CodingKeys: String, CodingKey { + case authentication = "Authentication" + case createdTimestamp = "CreatedTimestamp" + case lastModifiedTimestamp = "LastModifiedTimestamp" + case relayArn = "RelayArn" + case relayId = "RelayId" + case relayName = "RelayName" + case serverName = "ServerName" + case serverPort = "ServerPort" + } + } + + public struct GetRuleSetRequest: AWSEncodableShape { + /// The identifier of an existing rule set to be retrieved. 
+ public let ruleSetId: String + + public init(ruleSetId: String) { + self.ruleSetId = ruleSetId + } + + public func validate(name: String) throws { + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, max: 100) + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case ruleSetId = "RuleSetId" + } + } + + public struct GetRuleSetResponse: AWSDecodableShape { + /// The date of when the rule set was created. + public let createdDate: Date + /// The date of when the rule set was last modified. + public let lastModificationDate: Date + /// The rules contained in the rule set. + public let rules: [Rule] + /// The Amazon Resource Name (ARN) of the rule set resource. + public let ruleSetArn: String + /// The identifier of the rule set resource. + public let ruleSetId: String + /// A user-friendly name for the rule set resource. + public let ruleSetName: String + + public init(createdDate: Date, lastModificationDate: Date, rules: [Rule], ruleSetArn: String, ruleSetId: String, ruleSetName: String) { + self.createdDate = createdDate + self.lastModificationDate = lastModificationDate + self.rules = rules + self.ruleSetArn = ruleSetArn + self.ruleSetId = ruleSetId + self.ruleSetName = ruleSetName + } + + private enum CodingKeys: String, CodingKey { + case createdDate = "CreatedDate" + case lastModificationDate = "LastModificationDate" + case rules = "Rules" + case ruleSetArn = "RuleSetArn" + case ruleSetId = "RuleSetId" + case ruleSetName = "RuleSetName" + } + } + + public struct GetTrafficPolicyRequest: AWSEncodableShape { + /// The identifier of the traffic policy resource. + public let trafficPolicyId: String + + public init(trafficPolicyId: String) { + self.trafficPolicyId = trafficPolicyId + } + + public func validate(name: String) throws { + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, max: 100) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case trafficPolicyId = "TrafficPolicyId" + } + } + + public struct GetTrafficPolicyResponse: AWSDecodableShape { + /// The timestamp of when the traffic policy was created. + public let createdTimestamp: Date? + /// The default action of the traffic policy. + public let defaultAction: AcceptAction? + /// The timestamp of when the traffic policy was last updated. + public let lastUpdatedTimestamp: Date? + /// The maximum message size in bytes of email which is allowed in by this traffic policy—anything larger will be blocked. + public let maxMessageSizeBytes: Int? + /// The list of conditions which are in the traffic policy resource. + public let policyStatements: [PolicyStatement]? + /// The Amazon Resource Name (ARN) of the traffic policy resource. + public let trafficPolicyArn: String? + /// The identifier of the traffic policy resource. + public let trafficPolicyId: String + /// A user-friendly name for the traffic policy resource. + public let trafficPolicyName: String + + public init(createdTimestamp: Date? = nil, defaultAction: AcceptAction? = nil, lastUpdatedTimestamp: Date? = nil, maxMessageSizeBytes: Int? = nil, policyStatements: [PolicyStatement]? = nil, trafficPolicyArn: String?
= nil, trafficPolicyId: String, trafficPolicyName: String) { + self.createdTimestamp = createdTimestamp + self.defaultAction = defaultAction + self.lastUpdatedTimestamp = lastUpdatedTimestamp + self.maxMessageSizeBytes = maxMessageSizeBytes + self.policyStatements = policyStatements + self.trafficPolicyArn = trafficPolicyArn + self.trafficPolicyId = trafficPolicyId + self.trafficPolicyName = trafficPolicyName + } + + private enum CodingKeys: String, CodingKey { + case createdTimestamp = "CreatedTimestamp" + case defaultAction = "DefaultAction" + case lastUpdatedTimestamp = "LastUpdatedTimestamp" + case maxMessageSizeBytes = "MaxMessageSizeBytes" + case policyStatements = "PolicyStatements" + case trafficPolicyArn = "TrafficPolicyArn" + case trafficPolicyId = "TrafficPolicyId" + case trafficPolicyName = "TrafficPolicyName" + } + } + + public struct IngressAnalysis: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of an Add On. + public let analyzer: String + /// The returned value from an Add On. + public let resultField: String + + public init(analyzer: String, resultField: String) { + self.analyzer = analyzer + self.resultField = resultField + } + + public func validate(name: String) throws { + try self.validate(self.analyzer, name: "analyzer", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + try self.validate(self.resultField, name: "resultField", parent: name, max: 256) + try self.validate(self.resultField, name: "resultField", parent: name, min: 1) + try self.validate(self.resultField, name: "resultField", parent: name, pattern: "^[\\sa-zA-Z0-9_]+$") + } + + private enum CodingKeys: String, CodingKey { + case analyzer = "Analyzer" + case resultField = "ResultField" + } + } + + public struct IngressBooleanExpression: AWSEncodableShape & AWSDecodableShape { + /// The operand on which to perform a boolean condition operation. + public let evaluate: IngressBooleanToEvaluate + /// The matching operator for a boolean condition expression. + public let `operator`: IngressBooleanOperator + + public init(evaluate: IngressBooleanToEvaluate, operator: IngressBooleanOperator) { + self.evaluate = evaluate + self.`operator` = `operator` + } + + public func validate(name: String) throws { + try self.evaluate.validate(name: "\(name).evaluate") + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + } + } + + public struct IngressIpv4Expression: AWSEncodableShape & AWSDecodableShape { + /// The left hand side argument of an IP condition expression. + public let evaluate: IngressIpToEvaluate + /// The matching operator for an IP condition expression. + public let `operator`: IngressIpOperator + /// The right hand side argument of an IP condition expression. + public let values: [String] + + public init(evaluate: IngressIpToEvaluate, operator: IngressIpOperator, values: [String]) { + self.evaluate = evaluate + self.`operator` = `operator` + self.values = values + } + + public func validate(name: String) throws { + try self.values.forEach { + try validate($0, name: "values[]", parent: name, pattern: "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/([0-9]|[12][0-9]|3[0-2])$") + } + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case values = "Values" + } + } + + public struct IngressPoint: AWSDecodableShape { + /// The DNS A Record that identifies your ingress endpoint. 
Configure your DNS Mail Exchange (MX) record with this value to route emails to Mail Manager. + public let aRecord: String? + /// The identifier of the ingress endpoint resource. + public let ingressPointId: String + /// A user friendly name for the ingress endpoint resource. + public let ingressPointName: String + /// The status of the ingress endpoint resource. + public let status: IngressPointStatus + /// The type of ingress endpoint resource. + public let type: IngressPointType + + public init(aRecord: String? = nil, ingressPointId: String, ingressPointName: String, status: IngressPointStatus, type: IngressPointType) { + self.aRecord = aRecord + self.ingressPointId = ingressPointId + self.ingressPointName = ingressPointName + self.status = status + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case aRecord = "ARecord" + case ingressPointId = "IngressPointId" + case ingressPointName = "IngressPointName" + case status = "Status" + case type = "Type" + } + } + + public struct IngressPointAuthConfiguration: AWSDecodableShape { + /// The ingress endpoint password configuration for the ingress endpoint resource. + public let ingressPointPasswordConfiguration: IngressPointPasswordConfiguration? + /// The ingress endpoint SecretsManager::Secret ARN configuration for the ingress endpoint resource. + public let secretArn: String? + + public init(ingressPointPasswordConfiguration: IngressPointPasswordConfiguration? = nil, secretArn: String? = nil) { + self.ingressPointPasswordConfiguration = ingressPointPasswordConfiguration + self.secretArn = secretArn + } + + private enum CodingKeys: String, CodingKey { + case ingressPointPasswordConfiguration = "IngressPointPasswordConfiguration" + case secretArn = "SecretArn" + } + } + + public struct IngressPointPasswordConfiguration: AWSDecodableShape { + /// The previous password expiry timestamp of the ingress endpoint resource. + public let previousSmtpPasswordExpiryTimestamp: Date? + /// The previous password version of the ingress endpoint resource. + public let previousSmtpPasswordVersion: String? + /// The current password version of the ingress endpoint resource. + public let smtpPasswordVersion: String? + + public init(previousSmtpPasswordExpiryTimestamp: Date? = nil, previousSmtpPasswordVersion: String? = nil, smtpPasswordVersion: String? = nil) { + self.previousSmtpPasswordExpiryTimestamp = previousSmtpPasswordExpiryTimestamp + self.previousSmtpPasswordVersion = previousSmtpPasswordVersion + self.smtpPasswordVersion = smtpPasswordVersion + } + + private enum CodingKeys: String, CodingKey { + case previousSmtpPasswordExpiryTimestamp = "PreviousSmtpPasswordExpiryTimestamp" + case previousSmtpPasswordVersion = "PreviousSmtpPasswordVersion" + case smtpPasswordVersion = "SmtpPasswordVersion" + } + } + + public struct IngressStringExpression: AWSEncodableShape & AWSDecodableShape { + /// The left hand side argument of a string condition expression. + public let evaluate: IngressStringToEvaluate + /// The matching operator for a string condition expression. + public let `operator`: IngressStringOperator + /// The right hand side argument of a string condition expression.
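Editorial aside: the ingress-point shapes above are decode-only, so an illustrative sketch of consuming them is limited to reading fields from a response obtained elsewhere (the API call itself is not shown in this excerpt):

    // Illustrative only: `response` is a GetIngressPointResponse decoded by the client.
    func describeIngressPoint(_ response: GetIngressPointResponse) {
        if let aRecord = response.aRecord {
            // Point the domain's MX record at this A record so mail reaches Mail Manager.
            print("MX target for \(response.ingressPointName): \(aRecord)")
        }
        print("Status: \(String(describing: response.status))")
    }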
+ public let values: [String] + + public init(evaluate: IngressStringToEvaluate, operator: IngressStringOperator, values: [String]) { + self.evaluate = evaluate + self.`operator` = `operator` + self.values = values + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case values = "Values" + } + } + + public struct IngressTlsProtocolExpression: AWSEncodableShape & AWSDecodableShape { + /// The left hand side argument of a TLS condition expression. + public let evaluate: IngressTlsProtocolToEvaluate + /// The matching operator for a TLS condition expression. + public let `operator`: IngressTlsProtocolOperator + /// The right hand side argument of a TLS condition expression. + public let value: IngressTlsProtocolAttribute + + public init(evaluate: IngressTlsProtocolToEvaluate, operator: IngressTlsProtocolOperator, value: IngressTlsProtocolAttribute) { + self.evaluate = evaluate + self.`operator` = `operator` + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case value = "Value" + } + } + + public struct ListAddonInstancesRequest: AWSEncodableShape { + /// If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results. + public let nextToken: String? + /// The maximum number of ingress endpoint resources that are returned per call. You can use NextToken to obtain further ingress endpoints. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? = nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListAddonInstancesResponse: AWSDecodableShape { + /// The list of ingress endpoints. + public let addonInstances: [AddonInstance]? + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + + public init(addonInstances: [AddonInstance]? = nil, nextToken: String? = nil) { + self.addonInstances = addonInstances + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case addonInstances = "AddonInstances" + case nextToken = "NextToken" + } + } + + public struct ListAddonSubscriptionsRequest: AWSEncodableShape { + /// If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results. + public let nextToken: String? + /// The maximum number of ingress endpoint resources that are returned per call. You can use NextToken to obtain further ingress endpoints. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? 
= nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListAddonSubscriptionsResponse: AWSDecodableShape { + /// The list of ingress endpoints. + public let addonSubscriptions: [AddonSubscription]? + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + + public init(addonSubscriptions: [AddonSubscription]? = nil, nextToken: String? = nil) { + self.addonSubscriptions = addonSubscriptions + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case addonSubscriptions = "AddonSubscriptions" + case nextToken = "NextToken" + } + } + + public struct ListArchiveExportsRequest: AWSEncodableShape { + /// The identifier of the archive. + public let archiveId: String + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + /// The maximum number of archive export jobs that are returned per call. You can use NextToken to obtain further pages of archives. + public let pageSize: Int? + + public init(archiveId: String, nextToken: String? = nil, pageSize: Int? = nil) { + self.archiveId = archiveId + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 3) + try self.validate(self.archiveId, name: "archiveId", parent: name, pattern: "^a-[\\w]{1,64}$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListArchiveExportsResponse: AWSDecodableShape { + /// The list of export job identifiers and statuses. + public let exports: [ExportSummary]? + /// If present, use to retrieve the next page of results. + public let nextToken: String? + + public init(exports: [ExportSummary]? = nil, nextToken: String? = nil) { + self.exports = exports + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case exports = "Exports" + case nextToken = "NextToken" + } + } + + public struct ListArchiveSearchesRequest: AWSEncodableShape { + /// The identifier of the archive. + public let archiveId: String + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. 
Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + /// The maximum number of archive search jobs that are returned per call. You can use NextToken to obtain further pages of archives. + public let pageSize: Int? + + public init(archiveId: String, nextToken: String? = nil, pageSize: Int? = nil) { + self.archiveId = archiveId + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 3) + try self.validate(self.archiveId, name: "archiveId", parent: name, pattern: "^a-[\\w]{1,64}$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListArchiveSearchesResponse: AWSDecodableShape { + /// If present, use to retrieve the next page of results. + public let nextToken: String? + /// The list of search job identifiers and statuses. + public let searches: [SearchSummary]? + + public init(nextToken: String? = nil, searches: [SearchSummary]? = nil) { + self.nextToken = nextToken + self.searches = searches + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case searches = "Searches" + } + } + + public struct ListArchivesRequest: AWSEncodableShape { + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + /// The maximum number of archives that are returned per call. You can use NextToken to obtain further pages of archives. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? = nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListArchivesResponse: AWSDecodableShape { + /// The list of archive details. + public let archives: [Archive] + /// If present, use to retrieve the next page of results. + public let nextToken: String? + + public init(archives: [Archive], nextToken: String? = nil) { + self.archives = archives + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case archives = "Archives" + case nextToken = "NextToken" + } + } + + public struct ListIngressPointsRequest: AWSEncodableShape { + /// If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results. + public let nextToken: String? 
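Editorial aside: the NextToken/PageSize comments above describe the usual pagination contract. A minimal sketch using the ListArchivesRequest and ListArchivesResponse shapes defined here (the client call that actually sends each request is elided, since it is not part of this shapes file):

    // First page: no token. PageSize must be 1...50 per the validation above.
    let firstPage = ListArchivesRequest(nextToken: nil, pageSize: 50)
    try firstPage.validate(name: "ListArchivesRequest")

    // Given a decoded ListArchivesResponse, a non-nil nextToken means more results remain.
    func nextRequest(after page: ListArchivesResponse) -> ListArchivesRequest? {
        guard let token = page.nextToken else { return nil }
        return ListArchivesRequest(nextToken: token, pageSize: 50)
    }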
+ /// The maximum number of ingress endpoint resources that are returned per call. You can use NextToken to obtain further ingress endpoints. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? = nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListIngressPointsResponse: AWSDecodableShape { + /// The list of ingress endpoints. + public let ingressPoints: [IngressPoint]? + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + + public init(ingressPoints: [IngressPoint]? = nil, nextToken: String? = nil) { + self.ingressPoints = ingressPoints + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case ingressPoints = "IngressPoints" + case nextToken = "NextToken" + } + } + + public struct ListRelaysRequest: AWSEncodableShape { + /// If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results. + public let nextToken: String? + /// The number of relays to be returned in one request. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? = nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListRelaysResponse: AWSDecodableShape { + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + /// The list of returned relays. + public let relays: [Relay] + + public init(nextToken: String? = nil, relays: [Relay]) { + self.nextToken = nextToken + self.relays = relays + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case relays = "Relays" + } + } + + public struct ListRuleSetsRequest: AWSEncodableShape { + /// If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results. + public let nextToken: String? + /// The maximum number of rule set resources that are returned per call. You can use NextToken to obtain further rule sets. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? 
= nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListRuleSetsResponse: AWSDecodableShape { + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. + public let nextToken: String? + /// The list of rule sets. + public let ruleSets: [RuleSet] + + public init(nextToken: String? = nil, ruleSets: [RuleSet]) { + self.nextToken = nextToken + self.ruleSets = ruleSets + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case ruleSets = "RuleSets" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource to retrieve tags from. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):ses:[a-z0-9-]{1,20}:[0-9]{12}:(mailmanager-|addon-).+$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + } + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag] + + public init(tags: [Tag]) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct ListTrafficPoliciesRequest: AWSEncodableShape { + /// If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results. + public let nextToken: String? + /// The maximum number of traffic policy resources that are returned per call. You can use NextToken to obtain further traffic policies. + public let pageSize: Int? + + public init(nextToken: String? = nil, pageSize: Int? = nil) { + self.nextToken = nextToken + self.pageSize = pageSize + } + + public func validate(name: String) throws { + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.pageSize, name: "pageSize", parent: name, max: 50) + try self.validate(self.pageSize, name: "pageSize", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case pageSize = "PageSize" + } + } + + public struct ListTrafficPoliciesResponse: AWSDecodableShape { + /// If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. 
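Editorial aside: the resourceArn validation above restricts ARNs to the ses mailmanager-/addon- namespaces. A small sketch, where the region, account ID, and resource path are placeholders chosen only to satisfy that pattern:

    let listTags = ListTagsForResourceRequest(
        resourceArn: "arn:aws:ses:us-east-1:123456789012:mailmanager-ingress-point/inp-example"
    )
    try listTags.validate(name: "ListTagsForResourceRequest")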
+ public let nextToken: String? + /// The list of traffic policies. + public let trafficPolicies: [TrafficPolicy]? + + public init(nextToken: String? = nil, trafficPolicies: [TrafficPolicy]? = nil) { + self.nextToken = nextToken + self.trafficPolicies = trafficPolicies + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case trafficPolicies = "TrafficPolicies" + } + } + + public struct MessageBody: AWSDecodableShape { + /// The HTML body content of the message. + public let html: String? + /// A flag indicating if the email was malformed. + public let messageMalformed: Bool? + /// The plain text body content of the message. + public let text: String? + + public init(html: String? = nil, messageMalformed: Bool? = nil, text: String? = nil) { + self.html = html + self.messageMalformed = messageMalformed + self.text = text + } + + private enum CodingKeys: String, CodingKey { + case html = "Html" + case messageMalformed = "MessageMalformed" + case text = "Text" + } + } + + public struct NoAuthentication: AWSEncodableShape & AWSDecodableShape { + public init() {} + } + + public struct PolicyStatement: AWSEncodableShape & AWSDecodableShape { + /// The action that informs a traffic policy resource to either allow or block the email if it matches a condition in the policy statement. + public let action: AcceptAction + /// The list of conditions to apply to incoming messages for filtering email traffic. + public let conditions: [PolicyCondition] + + public init(action: AcceptAction, conditions: [PolicyCondition]) { + self.action = action + self.conditions = conditions + } + + public func validate(name: String) throws { + try self.conditions.forEach { + try $0.validate(name: "\(name).conditions[]") + } + try self.validate(self.conditions, name: "conditions", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case action = "Action" + case conditions = "Conditions" + } + } + + public struct Relay: AWSDecodableShape { + /// The timestamp of when the relay was last modified. + public let lastModifiedTimestamp: Date? + /// The unique relay identifier. + public let relayId: String? + /// The unique relay name. + public let relayName: String? + + public init(lastModifiedTimestamp: Date? = nil, relayId: String? = nil, relayName: String? = nil) { + self.lastModifiedTimestamp = lastModifiedTimestamp + self.relayId = relayId + self.relayName = relayName + } + + private enum CodingKeys: String, CodingKey { + case lastModifiedTimestamp = "LastModifiedTimestamp" + case relayId = "RelayId" + case relayName = "RelayName" + } + } + + public struct RelayAction: AWSEncodableShape & AWSDecodableShape { + /// A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified relay has been deleted. + public let actionFailurePolicy: ActionFailurePolicy? + /// This action specifies whether to preserve or replace original mail from address while relaying received emails to a destination server. + public let mailFrom: MailFrom? + /// The identifier of the relay resource to be used when relaying an email. + public let relay: String + + public init(actionFailurePolicy: ActionFailurePolicy? = nil, mailFrom: MailFrom? 
= nil, relay: String) { + self.actionFailurePolicy = actionFailurePolicy + self.mailFrom = mailFrom + self.relay = relay + } + + public func validate(name: String) throws { + try self.validate(self.relay, name: "relay", parent: name, max: 2048) + try self.validate(self.relay, name: "relay", parent: name, min: 1) + try self.validate(self.relay, name: "relay", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + } + + private enum CodingKeys: String, CodingKey { + case actionFailurePolicy = "ActionFailurePolicy" + case mailFrom = "MailFrom" + case relay = "Relay" + } + } + + public struct ReplaceRecipientAction: AWSEncodableShape & AWSDecodableShape { + /// This action specifies the replacement recipient email addresses to insert. + public let replaceWith: [String]? + + public init(replaceWith: [String]? = nil) { + self.replaceWith = replaceWith + } + + public func validate(name: String) throws { + try self.replaceWith?.forEach { + try validate($0, name: "replaceWith[]", parent: name, max: 254) + try validate($0, name: "replaceWith[]", parent: name, pattern: "^[0-9A-Za-z@+.-]+$") + } + try self.validate(self.replaceWith, name: "replaceWith", parent: name, max: 100) + try self.validate(self.replaceWith, name: "replaceWith", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case replaceWith = "ReplaceWith" + } + } + + public struct Row: AWSDecodableShape { + /// The unique identifier of the archived message. + public let archivedMessageId: String? + /// The email addresses in the CC header. + public let cc: String? + /// The date the email was sent. + public let date: String? + /// The email address of the sender. + public let from: String? + /// A flag indicating if the email has attachments. + public let hasAttachments: Bool? + /// The email message ID this is a reply to. + public let inReplyTo: String? + /// The unique message ID of the email. + public let messageId: String? + /// The received headers from the email delivery path. + public let receivedHeaders: [String]? + /// The timestamp of when the email was received. + public let receivedTimestamp: Date? + /// The subject header value of the email. + public let subject: String? + /// The email addresses in the To header. + public let to: String? + /// The user agent that sent the email. + public let xMailer: String? + /// The original user agent that sent the email. + public let xOriginalMailer: String? + /// The priority level of the email. + public let xPriority: String? + + public init(archivedMessageId: String? = nil, cc: String? = nil, date: String? = nil, from: String? = nil, hasAttachments: Bool? = nil, inReplyTo: String? = nil, messageId: String? = nil, receivedHeaders: [String]? = nil, receivedTimestamp: Date? = nil, subject: String? = nil, to: String? = nil, xMailer: String? = nil, xOriginalMailer: String? = nil, xPriority: String? 
= nil) { + self.archivedMessageId = archivedMessageId + self.cc = cc + self.date = date + self.from = from + self.hasAttachments = hasAttachments + self.inReplyTo = inReplyTo + self.messageId = messageId + self.receivedHeaders = receivedHeaders + self.receivedTimestamp = receivedTimestamp + self.subject = subject + self.to = to + self.xMailer = xMailer + self.xOriginalMailer = xOriginalMailer + self.xPriority = xPriority + } + + private enum CodingKeys: String, CodingKey { + case archivedMessageId = "ArchivedMessageId" + case cc = "Cc" + case date = "Date" + case from = "From" + case hasAttachments = "HasAttachments" + case inReplyTo = "InReplyTo" + case messageId = "MessageId" + case receivedHeaders = "ReceivedHeaders" + case receivedTimestamp = "ReceivedTimestamp" + case subject = "Subject" + case to = "To" + case xMailer = "XMailer" + case xOriginalMailer = "XOriginalMailer" + case xPriority = "XPriority" + } + } + + public struct Rule: AWSEncodableShape & AWSDecodableShape { + /// The list of actions to execute when the conditions match the incoming email, and none of the "unless conditions" match. + public let actions: [RuleAction] + /// The conditions of this rule. All conditions must match the email for the actions to be executed. An empty list of conditions means that all emails match, but are still subject to any "unless conditions" + public let conditions: [RuleCondition]? + /// The user-friendly name of the rule. + public let name: String? + /// The "unless conditions" of this rule. None of the conditions can match the email for the actions to be executed. If any of these conditions do match the email, then the actions are not executed. + public let unless: [RuleCondition]? + + public init(actions: [RuleAction], conditions: [RuleCondition]? = nil, name: String? = nil, unless: [RuleCondition]? = nil) { + self.actions = actions + self.conditions = conditions + self.name = name + self.unless = unless + } + + public func validate(name: String) throws { + try self.actions.forEach { + try $0.validate(name: "\(name).actions[]") + } + try self.validate(self.actions, name: "actions", parent: name, max: 10) + try self.validate(self.actions, name: "actions", parent: name, min: 1) + try self.conditions?.forEach { + try $0.validate(name: "\(name).conditions[]") + } + try self.validate(self.conditions, name: "conditions", parent: name, max: 10) + try self.validate(self.name, name: "name", parent: name, max: 32) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.unless?.forEach { + try $0.validate(name: "\(name).unless[]") + } + try self.validate(self.unless, name: "unless", parent: name, max: 10) + } + + private enum CodingKeys: String, CodingKey { + case actions = "Actions" + case conditions = "Conditions" + case name = "Name" + case unless = "Unless" + } + } + + public struct RuleBooleanExpression: AWSEncodableShape & AWSDecodableShape { + /// The operand on which to perform a boolean condition operation. + public let evaluate: RuleBooleanToEvaluate + /// The matching operator for a boolean condition expression. 
+ public let `operator`: RuleBooleanOperator + + public init(evaluate: RuleBooleanToEvaluate, operator: RuleBooleanOperator) { + self.evaluate = evaluate + self.`operator` = `operator` + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + } + } + + public struct RuleDmarcExpression: AWSEncodableShape & AWSDecodableShape { + /// The operator to apply to the DMARC policy of the incoming email. + public let `operator`: RuleDmarcOperator + /// The values to use for the given DMARC policy operator. For the operator EQUALS, if multiple values are given, they are evaluated as an OR. That is, if any of the given values match, the condition is deemed to match. For the operator NOT_EQUALS, if multiple values are given, they are evaluated as an AND. That is, only if the email's DMARC policy is not equal to any of the given values, then the condition is deemed to match. + public let values: [RuleDmarcPolicy] + + public init(operator: RuleDmarcOperator, values: [RuleDmarcPolicy]) { + self.`operator` = `operator` + self.values = values + } + + public func validate(name: String) throws { + try self.validate(self.values, name: "values", parent: name, max: 10) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case `operator` = "Operator" + case values = "Values" + } + } + + public struct RuleIpExpression: AWSEncodableShape & AWSDecodableShape { + /// The IP address to evaluate in this condition. + public let evaluate: RuleIpToEvaluate + /// The operator to evaluate the IP address. + public let `operator`: RuleIpOperator + /// The IP CIDR blocks in format "x.y.z.w/n" (eg 10.0.0.0/8) to match with the email's IP address. For the operator CIDR_MATCHES, if multiple values are given, they are evaluated as an OR. That is, if the IP address is contained within any of the given CIDR ranges, the condition is deemed to match. For NOT_CIDR_MATCHES, if multiple CIDR ranges are given, the condition is deemed to match if the IP address is not contained in any of the given CIDR ranges. + public let values: [String] + + public init(evaluate: RuleIpToEvaluate, operator: RuleIpOperator, values: [String]) { + self.evaluate = evaluate + self.`operator` = `operator` + self.values = values + } + + public func validate(name: String) throws { + try self.values.forEach { + try validate($0, name: "values[]", parent: name, max: 18) + try validate($0, name: "values[]", parent: name, min: 1) + try validate($0, name: "values[]", parent: name, pattern: "^(([0-9]|.|/)*)$") + } + try self.validate(self.values, name: "values", parent: name, max: 10) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case values = "Values" + } + } + + public struct RuleNumberExpression: AWSEncodableShape & AWSDecodableShape { + /// The number to evaluate in a numeric condition expression. + public let evaluate: RuleNumberToEvaluate + /// The operator for a numeric condition expression. + public let `operator`: RuleNumberOperator + /// The value to evaluate in a numeric condition expression. 
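Editorial aside: the CIDR semantics described for RuleIpExpression above can be exercised directly. In the sketch below the enum cases .attribute(.senderIp) and .cidrMatches are assumptions about how Soto names the generated RuleIpToEvaluate and RuleIpOperator cases (those types are defined elsewhere in the file and are not shown in this excerpt):

    // Match emails whose sender IP falls inside either private range (case names assumed).
    let ipCondition = RuleIpExpression(
        evaluate: .attribute(.senderIp),
        operator: .cidrMatches,
        values: ["10.0.0.0/8", "192.168.0.0/16"]
    )
    try ipCondition.validate(name: "RuleIpExpression")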
+ public let value: Double + + public init(evaluate: RuleNumberToEvaluate, operator: RuleNumberOperator, value: Double) { + self.evaluate = evaluate + self.`operator` = `operator` + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case value = "Value" + } + } + + public struct RuleSet: AWSDecodableShape { + /// The last modification date of the rule set. + public let lastModificationDate: Date? + /// The identifier of the rule set. + public let ruleSetId: String? + /// A user-friendly name for the rule set. + public let ruleSetName: String? + + public init(lastModificationDate: Date? = nil, ruleSetId: String? = nil, ruleSetName: String? = nil) { + self.lastModificationDate = lastModificationDate + self.ruleSetId = ruleSetId + self.ruleSetName = ruleSetName + } + + private enum CodingKeys: String, CodingKey { + case lastModificationDate = "LastModificationDate" + case ruleSetId = "RuleSetId" + case ruleSetName = "RuleSetName" + } + } + + public struct RuleStringExpression: AWSEncodableShape & AWSDecodableShape { + /// The string to evaluate in a string condition expression. + public let evaluate: RuleStringToEvaluate + /// The matching operator for a string condition expression. + public let `operator`: RuleStringOperator + /// The string(s) to be evaluated in a string condition expression. For all operators, except for NOT_EQUALS, if multiple values are given, the values are processed as an OR. That is, if any of the values match the email's string using the given operator, the condition is deemed to match. However, for NOT_EQUALS, the condition is only deemed to match if none of the given strings match the email's string. + public let values: [String] + + public init(evaluate: RuleStringToEvaluate, operator: RuleStringOperator, values: [String]) { + self.evaluate = evaluate + self.`operator` = `operator` + self.values = values + } + + public func validate(name: String) throws { + try self.values.forEach { + try validate($0, name: "values[]", parent: name, max: 4096) + try validate($0, name: "values[]", parent: name, min: 1) + } + try self.validate(self.values, name: "values", parent: name, max: 10) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case values = "Values" + } + } + + public struct RuleVerdictExpression: AWSEncodableShape & AWSDecodableShape { + /// The verdict to evaluate in a verdict condition expression. + public let evaluate: RuleVerdictToEvaluate + /// The matching operator for a verdict condition expression. + public let `operator`: RuleVerdictOperator + /// The values to match with the email's verdict using the given operator. For the EQUALS operator, if multiple values are given, the condition is deemed to match if any of the given verdicts match that of the email. For the NOT_EQUALS operator, if multiple values are given, the condition is deemed to match if none of the given verdicts match the verdict of the email.
+ public let values: [RuleVerdict] + + public init(evaluate: RuleVerdictToEvaluate, operator: RuleVerdictOperator, values: [RuleVerdict]) { + self.evaluate = evaluate + self.`operator` = `operator` + self.values = values + } + + public func validate(name: String) throws { + try self.evaluate.validate(name: "\(name).evaluate") + try self.validate(self.values, name: "values", parent: name, max: 10) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case evaluate = "Evaluate" + case `operator` = "Operator" + case values = "Values" + } + } + + public struct S3Action: AWSEncodableShape & AWSDecodableShape { + /// A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the specified bucket has been deleted. + public let actionFailurePolicy: ActionFailurePolicy? + /// The Amazon Resource Name (ARN) of the IAM Role to use while writing to S3. This role must have access to the s3:PutObject, kms:Encrypt, and kms:GenerateDataKey APIs for the given bucket. + public let roleArn: String + /// The bucket name of the S3 bucket to write to. + public let s3Bucket: String + /// The S3 prefix to use for the write to the S3 bucket. + public let s3Prefix: String? + /// The KMS Key ID to use to encrypt the message in S3. + public let s3SseKmsKeyId: String? + + public init(actionFailurePolicy: ActionFailurePolicy? = nil, roleArn: String, s3Bucket: String, s3Prefix: String? = nil, s3SseKmsKeyId: String? = nil) { + self.actionFailurePolicy = actionFailurePolicy + self.roleArn = roleArn + self.s3Bucket = s3Bucket + self.s3Prefix = s3Prefix + self.s3SseKmsKeyId = s3SseKmsKeyId + } + + public func validate(name: String) throws { + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + try self.validate(self.s3Bucket, name: "s3Bucket", parent: name, max: 62) + try self.validate(self.s3Bucket, name: "s3Bucket", parent: name, min: 1) + try self.validate(self.s3Bucket, name: "s3Bucket", parent: name, pattern: "^[a-zA-Z0-9.-]+$") + try self.validate(self.s3Prefix, name: "s3Prefix", parent: name, max: 62) + try self.validate(self.s3Prefix, name: "s3Prefix", parent: name, min: 1) + try self.validate(self.s3Prefix, name: "s3Prefix", parent: name, pattern: "^[a-zA-Z0-9!_.*'()/-]+$") + try self.validate(self.s3SseKmsKeyId, name: "s3SseKmsKeyId", parent: name, max: 2048) + try self.validate(self.s3SseKmsKeyId, name: "s3SseKmsKeyId", parent: name, min: 20) + try self.validate(self.s3SseKmsKeyId, name: "s3SseKmsKeyId", parent: name, pattern: "^[a-zA-Z0-9-:/]+$") + } + + private enum CodingKeys: String, CodingKey { + case actionFailurePolicy = "ActionFailurePolicy" + case roleArn = "RoleArn" + case s3Bucket = "S3Bucket" + case s3Prefix = "S3Prefix" + case s3SseKmsKeyId = "S3SseKmsKeyId" + } + } + + public struct S3ExportDestinationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The S3 location to deliver the exported email data. + public let s3Location: String? + + public init(s3Location: String?
= nil) { + self.s3Location = s3Location + } + + public func validate(name: String) throws { + try self.validate(self.s3Location, name: "s3Location", parent: name, pattern: "^s3://[a-zA-Z0-9.-]{3,63}(/[a-zA-Z0-9!_.*'()/-]*)*$") + } + + private enum CodingKeys: String, CodingKey { + case s3Location = "S3Location" + } + } + + public struct SearchStatus: AWSDecodableShape { + /// The timestamp of when the search completed (if finished). + public let completionTimestamp: Date? + /// An error message if the search failed. + public let errorMessage: String? + /// The current state of the search job. + public let state: SearchState? + /// The timestamp of when the search was submitted. + public let submissionTimestamp: Date? + + public init(completionTimestamp: Date? = nil, errorMessage: String? = nil, state: SearchState? = nil, submissionTimestamp: Date? = nil) { + self.completionTimestamp = completionTimestamp + self.errorMessage = errorMessage + self.state = state + self.submissionTimestamp = submissionTimestamp + } + + private enum CodingKeys: String, CodingKey { + case completionTimestamp = "CompletionTimestamp" + case errorMessage = "ErrorMessage" + case state = "State" + case submissionTimestamp = "SubmissionTimestamp" + } + } + + public struct SearchSummary: AWSDecodableShape { + /// The unique identifier of the search job. + public let searchId: String? + /// The current status of the search job. + public let status: SearchStatus? + + public init(searchId: String? = nil, status: SearchStatus? = nil) { + self.searchId = searchId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case searchId = "SearchId" + case status = "Status" + } + } + + public struct SendAction: AWSEncodableShape & AWSDecodableShape { + /// A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the caller does not have the permissions to call the sendRawEmail API. + public let actionFailurePolicy: ActionFailurePolicy? + /// The Amazon Resource Name (ARN) of the role to use for this action. This role must have access to the ses:SendRawEmail API. + public let roleArn: String + + public init(actionFailurePolicy: ActionFailurePolicy? = nil, roleArn: String) { + self.actionFailurePolicy = actionFailurePolicy + self.roleArn = roleArn + } + + public func validate(name: String) throws { + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^[a-zA-Z0-9:_/+=,@.#-]+$") + } + + private enum CodingKeys: String, CodingKey { + case actionFailurePolicy = "ActionFailurePolicy" + case roleArn = "RoleArn" + } + } + + public struct StartArchiveExportRequest: AWSEncodableShape { + /// The identifier of the archive to export emails from. + public let archiveId: String + /// Details on where to deliver the exported email data. + public let exportDestinationConfiguration: ExportDestinationConfiguration + /// Criteria to filter which emails are included in the export. + public let filters: ArchiveFilters? + /// The start of the timestamp range to include emails from. + public let fromTimestamp: Date + /// The maximum number of email items to include in the export. + public let maxResults: Int? + /// The end of the timestamp range to include emails from. 
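Editorial aside: a minimal sketch of configuring the S3Action defined above, with a placeholder role ARN, bucket, and prefix chosen to satisfy its validation; the rule that would carry this action is not shown here:

    // Write matching messages to S3; the role must allow s3:PutObject
    // (plus the KMS calls if s3SseKmsKeyId were set).
    let s3Action = S3Action(
        roleArn: "arn:aws:iam::123456789012:role/MailManagerWriteToS3",
        s3Bucket: "example-mailmanager-archive",
        s3Prefix: "inbound/"
    )
    try s3Action.validate(name: "S3Action")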
+ public let toTimestamp: Date + + public init(archiveId: String, exportDestinationConfiguration: ExportDestinationConfiguration, filters: ArchiveFilters? = nil, fromTimestamp: Date, maxResults: Int? = nil, toTimestamp: Date) { + self.archiveId = archiveId + self.exportDestinationConfiguration = exportDestinationConfiguration + self.filters = filters + self.fromTimestamp = fromTimestamp + self.maxResults = maxResults + self.toTimestamp = toTimestamp + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 3) + try self.validate(self.archiveId, name: "archiveId", parent: name, pattern: "^a-[\\w]{1,64}$") + try self.exportDestinationConfiguration.validate(name: "\(name).exportDestinationConfiguration") + try self.filters?.validate(name: "\(name).filters") + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case exportDestinationConfiguration = "ExportDestinationConfiguration" + case filters = "Filters" + case fromTimestamp = "FromTimestamp" + case maxResults = "MaxResults" + case toTimestamp = "ToTimestamp" + } + } + + public struct StartArchiveExportResponse: AWSDecodableShape { + /// The unique identifier for the initiated export job. + public let exportId: String? + + public init(exportId: String? = nil) { + self.exportId = exportId + } + + private enum CodingKeys: String, CodingKey { + case exportId = "ExportId" + } + } + + public struct StartArchiveSearchRequest: AWSEncodableShape { + /// The identifier of the archive to search emails in. + public let archiveId: String + /// Criteria to filter which emails are included in the search results. + public let filters: ArchiveFilters? + /// The start timestamp of the range to search emails from. + public let fromTimestamp: Date + /// The maximum number of search results to return. + public let maxResults: Int + /// The end timestamp of the range to search emails from. + public let toTimestamp: Date + + public init(archiveId: String, filters: ArchiveFilters? = nil, fromTimestamp: Date, maxResults: Int, toTimestamp: Date) { + self.archiveId = archiveId + self.filters = filters + self.fromTimestamp = fromTimestamp + self.maxResults = maxResults + self.toTimestamp = toTimestamp + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 3) + try self.validate(self.archiveId, name: "archiveId", parent: name, pattern: "^a-[\\w]{1,64}$") + try self.filters?.validate(name: "\(name).filters") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case filters = "Filters" + case fromTimestamp = "FromTimestamp" + case maxResults = "MaxResults" + case toTimestamp = "ToTimestamp" + } + } + + public struct StartArchiveSearchResponse: AWSDecodableShape { + /// The unique identifier for the initiated search job. + public let searchId: String? + + public init(searchId: String? = nil) { + self.searchId = searchId + } + + private enum CodingKeys: String, CodingKey { + case searchId = "SearchId" + } + } + + public struct StopArchiveExportRequest: AWSEncodableShape { + /// The identifier of the export job to stop. 
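Editorial aside: a hedged sketch tying the archive-search shapes together, searching the last 24 hours of an archive; the archive ID is a placeholder matching the ^a-[\w]{1,64}$ pattern enforced above, and the optional ArchiveFilters are omitted:

    let now = Date()
    let search = StartArchiveSearchRequest(
        archiveId: "a-1234567890abcdef",
        filters: nil,
        fromTimestamp: now.addingTimeInterval(-86_400), // 24 hours ago
        maxResults: 100,                                // validated as 0...1000 above
        toTimestamp: now
    )
    try search.validate(name: "StartArchiveSearchRequest")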
+ public let exportId: String + + public init(exportId: String) { + self.exportId = exportId + } + + public func validate(name: String) throws { + try self.validate(self.exportId, name: "exportId", parent: name, max: 64) + try self.validate(self.exportId, name: "exportId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case exportId = "ExportId" + } + } + + public struct StopArchiveExportResponse: AWSDecodableShape { + public init() {} + } + + public struct StopArchiveSearchRequest: AWSEncodableShape { + /// The identifier of the search job to stop. + public let searchId: String + + public init(searchId: String) { + self.searchId = searchId + } + + public func validate(name: String) throws { + try self.validate(self.searchId, name: "searchId", parent: name, max: 64) + try self.validate(self.searchId, name: "searchId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case searchId = "SearchId" + } + } + + public struct StopArchiveSearchResponse: AWSDecodableShape { + public init() {} + } + + public struct Tag: AWSEncodableShape & AWSDecodableShape { + /// The key of the key-value tag. + public let key: String + /// The value of the key-value tag. + public let value: String + + public init(key: String, value: String) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.key, name: "key", parent: name, pattern: "^[a-zA-Z0-9/_\\+=\\.:@\\-]+$") + try self.validate(self.value, name: "value", parent: name, max: 256) + try self.validate(self.value, name: "value", parent: name, pattern: "^[a-zA-Z0-9/_\\+=\\.:@\\-]*$") + } + + private enum CodingKeys: String, CodingKey { + case key = "Key" + case value = "Value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource that you want to tag. + public let resourceArn: String + /// The tags used to organize, track, or control access for the resource. For example, { "tags": {"key1":"value1", "key2":"value2"} }. + public let tags: [Tag] + + public init(resourceArn: String, tags: [Tag]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):ses:[a-z0-9-]{1,20}:[0-9]{12}:(mailmanager-|addon-).+$") + try self.tags.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case tags = "Tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TrafficPolicy: AWSDecodableShape { + /// Default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or not addressed by) the conditions of your policy statements + public let defaultAction: AcceptAction + /// The identifier of the traffic policy resource. + public let trafficPolicyId: String + /// A user-friendly name of the traffic policy resource. 
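Editorial aside: the Tag and TagResourceRequest shapes above carry their own key/value constraints; a small sketch with a placeholder ARN, as before:

    let tagging = TagResourceRequest(
        resourceArn: "arn:aws:ses:us-east-1:123456789012:mailmanager-relay/r-example",
        tags: [Tag(key: "team", value: "mail-platform"), Tag(key: "env", value: "prod")]
    )
    try tagging.validate(name: "TagResourceRequest") // checks key/value patterns and the 200-tag cap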
+ public let trafficPolicyName: String + + public init(defaultAction: AcceptAction, trafficPolicyId: String, trafficPolicyName: String) { + self.defaultAction = defaultAction + self.trafficPolicyId = trafficPolicyId + self.trafficPolicyName = trafficPolicyName + } + + private enum CodingKeys: String, CodingKey { + case defaultAction = "DefaultAction" + case trafficPolicyId = "TrafficPolicyId" + case trafficPolicyName = "TrafficPolicyName" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource that you want to untag. + public let resourceArn: String + /// The keys of the key-value pairs for the tag or tags you want to remove from the specified resource. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws(|-cn|-us-gov):ses:[a-z0-9-]{1,20}:[0-9]{12}:(mailmanager-|addon-).+$") + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^[a-zA-Z0-9/_\\+=\\.:@\\-]+$") + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case tagKeys = "TagKeys" + } + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateArchiveRequest: AWSEncodableShape { + /// The identifier of the archive to update. + public let archiveId: String + /// A new, unique name for the archive. + public let archiveName: String? + /// A new retention period for emails in the archive. + public let retention: ArchiveRetention? + + public init(archiveId: String, archiveName: String? = nil, retention: ArchiveRetention? = nil) { + self.archiveId = archiveId + self.archiveName = archiveName + self.retention = retention + } + + public func validate(name: String) throws { + try self.validate(self.archiveId, name: "archiveId", parent: name, max: 66) + try self.validate(self.archiveId, name: "archiveId", parent: name, min: 1) + try self.validate(self.archiveName, name: "archiveName", parent: name, max: 64) + try self.validate(self.archiveName, name: "archiveName", parent: name, min: 1) + try self.validate(self.archiveName, name: "archiveName", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case archiveId = "ArchiveId" + case archiveName = "ArchiveName" + case retention = "Retention" + } + } + + public struct UpdateArchiveResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateIngressPointRequest: AWSEncodableShape { + /// If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret ARN. + public let ingressPointConfiguration: IngressPointConfiguration? + /// The identifier for the ingress endpoint you want to update. + public let ingressPointId: String + /// A user friendly name for the ingress endpoint resource. + public let ingressPointName: String? 
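Editorial aside: the update-style requests follow the same construct-then-validate pattern. For instance, renaming an archive with the UpdateArchiveRequest defined above (identifier and name are placeholders; passing nil leaves the retention setting unchanged):

    let rename = UpdateArchiveRequest(
        archiveId: "a-1234567890abcdef",
        archiveName: "inbound-mail-2024",
        retention: nil
    )
    try rename.validate(name: "UpdateArchiveRequest")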
+ /// The identifier of an existing rule set that you attach to an ingress endpoint resource. + public let ruleSetId: String? + /// The update status of an ingress endpoint. + public let statusToUpdate: IngressPointStatusToUpdate? + /// The identifier of an existing traffic policy that you attach to an ingress endpoint resource. + public let trafficPolicyId: String? + + public init(ingressPointConfiguration: IngressPointConfiguration? = nil, ingressPointId: String, ingressPointName: String? = nil, ruleSetId: String? = nil, statusToUpdate: IngressPointStatusToUpdate? = nil, trafficPolicyId: String? = nil) { + self.ingressPointConfiguration = ingressPointConfiguration + self.ingressPointId = ingressPointId + self.ingressPointName = ingressPointName + self.ruleSetId = ruleSetId + self.statusToUpdate = statusToUpdate + self.trafficPolicyId = trafficPolicyId + } + + public func validate(name: String) throws { + try self.ingressPointConfiguration?.validate(name: "\(name).ingressPointConfiguration") + try self.validate(self.ingressPointId, name: "ingressPointId", parent: name, max: 100) + try self.validate(self.ingressPointId, name: "ingressPointId", parent: name, min: 1) + try self.validate(self.ingressPointName, name: "ingressPointName", parent: name, max: 63) + try self.validate(self.ingressPointName, name: "ingressPointName", parent: name, min: 3) + try self.validate(self.ingressPointName, name: "ingressPointName", parent: name, pattern: "^[A-Za-z0-9_\\-]+$") + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, max: 100) + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, min: 1) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, max: 100) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case ingressPointConfiguration = "IngressPointConfiguration" + case ingressPointId = "IngressPointId" + case ingressPointName = "IngressPointName" + case ruleSetId = "RuleSetId" + case statusToUpdate = "StatusToUpdate" + case trafficPolicyId = "TrafficPolicyId" + } + } + + public struct UpdateIngressPointResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateRelayRequest: AWSEncodableShape { + /// Authentication for the relay destination server—specify the secretARN where the SMTP credentials are stored. + public let authentication: RelayAuthentication? + /// The unique relay identifier. + public let relayId: String + /// The name of the relay resource. + public let relayName: String? + /// The destination relay server address. + public let serverName: String? + /// The destination relay server port. + public let serverPort: Int? + + public init(authentication: RelayAuthentication? = nil, relayId: String, relayName: String? = nil, serverName: String? = nil, serverPort: Int? 
= nil) { + self.authentication = authentication + self.relayId = relayId + self.relayName = relayName + self.serverName = serverName + self.serverPort = serverPort + } + + public func validate(name: String) throws { + try self.authentication?.validate(name: "\(name).authentication") + try self.validate(self.relayId, name: "relayId", parent: name, max: 100) + try self.validate(self.relayId, name: "relayId", parent: name, min: 1) + try self.validate(self.relayId, name: "relayId", parent: name, pattern: "^[a-zA-Z0-9-]+$") + try self.validate(self.relayName, name: "relayName", parent: name, max: 100) + try self.validate(self.relayName, name: "relayName", parent: name, min: 1) + try self.validate(self.relayName, name: "relayName", parent: name, pattern: "^[a-zA-Z0-9-_]+$") + try self.validate(self.serverName, name: "serverName", parent: name, max: 100) + try self.validate(self.serverName, name: "serverName", parent: name, min: 1) + try self.validate(self.serverName, name: "serverName", parent: name, pattern: "^[a-zA-Z0-9-\\.]+$") + try self.validate(self.serverPort, name: "serverPort", parent: name, max: 65535) + try self.validate(self.serverPort, name: "serverPort", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case authentication = "Authentication" + case relayId = "RelayId" + case relayName = "RelayName" + case serverName = "ServerName" + case serverPort = "ServerPort" + } + } + + public struct UpdateRelayResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateRuleSetRequest: AWSEncodableShape { + /// A new set of rules to replace the current rules of the rule set—these rules will override all the rules of the rule set. + public let rules: [Rule]? + /// The identifier of a rule set you want to update. + public let ruleSetId: String + /// A user-friendly name for the rule set resource. + public let ruleSetName: String? + + public init(rules: [Rule]? = nil, ruleSetId: String, ruleSetName: String? = nil) { + self.rules = rules + self.ruleSetId = ruleSetId + self.ruleSetName = ruleSetName + } + + public func validate(name: String) throws { + try self.rules?.forEach { + try $0.validate(name: "\(name).rules[]") + } + try self.validate(self.rules, name: "rules", parent: name, max: 40) + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, max: 100) + try self.validate(self.ruleSetId, name: "ruleSetId", parent: name, min: 1) + try self.validate(self.ruleSetName, name: "ruleSetName", parent: name, max: 100) + try self.validate(self.ruleSetName, name: "ruleSetName", parent: name, min: 1) + try self.validate(self.ruleSetName, name: "ruleSetName", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + } + + private enum CodingKeys: String, CodingKey { + case rules = "Rules" + case ruleSetId = "RuleSetId" + case ruleSetName = "RuleSetName" + } + } + + public struct UpdateRuleSetResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateTrafficPolicyRequest: AWSEncodableShape { + /// Default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or not addressed by) the conditions of your policy statements + public let defaultAction: AcceptAction? + /// The maximum message size in bytes of email which is allowed in by this traffic policy—anything larger will be blocked. + public let maxMessageSizeBytes: Int? + /// The list of conditions to be updated for filtering email traffic. + public let policyStatements: [PolicyStatement]? 
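// Usage sketch (illustrative, not generated SDK code): pointing an existing relay at a new SMTP server
// with the UpdateRelayRequest shape above. The relay identifier, server values, and the `mailManager`
// client are assumptions; `updateRelay(_:)` is the generated operation for this shape.
import SotoMailManager  // product name assumed from the package layout

func repointRelay(_ mailManager: MailManager) async throws {
    let request = UpdateRelayRequest(
        relayId: "relay-0123456789",
        serverName: "smtp.example.com",
        serverPort: 587
    )
    // Checks the id/name patterns and the 1...65535 port range before any network call.
    try request.validate(name: "UpdateRelayRequest")
    _ = try await mailManager.updateRelay(request)
}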
+ /// The identifier of the traffic policy that you want to update. + public let trafficPolicyId: String + /// A user-friendly name for the traffic policy resource. + public let trafficPolicyName: String? + + public init(defaultAction: AcceptAction? = nil, maxMessageSizeBytes: Int? = nil, policyStatements: [PolicyStatement]? = nil, trafficPolicyId: String, trafficPolicyName: String? = nil) { + self.defaultAction = defaultAction + self.maxMessageSizeBytes = maxMessageSizeBytes + self.policyStatements = policyStatements + self.trafficPolicyId = trafficPolicyId + self.trafficPolicyName = trafficPolicyName + } + + public func validate(name: String) throws { + try self.validate(self.maxMessageSizeBytes, name: "maxMessageSizeBytes", parent: name, min: 1) + try self.policyStatements?.forEach { + try $0.validate(name: "\(name).policyStatements[]") + } + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, max: 100) + try self.validate(self.trafficPolicyId, name: "trafficPolicyId", parent: name, min: 1) + try self.validate(self.trafficPolicyName, name: "trafficPolicyName", parent: name, max: 63) + try self.validate(self.trafficPolicyName, name: "trafficPolicyName", parent: name, min: 3) + try self.validate(self.trafficPolicyName, name: "trafficPolicyName", parent: name, pattern: "^[A-Za-z0-9_\\-]+$") + } + + private enum CodingKeys: String, CodingKey { + case defaultAction = "DefaultAction" + case maxMessageSizeBytes = "MaxMessageSizeBytes" + case policyStatements = "PolicyStatements" + case trafficPolicyId = "TrafficPolicyId" + case trafficPolicyName = "TrafficPolicyName" + } + } + + public struct UpdateTrafficPolicyResponse: AWSDecodableShape { + public init() {} + } + + public struct ArchiveBooleanToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The name of the email attribute to evaluate. + public let attribute: ArchiveBooleanEmailAttribute? + + public init(attribute: ArchiveBooleanEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct ArchiveRetention: AWSEncodableShape & AWSDecodableShape { + /// The enum value sets the period for retaining emails in an archive. + public let retentionPeriod: RetentionPeriod? + + public init(retentionPeriod: RetentionPeriod? = nil) { + self.retentionPeriod = retentionPeriod + } + + private enum CodingKeys: String, CodingKey { + case retentionPeriod = "RetentionPeriod" + } + } + + public struct ArchiveStringToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The name of the email attribute to evaluate. + public let attribute: ArchiveStringEmailAttribute? + + public init(attribute: ArchiveStringEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct ExportDestinationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Configuration for delivering to an Amazon S3 bucket. + public let s3: S3ExportDestinationConfiguration? + + public init(s3: S3ExportDestinationConfiguration? = nil) { + self.s3 = s3 + } + + public func validate(name: String) throws { + try self.s3?.validate(name: "\(name).s3") + } + + private enum CodingKeys: String, CodingKey { + case s3 = "S3" + } + } + + public struct IngressBooleanToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The structure type for a boolean condition stating the Add On ARN and its returned value. + public let analysis: IngressAnalysis? 
+ + public init(analysis: IngressAnalysis? = nil) { + self.analysis = analysis + } + + public func validate(name: String) throws { + try self.analysis?.validate(name: "\(name).analysis") + } + + private enum CodingKeys: String, CodingKey { + case analysis = "Analysis" + } + } + + public struct IngressIpToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// An enum type representing the allowed attribute types for an IP condition. + public let attribute: IngressIpv4Attribute? + + public init(attribute: IngressIpv4Attribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct IngressStringToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The enum type representing the allowed attribute types for a string condition. + public let attribute: IngressStringEmailAttribute? + + public init(attribute: IngressStringEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct IngressTlsProtocolToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The enum type representing the allowed attribute types for the TLS condition. + public let attribute: IngressTlsAttribute? + + public init(attribute: IngressTlsAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct RuleBooleanToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The boolean type representing the allowed attribute types for an email. + public let attribute: RuleBooleanEmailAttribute? + + public init(attribute: RuleBooleanEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct RuleIpToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The attribute of the email to evaluate. + public let attribute: RuleIpEmailAttribute? + + public init(attribute: RuleIpEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct RuleNumberToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// An email attribute that is used as the number to evaluate. + public let attribute: RuleNumberEmailAttribute? + + public init(attribute: RuleNumberEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } + + public struct RuleStringToEvaluate: AWSEncodableShape & AWSDecodableShape { + /// The email attribute to evaluate in a string condition expression. + public let attribute: RuleStringEmailAttribute? + + public init(attribute: RuleStringEmailAttribute? = nil) { + self.attribute = attribute + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + } + } +} + +// MARK: - Errors + +/// Error enum for MailManager +public struct MailManagerErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? 
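// Usage sketch (illustrative, not generated SDK code): branching on the typed error defined here. The
// search identifier and the `mailManager` client are assumptions; `stopArchiveSearch(_:)` is the
// generated operation for the StopArchiveSearchRequest shape earlier in this file, and the Equatable
// and CustomStringConvertible conformances used below are declared just after this struct.
import SotoMailManager  // product name assumed from the package layout

func stopSearchIfStillRunning(_ mailManager: MailManager, searchId: String) async {
    do {
        _ = try await mailManager.stopArchiveSearch(StopArchiveSearchRequest(searchId: searchId))
    } catch let error as MailManagerErrorType where error == .resourceNotFoundException {
        print("search \(searchId) no longer exists: \(error)")
    } catch {
        print("unexpected failure stopping search: \(error)")
    }
}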
+ + /// initialize MailManager + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// Occurs when a user is denied access to a specific resource or action. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The request configuration has conflicts. For details, see the accompanying error message. + public static var conflictException: Self { .init(.conflictException) } + /// Occurs when a requested resource is not found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// Occurs when an operation exceeds a predefined service quota or limit. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// Occurs when a service's request rate limit is exceeded, resulting in throttling of further requests. + public static var throttlingException: Self { .init(.throttlingException) } + /// The request validation has failed. For details, see the accompanying error message. + public static var validationException: Self { .init(.validationException) } +} + +extension MailManagerErrorType: Equatable { + public static func == (lhs: MailManagerErrorType, rhs: MailManagerErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension MailManagerErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? "")" + } +} diff --git a/Sources/Soto/Services/ManagedBlockchain/ManagedBlockchain_shapes.swift b/Sources/Soto/Services/ManagedBlockchain/ManagedBlockchain_shapes.swift index 5f13217324..5683dedf12 100644 --- a/Sources/Soto/Services/ManagedBlockchain/ManagedBlockchain_shapes.swift +++ b/Sources/Soto/Services/ManagedBlockchain/ManagedBlockchain_shapes.swift @@ -239,7 +239,7 @@ extension ManagedBlockchain { public let accessorType: AccessorType /// This is a unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than once. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an Amazon Web Services SDK or the Amazon Web Services CLI. public let clientRequestToken: String - /// The blockchain network that the Accessor token is created for. We recommend using the appropriate networkType value for the blockchain network that you are creating the Accessor token for. You cannnot use the value ETHEREUM_MAINNET_AND_GOERLI to specify a networkType for your Accessor token. The default value of ETHEREUM_MAINNET_AND_GOERLI is only applied: when the CreateAccessor action does not set a networkType. to all existing Accessor tokens that were created before the networkType property was introduced. + /// The blockchain network that the Accessor token is created for. Use the actual networkType value for the blockchain network that you are creating the Accessor token for. With the shut down of the Ethereum Goerli and Polygon Mumbai Testnet networks the following networkType values are no longer available for selection and use. ETHEREUM_MAINNET_AND_GOERLI ETHEREUM_GOERLI POLYGON_MUMBAI However, your existing Accessor tokens with these networkType values will remain unchanged. 
public let networkType: AccessorNetworkType? /// Tags to assign to the Accessor. Each tag consists of a key and an optional value. You can specify multiple key-value pairs in a single request with an overall maximum of 50 tags allowed per resource. For more information about tags, see Tagging Resources in the Amazon Managed Blockchain Ethereum Developer Guide, or Tagging Resources in the Amazon Managed Blockchain Hyperledger Fabric Developer Guide. public let tags: [String: String]? @@ -433,7 +433,7 @@ extension ManagedBlockchain { public let clientRequestToken: String /// The unique identifier of the member that owns this node. Applies only to Hyperledger Fabric. public let memberId: String? - /// The unique identifier of the network for the node. Ethereum public networks have the following NetworkIds: n-ethereum-mainnet n-ethereum-goerli + /// The unique identifier of the network for the node. Ethereum public networks have the following NetworkIds: n-ethereum-mainnet public let networkId: String /// The properties of a node configuration. public let nodeConfiguration: NodeConfiguration @@ -628,7 +628,7 @@ extension ManagedBlockchain { public struct DeleteNodeInput: AWSEncodableShape { /// The unique identifier of the member that owns this node. Applies only to Hyperledger Fabric and is required for Hyperledger Fabric. public let memberId: String? - /// The unique identifier of the network that the node is on. Ethereum public networks have the following NetworkIds: n-ethereum-mainnet n-ethereum-goerli + /// The unique identifier of the network that the node is on. Ethereum public networks have the following NetworkIds: n-ethereum-mainnet public let networkId: String /// The unique identifier of the node. public let nodeId: String @@ -1647,7 +1647,7 @@ extension ManagedBlockchain { } public struct NetworkEthereumAttributes: AWSDecodableShape { - /// The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows: mainnet = 1 goerli = 5 + /// The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows: mainnet = 1 public let chainId: String? public init(chainId: String? = nil) { diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift index cf70ebd9fc..5a4aba5076 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_api.swift @@ -220,7 +220,7 @@ public struct MediaConvert: AWSService { ) } - /// Send an request with an empty body to the regional API endpoint to get your account API endpoint. + /// Send a request with an empty body to the regional API endpoint to get your account API endpoint. Note that DescribeEndpoints is no longer required. We recommend that you send your requests directly to the regional endpoint instead. @available(*, deprecated, message: "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead.") @Sendable public func describeEndpoints(_ input: DescribeEndpointsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEndpointsResponse { @@ -390,6 +390,19 @@ public struct MediaConvert: AWSService { ) } + /// Retrieve a JSON array that includes job details for up to twenty of your most recent jobs. Optionally filter results further according to input file, queue, or status. 
To retrieve the twenty next most recent jobs, use the nextToken string returned with the array. + @Sendable + public func searchJobs(_ input: SearchJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchJobsResponse { + return try await self.client.execute( + operation: "SearchJobs", + path: "/2017-08-29/search", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Add tags to a MediaConvert queue, preset, or job template. For information about tagging, see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/tagging-resources.html @Sendable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -469,7 +482,7 @@ extension MediaConvert { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension MediaConvert { - /// Send an request with an empty body to the regional API endpoint to get your account API endpoint. + /// Send a request with an empty body to the regional API endpoint to get your account API endpoint. Note that DescribeEndpoints is no longer required. We recommend that you send your requests directly to the regional endpoint instead. /// Return PaginatorSequence for operation. /// /// - Parameters: @@ -564,6 +577,25 @@ extension MediaConvert { logger: logger ) } + + /// Retrieve a JSON array that includes job details for up to twenty of your most recent jobs. Optionally filter results further according to input file, queue, or status. To retrieve the twenty next most recent jobs, use the nextToken string returned with the array. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func searchJobsPaginator( + _ input: SearchJobsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<SearchJobsRequest, SearchJobsResponse> { + return .init( + input: input, + command: self.searchJobs, + inputKey: \SearchJobsRequest.nextToken, + outputKey: \SearchJobsResponse.nextToken, + logger: logger + ) + } } extension MediaConvert.DescribeEndpointsRequest: AWSPaginateToken { @@ -622,3 +654,16 @@ extension MediaConvert.ListQueuesRequest: AWSPaginateToken { ) } } + +extension MediaConvert.SearchJobsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> MediaConvert.SearchJobsRequest { + return .init( + inputFile: self.inputFile, + maxResults: self.maxResults, + nextToken: token, + order: self.order, + queue: self.queue, + status: self.status + ) + } +} diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift index 7a9a4feee3..7b50a4503e 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift @@ -4508,6 +4508,8 @@ extension MediaConvert { public let clientCache: CmafClientCache? /// Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist generation. public let codecSpecification: CmafCodecSpecification? + /// Specify whether MediaConvert generates I-frame only video segments for DASH trick play, also known as trick mode. When specified, the I-frame only video segments are included within an additional AdaptationSet in your DASH output manifest. To generate I-frame only video segments: Enter a name as a text string, up to 256 characters long. 
This name is appended to the end of this output group's base filename, that you specify as part of your destination URI, and used for the I-frame only video segment files. You may also include format identifiers. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html#using-settings-variables-with-streaming-outputs To not generate I-frame only video segments: Leave blank. + public let dashIFrameTrickPlayNameModifier: String? /// Specify how MediaConvert writes SegmentTimeline in your output DASH manifest. To write a SegmentTimeline in each video Representation: Keep the default value, Basic. To write a common SegmentTimeline in the video AdaptationSet: Choose Compact. Note that MediaConvert will still write a SegmentTimeline in any Representation that does not share a common timeline. To write a video AdaptationSet for each different output framerate, and a common SegmentTimeline in each AdaptationSet: Choose Distinct. public let dashManifestStyle: DashManifestStyle? /// Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file. @@ -4555,11 +4557,12 @@ extension MediaConvert { /// When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element. public let writeSegmentTimelineInRepresentation: CmafWriteSegmentTimelineInRepresentation? - public init(additionalManifests: [CmafAdditionalManifest]? = nil, baseUrl: String? = nil, clientCache: CmafClientCache? = nil, codecSpecification: CmafCodecSpecification? = nil, dashManifestStyle: DashManifestStyle? = nil, destination: String? = nil, destinationSettings: DestinationSettings? = nil, encryption: CmafEncryptionSettings? = nil, fragmentLength: Int? = nil, imageBasedTrickPlay: CmafImageBasedTrickPlay? = nil, imageBasedTrickPlaySettings: CmafImageBasedTrickPlaySettings? = nil, manifestCompression: CmafManifestCompression? = nil, manifestDurationFormat: CmafManifestDurationFormat? = nil, minBufferTime: Int? = nil, minFinalSegmentLength: Double? = nil, mpdManifestBandwidthType: CmafMpdManifestBandwidthType? = nil, mpdProfile: CmafMpdProfile? = nil, ptsOffsetHandlingForBFrames: CmafPtsOffsetHandlingForBFrames? = nil, segmentControl: CmafSegmentControl? = nil, segmentLength: Int? = nil, segmentLengthControl: CmafSegmentLengthControl? = nil, streamInfResolution: CmafStreamInfResolution? = nil, targetDurationCompatibilityMode: CmafTargetDurationCompatibilityMode? = nil, videoCompositionOffsets: CmafVideoCompositionOffsets? = nil, writeDashManifest: CmafWriteDASHManifest? = nil, writeHlsManifest: CmafWriteHLSManifest? = nil, writeSegmentTimelineInRepresentation: CmafWriteSegmentTimelineInRepresentation? = nil) { + public init(additionalManifests: [CmafAdditionalManifest]? = nil, baseUrl: String? = nil, clientCache: CmafClientCache? = nil, codecSpecification: CmafCodecSpecification? = nil, dashIFrameTrickPlayNameModifier: String? = nil, dashManifestStyle: DashManifestStyle? 
= nil, destination: String? = nil, destinationSettings: DestinationSettings? = nil, encryption: CmafEncryptionSettings? = nil, fragmentLength: Int? = nil, imageBasedTrickPlay: CmafImageBasedTrickPlay? = nil, imageBasedTrickPlaySettings: CmafImageBasedTrickPlaySettings? = nil, manifestCompression: CmafManifestCompression? = nil, manifestDurationFormat: CmafManifestDurationFormat? = nil, minBufferTime: Int? = nil, minFinalSegmentLength: Double? = nil, mpdManifestBandwidthType: CmafMpdManifestBandwidthType? = nil, mpdProfile: CmafMpdProfile? = nil, ptsOffsetHandlingForBFrames: CmafPtsOffsetHandlingForBFrames? = nil, segmentControl: CmafSegmentControl? = nil, segmentLength: Int? = nil, segmentLengthControl: CmafSegmentLengthControl? = nil, streamInfResolution: CmafStreamInfResolution? = nil, targetDurationCompatibilityMode: CmafTargetDurationCompatibilityMode? = nil, videoCompositionOffsets: CmafVideoCompositionOffsets? = nil, writeDashManifest: CmafWriteDASHManifest? = nil, writeHlsManifest: CmafWriteHLSManifest? = nil, writeSegmentTimelineInRepresentation: CmafWriteSegmentTimelineInRepresentation? = nil) { self.additionalManifests = additionalManifests self.baseUrl = baseUrl self.clientCache = clientCache self.codecSpecification = codecSpecification + self.dashIFrameTrickPlayNameModifier = dashIFrameTrickPlayNameModifier self.dashManifestStyle = dashManifestStyle self.destination = destination self.destinationSettings = destinationSettings @@ -4589,6 +4592,8 @@ extension MediaConvert { try self.additionalManifests?.forEach { try $0.validate(name: "\(name).additionalManifests[]") } + try self.validate(self.dashIFrameTrickPlayNameModifier, name: "dashIFrameTrickPlayNameModifier", parent: name, max: 256) + try self.validate(self.dashIFrameTrickPlayNameModifier, name: "dashIFrameTrickPlayNameModifier", parent: name, min: 1) try self.validate(self.destination, name: "destination", parent: name, pattern: "^s3:\\/\\/") try self.destinationSettings?.validate(name: "\(name).destinationSettings") try self.encryption?.validate(name: "\(name).encryption") @@ -4606,6 +4611,7 @@ extension MediaConvert { case baseUrl = "baseUrl" case clientCache = "clientCache" case codecSpecification = "codecSpecification" + case dashIFrameTrickPlayNameModifier = "dashIFrameTrickPlayNameModifier" case dashManifestStyle = "dashManifestStyle" case destination = "destination" case destinationSettings = "destinationSettings" @@ -5209,6 +5215,8 @@ extension MediaConvert { public let audioChannelConfigSchemeIdUri: DashIsoGroupAudioChannelConfigSchemeIdUri? /// A partial URI prefix that will be put in the manifest (.mpd) file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file. public let baseUrl: String? + /// Specify whether MediaConvert generates I-frame only video segments for DASH trick play, also known as trick mode. When specified, the I-frame only video segments are included within an additional AdaptationSet in your DASH output manifest. To generate I-frame only video segments: Enter a name as a text string, up to 256 character long. This name is appended to the end of this output group's base filename, that you specify as part of your destination URI, and used for the I-frame only video segment files. You may also include format identifiers. 
For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html#using-settings-variables-with-streaming-outputs To not generate I-frame only video segments: Leave blank. + public let dashIFrameTrickPlayNameModifier: String? /// Specify how MediaConvert writes SegmentTimeline in your output DASH manifest. To write a SegmentTimeline in each video Representation: Keep the default value, Basic. To write a common SegmentTimeline in the video AdaptationSet: Choose Compact. Note that MediaConvert will still write a SegmentTimeline in any Representation that does not share a common timeline. To write a video AdaptationSet for each different output framerate, and a common SegmentTimeline in each AdaptationSet: Choose Distinct. public let dashManifestStyle: DashManifestStyle? /// Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file. @@ -5246,10 +5254,11 @@ extension MediaConvert { /// If you get an HTTP error in the 400 range when you play back your DASH output, enable this setting and run your transcoding job again. When you enable this setting, the service writes precise segment durations in the DASH manifest. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When you don't enable this setting, the service writes approximate segment durations in your DASH manifest. public let writeSegmentTimelineInRepresentation: DashIsoWriteSegmentTimelineInRepresentation? - public init(additionalManifests: [DashAdditionalManifest]? = nil, audioChannelConfigSchemeIdUri: DashIsoGroupAudioChannelConfigSchemeIdUri? = nil, baseUrl: String? = nil, dashManifestStyle: DashManifestStyle? = nil, destination: String? = nil, destinationSettings: DestinationSettings? = nil, encryption: DashIsoEncryptionSettings? = nil, fragmentLength: Int? = nil, hbbtvCompliance: DashIsoHbbtvCompliance? = nil, imageBasedTrickPlay: DashIsoImageBasedTrickPlay? = nil, imageBasedTrickPlaySettings: DashIsoImageBasedTrickPlaySettings? = nil, minBufferTime: Int? = nil, minFinalSegmentLength: Double? = nil, mpdManifestBandwidthType: DashIsoMpdManifestBandwidthType? = nil, mpdProfile: DashIsoMpdProfile? = nil, ptsOffsetHandlingForBFrames: DashIsoPtsOffsetHandlingForBFrames? = nil, segmentControl: DashIsoSegmentControl? = nil, segmentLength: Int? = nil, segmentLengthControl: DashIsoSegmentLengthControl? = nil, videoCompositionOffsets: DashIsoVideoCompositionOffsets? = nil, writeSegmentTimelineInRepresentation: DashIsoWriteSegmentTimelineInRepresentation? = nil) { + public init(additionalManifests: [DashAdditionalManifest]? = nil, audioChannelConfigSchemeIdUri: DashIsoGroupAudioChannelConfigSchemeIdUri? = nil, baseUrl: String? = nil, dashIFrameTrickPlayNameModifier: String? = nil, dashManifestStyle: DashManifestStyle? = nil, destination: String? = nil, destinationSettings: DestinationSettings? = nil, encryption: DashIsoEncryptionSettings? = nil, fragmentLength: Int? = nil, hbbtvCompliance: DashIsoHbbtvCompliance? = nil, imageBasedTrickPlay: DashIsoImageBasedTrickPlay? = nil, imageBasedTrickPlaySettings: DashIsoImageBasedTrickPlaySettings? = nil, minBufferTime: Int? = nil, minFinalSegmentLength: Double? 
= nil, mpdManifestBandwidthType: DashIsoMpdManifestBandwidthType? = nil, mpdProfile: DashIsoMpdProfile? = nil, ptsOffsetHandlingForBFrames: DashIsoPtsOffsetHandlingForBFrames? = nil, segmentControl: DashIsoSegmentControl? = nil, segmentLength: Int? = nil, segmentLengthControl: DashIsoSegmentLengthControl? = nil, videoCompositionOffsets: DashIsoVideoCompositionOffsets? = nil, writeSegmentTimelineInRepresentation: DashIsoWriteSegmentTimelineInRepresentation? = nil) { self.additionalManifests = additionalManifests self.audioChannelConfigSchemeIdUri = audioChannelConfigSchemeIdUri self.baseUrl = baseUrl + self.dashIFrameTrickPlayNameModifier = dashIFrameTrickPlayNameModifier self.dashManifestStyle = dashManifestStyle self.destination = destination self.destinationSettings = destinationSettings @@ -5274,6 +5283,8 @@ extension MediaConvert { try self.additionalManifests?.forEach { try $0.validate(name: "\(name).additionalManifests[]") } + try self.validate(self.dashIFrameTrickPlayNameModifier, name: "dashIFrameTrickPlayNameModifier", parent: name, max: 256) + try self.validate(self.dashIFrameTrickPlayNameModifier, name: "dashIFrameTrickPlayNameModifier", parent: name, min: 1) try self.validate(self.destination, name: "destination", parent: name, pattern: "^s3:\\/\\/") try self.destinationSettings?.validate(name: "\(name).destinationSettings") try self.encryption?.validate(name: "\(name).encryption") @@ -5290,6 +5301,7 @@ extension MediaConvert { case additionalManifests = "additionalManifests" case audioChannelConfigSchemeIdUri = "audioChannelConfigSchemeIdUri" case baseUrl = "baseUrl" + case dashIFrameTrickPlayNameModifier = "dashIFrameTrickPlayNameModifier" case dashManifestStyle = "dashManifestStyle" case destination = "destination" case destinationSettings = "destinationSettings" @@ -7701,8 +7713,8 @@ extension MediaConvert { } public func validate(name: String) throws { - try self.validate(self.endTimecode, name: "endTimecode", parent: name, pattern: "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}$") - try self.validate(self.startTimecode, name: "startTimecode", parent: name, pattern: "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}$") + try self.validate(self.endTimecode, name: "endTimecode", parent: name, pattern: "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}(@[0-9]+(\\.[0-9]+)?(:[0-9]+)?)?$") + try self.validate(self.startTimecode, name: "startTimecode", parent: name, pattern: "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}(@[0-9]+(\\.[0-9]+)?(:[0-9]+)?)?$") } private enum CodingKeys: String, CodingKey { @@ -7875,20 +7887,46 @@ extension MediaConvert { } public struct InputVideoGenerator: AWSEncodableShape & AWSDecodableShape { - /// Specify an integer value for Black video duration from 50 to 86400000 to generate a black video input for that many milliseconds. Required when you include Video generator. + /// Specify the number of audio channels to include in your video generator input. MediaConvert creates these audio channels as silent audio within a single audio track. Enter an integer from 1 to 32. + public let channels: Int? + /// Specify the duration, in milliseconds, for your video generator input. + /// Enter an integer from 50 to 86400000. public let duration: Int? + /// Specify the denominator of the fraction that represents the frame rate for your video generator input. When you do, you must also specify a value for Frame rate numerator. 
MediaConvert uses a default frame rate of 29.97 when you leave Frame rate numerator and Frame rate denominator blank. + public let framerateDenominator: Int? + /// Specify the numerator of the fraction that represents the frame rate for your video generator input. When you do, you must also specify a value for Frame rate denominator. MediaConvert uses a default frame rate of 29.97 when you leave Frame rate numerator and Frame rate denominator blank. + public let framerateNumerator: Int? + /// Specify the audio sample rate, in Hz, for the silent audio in your video generator input. + /// Enter an integer from 32000 to 48000. + public let sampleRate: Int? - public init(duration: Int? = nil) { + public init(channels: Int? = nil, duration: Int? = nil, framerateDenominator: Int? = nil, framerateNumerator: Int? = nil, sampleRate: Int? = nil) { + self.channels = channels self.duration = duration + self.framerateDenominator = framerateDenominator + self.framerateNumerator = framerateNumerator + self.sampleRate = sampleRate } public func validate(name: String) throws { + try self.validate(self.channels, name: "channels", parent: name, max: 32) + try self.validate(self.channels, name: "channels", parent: name, min: 1) try self.validate(self.duration, name: "duration", parent: name, max: 86400000) try self.validate(self.duration, name: "duration", parent: name, min: 50) + try self.validate(self.framerateDenominator, name: "framerateDenominator", parent: name, max: 1001) + try self.validate(self.framerateDenominator, name: "framerateDenominator", parent: name, min: 1) + try self.validate(self.framerateNumerator, name: "framerateNumerator", parent: name, max: 60000) + try self.validate(self.framerateNumerator, name: "framerateNumerator", parent: name, min: 1) + try self.validate(self.sampleRate, name: "sampleRate", parent: name, max: 48000) + try self.validate(self.sampleRate, name: "sampleRate", parent: name, min: 32000) } private enum CodingKeys: String, CodingKey { + case channels = "channels" case duration = "duration" + case framerateDenominator = "framerateDenominator" + case framerateNumerator = "framerateNumerator" + case sampleRate = "sampleRate" } } @@ -10002,6 +10040,7 @@ extension MediaConvert { try $0.validate(name: "\(name).captionDescriptions[]") } try self.containerSettings?.validate(name: "\(name).containerSettings") + try self.validate(self.nameModifier, name: "nameModifier", parent: name, max: 256) try self.validate(self.nameModifier, name: "nameModifier", parent: name, min: 1) try self.videoDescription?.validate(name: "\(name).videoDescription") } @@ -10682,6 +10721,65 @@ extension MediaConvert { } } + public struct SearchJobsRequest: AWSEncodableShape { + /// Optional. Provide your input file URL or your partial input file name. The maximum length for an input file is 300 characters. + public let inputFile: String? + /// Optional. Number of jobs, up to twenty, that will be returned at one time. + public let maxResults: Int? + /// Optional. Use this string, provided with the response to a previous request, to request the next batch of jobs. + public let nextToken: String? + /// Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource. + public let order: Order? + /// Optional. Provide a queue name, or a queue ARN, to return only jobs from that queue. + public let queue: String? + /// Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. 
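// Usage sketch (illustrative, not generated SDK code): exercising two MediaConvert additions from this
// update. The `mediaConvert` client is an assumption; `searchJobs(_:)` and `searchJobsPaginator(_:)`
// are the operations added in MediaConvert_api.swift above, and the JobStatus case is assumed to use
// the usual COMPLETE spelling.
import SotoMediaConvert  // product name assumed from the package layout

func listRecentCompleteJobs(_ mediaConvert: MediaConvert) async throws {
    let request = SearchJobsRequest(maxResults: 20, status: .complete)
    // One page of up to twenty jobs...
    let firstPage = try await mediaConvert.searchJobs(request)
    print("first page holds \(firstPage.jobs?.count ?? 0) jobs")
    // ...or walk every page with the new paginator.
    for try await page in mediaConvert.searchJobsPaginator(request) {
        print("page holds \(page.jobs?.count ?? 0) jobs")
    }
}

// The expanded InputVideoGenerator above can now describe silent audio and an explicit frame rate:
// ten seconds of generated video at 29.97 fps (30000/1001) with two channels of silent 48 kHz audio.
let generatedInput = InputVideoGenerator(
    channels: 2,
    duration: 10_000,
    framerateDenominator: 1001,
    framerateNumerator: 30000,
    sampleRate: 48000
)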
+ public let status: JobStatus? + + public init(inputFile: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, order: Order? = nil, queue: String? = nil, status: JobStatus? = nil) { + self.inputFile = inputFile + self.maxResults = maxResults + self.nextToken = nextToken + self.order = order + self.queue = queue + self.status = status + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.inputFile, key: "inputFile") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.order, key: "order") + request.encodeQuery(self.queue, key: "queue") + request.encodeQuery(self.status, key: "status") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 20) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct SearchJobsResponse: AWSDecodableShape { + /// List of jobs. + public let jobs: [Job]? + /// Use this string to request the next batch of jobs. + public let nextToken: String? + + public init(jobs: [Job]? = nil, nextToken: String? = nil) { + self.jobs = jobs + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case jobs = "jobs" + case nextToken = "nextToken" + } + } + public struct SpekeKeyProvider: AWSEncodableShape & AWSDecodableShape { /// If you want your key provider to encrypt the content keys that it provides to MediaConvert, set up a certificate with a master key using AWS Certificate Manager. Specify the certificate's Amazon Resource Name (ARN) here. public let certificateArn: String? diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index 0346573574..6af5548675 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -1889,6 +1889,12 @@ extension MediaLive { public var description: String { return self.rawValue } } + public enum Scte35SegmentationScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allOutputGroups = "ALL_OUTPUT_GROUPS" + case scte35EnabledOutputGroups = "SCTE35_ENABLED_OUTPUT_GROUPS" + public var description: String { return self.rawValue } + } + public enum Scte35SpliceInsertNoRegionalBlackoutBehavior: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case follow = "FOLLOW" case ignore = "IGNORE" @@ -2811,9 +2817,14 @@ extension MediaLive { public struct AvailConfiguration: AWSEncodableShape & AWSDecodableShape { /// Controls how SCTE-35 messages create cues. Splice Insert mode treats all segmentation signals traditionally. With Time Signal APOS mode only Time Signal Placement Opportunity and Break messages create segment breaks. With ESAM mode, signals are forwarded to an ESAM server for possible update. public let availSettings: AvailSettings? + /// Configures whether SCTE 35 passthrough triggers segment breaks in all output groups that use segmented outputs. Insertion of a SCTE 35 message typically results in a segment break, in addition to the regular cadence of breaks. The segment breaks appear in video outputs, audio outputs, and captions outputs (if any). + /// ALL_OUTPUT_GROUPS: Default. 
Insert the segment break in in all output groups that have segmented outputs. This is the legacy behavior. + /// SCTE35_ENABLED_OUTPUT_GROUPS: Insert the segment break only in output groups that have SCTE 35 passthrough enabled. This is the recommended value, because it reduces unnecessary segment breaks. + public let scte35SegmentationScope: Scte35SegmentationScope? - public init(availSettings: AvailSettings? = nil) { + public init(availSettings: AvailSettings? = nil, scte35SegmentationScope: Scte35SegmentationScope? = nil) { self.availSettings = availSettings + self.scte35SegmentationScope = scte35SegmentationScope } public func validate(name: String) throws { @@ -2822,6 +2833,7 @@ extension MediaLive { private enum CodingKeys: String, CodingKey { case availSettings = "availSettings" + case scte35SegmentationScope = "scte35SegmentationScope" } } diff --git a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift index b58a27a6d6..c8be3de93b 100644 --- a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift +++ b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift @@ -85,6 +85,20 @@ extension MediaPackageV2 { public var description: String { return self.rawValue } } + public enum EndpointErrorCondition: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case incompleteManifest = "INCOMPLETE_MANIFEST" + case missingDrmKey = "MISSING_DRM_KEY" + case slateInput = "SLATE_INPUT" + case staleManifest = "STALE_MANIFEST" + public var description: String { return self.rawValue } + } + + public enum InputType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cmaf = "CMAF" + case hls = "HLS" + public var description: String { return self.rawValue } + } + public enum PresetSpeke20Audio: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case presetAudio1 = "PRESET_AUDIO_1" case presetAudio2 = "PRESET_AUDIO_2" @@ -169,15 +183,18 @@ extension MediaPackageV2 { public let createdAt: Date /// Any descriptive information that you want to add to the channel for future identification purposes. public let description: String? + /// The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior. The allowed values are: HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments). CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests). + public let inputType: InputType? /// The date and time the channel was modified. public let modifiedAt: Date - public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, modifiedAt: Date) { + public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, inputType: InputType? 
= nil, modifiedAt: Date) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName self.createdAt = createdAt self.description = description + self.inputType = inputType self.modifiedAt = modifiedAt } @@ -187,6 +204,7 @@ extension MediaPackageV2 { case channelName = "ChannelName" case createdAt = "CreatedAt" case description = "Description" + case inputType = "InputType" case modifiedAt = "ModifiedAt" } } @@ -284,14 +302,17 @@ extension MediaPackageV2 { public let clientToken: String? /// Enter any descriptive text that helps you to identify the channel. public let description: String? + /// The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior. The allowed values are: HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments). CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests). + public let inputType: InputType? /// A comma-separated list of tag key:value pairs that you define. For example: "Key1": "Value1", "Key2": "Value2" public let tags: [String: String]? - public init(channelGroupName: String, channelName: String, clientToken: String? = CreateChannelRequest.idempotencyToken(), description: String? = nil, tags: [String: String]? = nil) { + public init(channelGroupName: String, channelName: String, clientToken: String? = CreateChannelRequest.idempotencyToken(), description: String? = nil, inputType: InputType? = nil, tags: [String: String]? = nil) { self.channelGroupName = channelGroupName self.channelName = channelName self.clientToken = clientToken self.description = description + self.inputType = inputType self.tags = tags } @@ -302,6 +323,7 @@ extension MediaPackageV2 { try container.encode(self.channelName, forKey: .channelName) request.encodeHeader(self.clientToken, key: "x-amzn-client-token") try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.inputType, forKey: .inputType) try container.encodeIfPresent(self.tags, forKey: .tags) } @@ -321,6 +343,7 @@ extension MediaPackageV2 { private enum CodingKeys: String, CodingKey { case channelName = "ChannelName" case description = "Description" + case inputType = "InputType" case tags = "tags" } } @@ -339,12 +362,14 @@ extension MediaPackageV2 { /// The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource. public let eTag: String? public let ingestEndpoints: [IngestEndpoint]? + /// The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior. The allowed values are: HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments). CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests). + public let inputType: InputType? /// The date and time the channel was modified. public let modifiedAt: Date /// The comma-separated list of tag key:value pairs assigned to the channel. public let tags: [String: String]? - public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, eTag: String? = nil, ingestEndpoints: [IngestEndpoint]? = nil, modifiedAt: Date, tags: [String: String]? 
= nil) { + public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, eTag: String? = nil, ingestEndpoints: [IngestEndpoint]? = nil, inputType: InputType? = nil, modifiedAt: Date, tags: [String: String]? = nil) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -352,6 +377,7 @@ extension MediaPackageV2 { self.description = description self.eTag = eTag self.ingestEndpoints = ingestEndpoints + self.inputType = inputType self.modifiedAt = modifiedAt self.tags = tags } @@ -364,6 +390,7 @@ extension MediaPackageV2 { case description = "Description" case eTag = "ETag" case ingestEndpoints = "IngestEndpoints" + case inputType = "InputType" case modifiedAt = "ModifiedAt" case tags = "Tags" } @@ -521,6 +548,8 @@ extension MediaPackageV2 { public let dashManifests: [CreateDashManifestConfiguration]? /// Enter any descriptive text that helps you to identify the origin endpoint. public let description: String? + /// The failover settings for the endpoint. + public let forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? /// An HTTP live streaming (HLS) manifest configuration. public let hlsManifests: [CreateHlsManifestConfiguration]? /// A low-latency HLS manifest configuration. @@ -534,13 +563,14 @@ extension MediaPackageV2 { /// A comma-separated list of tag key:value pairs that you define. For example: "Key1": "Value1", "Key2": "Value2" public let tags: [String: String]? - public init(channelGroupName: String, channelName: String, clientToken: String? = CreateOriginEndpointRequest.idempotencyToken(), containerType: ContainerType, dashManifests: [CreateDashManifestConfiguration]? = nil, description: String? = nil, hlsManifests: [CreateHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [CreateLowLatencyHlsManifestConfiguration]? = nil, originEndpointName: String, segment: Segment? = nil, startoverWindowSeconds: Int? = nil, tags: [String: String]? = nil) { + public init(channelGroupName: String, channelName: String, clientToken: String? = CreateOriginEndpointRequest.idempotencyToken(), containerType: ContainerType, dashManifests: [CreateDashManifestConfiguration]? = nil, description: String? = nil, forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? = nil, hlsManifests: [CreateHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [CreateLowLatencyHlsManifestConfiguration]? = nil, originEndpointName: String, segment: Segment? = nil, startoverWindowSeconds: Int? = nil, tags: [String: String]? 
= nil) { self.channelGroupName = channelGroupName self.channelName = channelName self.clientToken = clientToken self.containerType = containerType self.dashManifests = dashManifests self.description = description + self.forceEndpointErrorConfiguration = forceEndpointErrorConfiguration self.hlsManifests = hlsManifests self.lowLatencyHlsManifests = lowLatencyHlsManifests self.originEndpointName = originEndpointName @@ -558,6 +588,7 @@ extension MediaPackageV2 { try container.encode(self.containerType, forKey: .containerType) try container.encodeIfPresent(self.dashManifests, forKey: .dashManifests) try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.forceEndpointErrorConfiguration, forKey: .forceEndpointErrorConfiguration) try container.encodeIfPresent(self.hlsManifests, forKey: .hlsManifests) try container.encodeIfPresent(self.lowLatencyHlsManifests, forKey: .lowLatencyHlsManifests) try container.encode(self.originEndpointName, forKey: .originEndpointName) @@ -596,6 +627,7 @@ extension MediaPackageV2 { case containerType = "ContainerType" case dashManifests = "DashManifests" case description = "Description" + case forceEndpointErrorConfiguration = "ForceEndpointErrorConfiguration" case hlsManifests = "HlsManifests" case lowLatencyHlsManifests = "LowLatencyHlsManifests" case originEndpointName = "OriginEndpointName" @@ -622,6 +654,8 @@ extension MediaPackageV2 { public let description: String? /// The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource. public let eTag: String? + /// The failover settings for the endpoint. + public let forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? /// An HTTP live streaming (HLS) manifest configuration. public let hlsManifests: [GetHlsManifestConfiguration]? /// A low-latency HLS manifest configuration. @@ -637,7 +671,7 @@ extension MediaPackageV2 { /// The comma-separated list of tag key:value pairs assigned to the origin endpoint. public let tags: [String: String]? - public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date, dashManifests: [GetDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, hlsManifests: [GetHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [GetLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date, originEndpointName: String, segment: Segment, startoverWindowSeconds: Int? = nil, tags: [String: String]? = nil) { + public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date, dashManifests: [GetDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? = nil, hlsManifests: [GetHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [GetLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date, originEndpointName: String, segment: Segment, startoverWindowSeconds: Int? = nil, tags: [String: String]? 
= nil) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -646,6 +680,7 @@ extension MediaPackageV2 { self.dashManifests = dashManifests self.description = description self.eTag = eTag + self.forceEndpointErrorConfiguration = forceEndpointErrorConfiguration self.hlsManifests = hlsManifests self.lowLatencyHlsManifests = lowLatencyHlsManifests self.modifiedAt = modifiedAt @@ -664,6 +699,7 @@ extension MediaPackageV2 { case dashManifests = "DashManifests" case description = "Description" case eTag = "ETag" + case forceEndpointErrorConfiguration = "ForceEndpointErrorConfiguration" case hlsManifests = "HlsManifests" case lowLatencyHlsManifests = "LowLatencyHlsManifests" case modifiedAt = "ModifiedAt" @@ -952,6 +988,19 @@ extension MediaPackageV2 { } } + public struct ForceEndpointErrorConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The failover conditions for the endpoint. The options are: STALE_MANIFEST - The manifest stalled and there are no new segments or parts. INCOMPLETE_MANIFEST - There is a gap in the manifest. MISSING_DRM_KEY - Key rotation is enabled but we're unable to fetch the key for the current key period. SLATE_INPUT - The segments which contain slate content are considered to be missing content. + public let endpointErrorConditions: [EndpointErrorCondition]? + + public init(endpointErrorConditions: [EndpointErrorCondition]? = nil) { + self.endpointErrorConditions = endpointErrorConditions + } + + private enum CodingKeys: String, CodingKey { + case endpointErrorConditions = "EndpointErrorConditions" + } + } + public struct GetChannelGroupRequest: AWSEncodableShape { /// The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region. public let channelGroupName: String @@ -1111,12 +1160,14 @@ extension MediaPackageV2 { /// The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource. public let eTag: String? public let ingestEndpoints: [IngestEndpoint]? + /// The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior. The allowed values are: HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments). CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests). + public let inputType: InputType? /// The date and time the channel was modified. public let modifiedAt: Date /// The comma-separated list of tag key:value pairs assigned to the channel. public let tags: [String: String]? - public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, eTag: String? = nil, ingestEndpoints: [IngestEndpoint]? = nil, modifiedAt: Date, tags: [String: String]? = nil) { + public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, eTag: String? = nil, ingestEndpoints: [IngestEndpoint]? = nil, inputType: InputType? = nil, modifiedAt: Date, tags: [String: String]? 
= nil) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -1124,6 +1175,7 @@ extension MediaPackageV2 { self.description = description self.eTag = eTag self.ingestEndpoints = ingestEndpoints + self.inputType = inputType self.modifiedAt = modifiedAt self.tags = tags } @@ -1136,6 +1188,7 @@ extension MediaPackageV2 { case description = "Description" case eTag = "ETag" case ingestEndpoints = "IngestEndpoints" + case inputType = "InputType" case modifiedAt = "ModifiedAt" case tags = "Tags" } @@ -1383,6 +1436,8 @@ extension MediaPackageV2 { public let description: String? /// The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource. public let eTag: String? + /// The failover settings for the endpoint. + public let forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? /// An HTTP live streaming (HLS) manifest configuration. public let hlsManifests: [GetHlsManifestConfiguration]? /// A low-latency HLS manifest configuration. @@ -1397,7 +1452,7 @@ extension MediaPackageV2 { /// The comma-separated list of tag key:value pairs assigned to the origin endpoint. public let tags: [String: String]? - public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date, dashManifests: [GetDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, hlsManifests: [GetHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [GetLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date, originEndpointName: String, segment: Segment, startoverWindowSeconds: Int? = nil, tags: [String: String]? = nil) { + public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date, dashManifests: [GetDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? = nil, hlsManifests: [GetHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [GetLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date, originEndpointName: String, segment: Segment, startoverWindowSeconds: Int? = nil, tags: [String: String]? = nil) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -1406,6 +1461,7 @@ extension MediaPackageV2 { self.dashManifests = dashManifests self.description = description self.eTag = eTag + self.forceEndpointErrorConfiguration = forceEndpointErrorConfiguration self.hlsManifests = hlsManifests self.lowLatencyHlsManifests = lowLatencyHlsManifests self.modifiedAt = modifiedAt @@ -1424,6 +1480,7 @@ extension MediaPackageV2 { case dashManifests = "DashManifests" case description = "Description" case eTag = "ETag" + case forceEndpointErrorConfiguration = "ForceEndpointErrorConfiguration" case hlsManifests = "HlsManifests" case lowLatencyHlsManifests = "LowLatencyHlsManifests" case modifiedAt = "ModifiedAt" @@ -1705,6 +1762,8 @@ extension MediaPackageV2 { public let dashManifests: [ListDashManifestConfiguration]? /// Any descriptive information that you want to add to the origin endpoint for future identification purposes. public let description: String? + /// The failover settings for the endpoint. + public let forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? /// An HTTP live streaming (HLS) manifest configuration. public let hlsManifests: [ListHlsManifestConfiguration]? /// A low-latency HLS manifest configuration. 
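The MediaPackageV2 hunks above add two new pieces of surface: a channel-level inputType (HLS or CMAF ingest) and a ForceEndpointErrorConfiguration that can be supplied on origin endpoint create/update requests and is echoed back in the responses. A minimal Soto usage sketch follows, assuming an already-configured AWSClient; the createOriginEndpoint method name and the .ts, .staleManifest, and .incompleteManifest case spellings follow Soto's generated-code conventions rather than anything shown in this patch, and the group, channel, and endpoint names are hypothetical.

```swift
import SotoMediaPackageV2

// Sketch only: assumes Soto's usual generated createOriginEndpoint API and
// enum case spellings inferred from the raw values documented in this patch.
func createEndpointWithForcedErrorConditions(awsClient: AWSClient) async throws {
    let mediaPackage = MediaPackageV2(client: awsClient, region: .useast1)

    let request = MediaPackageV2.CreateOriginEndpointRequest(
        channelGroupName: "example-group",      // hypothetical channel group
        channelName: "example-channel",         // hypothetical channel
        containerType: .ts,                     // assumed case spelling for "TS"
        description: "Endpoint that surfaces stale or incomplete manifests as errors",
        forceEndpointErrorConfiguration: .init(
            endpointErrorConditions: [.staleManifest, .incompleteManifest]
        ),
        originEndpointName: "example-endpoint"  // hypothetical endpoint name
    )

    let response = try await mediaPackage.createOriginEndpoint(request)
    // The response echoes the stored configuration, including the new field.
    print(response.forceEndpointErrorConfiguration?.endpointErrorConditions ?? [])
}
```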
@@ -1714,7 +1773,7 @@ extension MediaPackageV2 { /// The name that describes the origin endpoint. The name is the primary identifier for the origin endpoint, and must be unique for your account in the AWS Region and channel. public let originEndpointName: String - public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date? = nil, dashManifests: [ListDashManifestConfiguration]? = nil, description: String? = nil, hlsManifests: [ListHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [ListLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date? = nil, originEndpointName: String) { + public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date? = nil, dashManifests: [ListDashManifestConfiguration]? = nil, description: String? = nil, forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? = nil, hlsManifests: [ListHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [ListLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date? = nil, originEndpointName: String) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -1722,6 +1781,7 @@ extension MediaPackageV2 { self.createdAt = createdAt self.dashManifests = dashManifests self.description = description + self.forceEndpointErrorConfiguration = forceEndpointErrorConfiguration self.hlsManifests = hlsManifests self.lowLatencyHlsManifests = lowLatencyHlsManifests self.modifiedAt = modifiedAt @@ -1736,6 +1796,7 @@ extension MediaPackageV2 { case createdAt = "CreatedAt" case dashManifests = "DashManifests" case description = "Description" + case forceEndpointErrorConfiguration = "ForceEndpointErrorConfiguration" case hlsManifests = "HlsManifests" case lowLatencyHlsManifests = "LowLatencyHlsManifests" case modifiedAt = "ModifiedAt" @@ -2124,12 +2185,14 @@ extension MediaPackageV2 { /// The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource. public let eTag: String? public let ingestEndpoints: [IngestEndpoint]? + /// The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior. The allowed values are: HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments). CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests). + public let inputType: InputType? /// The date and time the channel was modified. public let modifiedAt: Date /// The comma-separated list of tag key:value pairs assigned to the channel. public let tags: [String: String]? - public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, eTag: String? = nil, ingestEndpoints: [IngestEndpoint]? = nil, modifiedAt: Date, tags: [String: String]? = nil) { + public init(arn: String, channelGroupName: String, channelName: String, createdAt: Date, description: String? = nil, eTag: String? = nil, ingestEndpoints: [IngestEndpoint]? = nil, inputType: InputType? = nil, modifiedAt: Date, tags: [String: String]?
= nil) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -2137,6 +2200,7 @@ extension MediaPackageV2 { self.description = description self.eTag = eTag self.ingestEndpoints = ingestEndpoints + self.inputType = inputType self.modifiedAt = modifiedAt self.tags = tags } @@ -2149,6 +2213,7 @@ extension MediaPackageV2 { case description = "Description" case eTag = "ETag" case ingestEndpoints = "IngestEndpoints" + case inputType = "InputType" case modifiedAt = "ModifiedAt" case tags = "tags" } @@ -2167,6 +2232,8 @@ extension MediaPackageV2 { public let description: String? /// The expected current Entity Tag (ETag) for the resource. If the specified ETag does not match the resource's current entity tag, the update request will be rejected. public let eTag: String? + /// The failover settings for the endpoint. + public let forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? /// An HTTP live streaming (HLS) manifest configuration. public let hlsManifests: [CreateHlsManifestConfiguration]? /// A low-latency HLS manifest configuration. @@ -2178,13 +2245,14 @@ extension MediaPackageV2 { /// The size of the window (in seconds) to create a window of the live stream that's available for on-demand viewing. Viewers can start-over or catch-up on content that falls within the window. The maximum startover window is 1,209,600 seconds (14 days). public let startoverWindowSeconds: Int? - public init(channelGroupName: String, channelName: String, containerType: ContainerType, dashManifests: [CreateDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, hlsManifests: [CreateHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [CreateLowLatencyHlsManifestConfiguration]? = nil, originEndpointName: String, segment: Segment? = nil, startoverWindowSeconds: Int? = nil) { + public init(channelGroupName: String, channelName: String, containerType: ContainerType, dashManifests: [CreateDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? = nil, hlsManifests: [CreateHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [CreateLowLatencyHlsManifestConfiguration]? = nil, originEndpointName: String, segment: Segment? = nil, startoverWindowSeconds: Int? 
= nil) { self.channelGroupName = channelGroupName self.channelName = channelName self.containerType = containerType self.dashManifests = dashManifests self.description = description self.eTag = eTag + self.forceEndpointErrorConfiguration = forceEndpointErrorConfiguration self.hlsManifests = hlsManifests self.lowLatencyHlsManifests = lowLatencyHlsManifests self.originEndpointName = originEndpointName @@ -2201,6 +2269,7 @@ extension MediaPackageV2 { try container.encodeIfPresent(self.dashManifests, forKey: .dashManifests) try container.encodeIfPresent(self.description, forKey: .description) request.encodeHeader(self.eTag, key: "x-amzn-update-if-match") + try container.encodeIfPresent(self.forceEndpointErrorConfiguration, forKey: .forceEndpointErrorConfiguration) try container.encodeIfPresent(self.hlsManifests, forKey: .hlsManifests) try container.encodeIfPresent(self.lowLatencyHlsManifests, forKey: .lowLatencyHlsManifests) request.encodePath(self.originEndpointName, key: "OriginEndpointName") @@ -2238,6 +2307,7 @@ extension MediaPackageV2 { case containerType = "ContainerType" case dashManifests = "DashManifests" case description = "Description" + case forceEndpointErrorConfiguration = "ForceEndpointErrorConfiguration" case hlsManifests = "HlsManifests" case lowLatencyHlsManifests = "LowLatencyHlsManifests" case segment = "Segment" @@ -2262,6 +2332,8 @@ extension MediaPackageV2 { public let description: String? /// The current Entity Tag (ETag) associated with this resource. The entity tag can be used to safely make concurrent updates to the resource. public let eTag: String? + /// The failover settings for the endpoint. + public let forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? /// An HTTP live streaming (HLS) manifest configuration. public let hlsManifests: [GetHlsManifestConfiguration]? /// A low-latency HLS manifest configuration. @@ -2277,7 +2349,7 @@ extension MediaPackageV2 { /// The comma-separated list of tag key:value pairs assigned to the origin endpoint. public let tags: [String: String]? - public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date, dashManifests: [GetDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, hlsManifests: [GetHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [GetLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date, originEndpointName: String, segment: Segment, startoverWindowSeconds: Int? = nil, tags: [String: String]? = nil) { + public init(arn: String, channelGroupName: String, channelName: String, containerType: ContainerType, createdAt: Date, dashManifests: [GetDashManifestConfiguration]? = nil, description: String? = nil, eTag: String? = nil, forceEndpointErrorConfiguration: ForceEndpointErrorConfiguration? = nil, hlsManifests: [GetHlsManifestConfiguration]? = nil, lowLatencyHlsManifests: [GetLowLatencyHlsManifestConfiguration]? = nil, modifiedAt: Date, originEndpointName: String, segment: Segment, startoverWindowSeconds: Int? = nil, tags: [String: String]? 
= nil) { self.arn = arn self.channelGroupName = channelGroupName self.channelName = channelName @@ -2286,6 +2358,7 @@ extension MediaPackageV2 { self.dashManifests = dashManifests self.description = description self.eTag = eTag + self.forceEndpointErrorConfiguration = forceEndpointErrorConfiguration self.hlsManifests = hlsManifests self.lowLatencyHlsManifests = lowLatencyHlsManifests self.modifiedAt = modifiedAt @@ -2304,6 +2377,7 @@ extension MediaPackageV2 { case dashManifests = "DashManifests" case description = "Description" case eTag = "ETag" + case forceEndpointErrorConfiguration = "ForceEndpointErrorConfiguration" case hlsManifests = "HlsManifests" case lowLatencyHlsManifests = "LowLatencyHlsManifests" case modifiedAt = "ModifiedAt" diff --git a/Sources/Soto/Services/MedicalImaging/MedicalImaging_api.swift b/Sources/Soto/Services/MedicalImaging/MedicalImaging_api.swift index 48c89ffc05..9e3799a694 100644 --- a/Sources/Soto/Services/MedicalImaging/MedicalImaging_api.swift +++ b/Sources/Soto/Services/MedicalImaging/MedicalImaging_api.swift @@ -248,7 +248,7 @@ public struct MedicalImaging: AWSService { ) } - /// Search image sets based on defined input attributes. SearchImageSets accepts a single search query parameter and returns a paginated response of all image sets that have the matching criteria. All range queries must be input as (lowerBound, upperBound). SearchImageSets uses the updatedAt field for sorting in decreasing order from latest to oldest. + /// Search image sets based on defined input attributes. SearchImageSets accepts a single search query parameter and returns a paginated response of all image sets that have the matching criteria. All date range queries must be input as (lowerBound, upperBound). By default, SearchImageSets uses the updatedAt field for sorting in descending order from newest to oldest. @Sendable public func searchImageSets(_ input: SearchImageSetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchImageSetsResponse { return try await self.client.execute( @@ -386,7 +386,7 @@ extension MedicalImaging { ) } - /// Search image sets based on defined input attributes. SearchImageSets accepts a single search query parameter and returns a paginated response of all image sets that have the matching criteria. All range queries must be input as (lowerBound, upperBound). SearchImageSets uses the updatedAt field for sorting in decreasing order from latest to oldest. + /// Search image sets based on defined input attributes. SearchImageSets accepts a single search query parameter and returns a paginated response of all image sets that have the matching criteria. All date range queries must be input as (lowerBound, upperBound). By default, SearchImageSets uses the updatedAt field for sorting in descending order from newest to oldest. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift b/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift index 115f5dae06..f81b343651 100644 --- a/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift +++ b/Sources/Soto/Services/MedicalImaging/MedicalImaging_shapes.swift @@ -1442,6 +1442,8 @@ extension MedicalImaging { public let dataAccessRoleArn: String /// The data store identifier. public let datastoreId: String + /// The account ID of the source S3 bucket owner. + public let inputOwnerAccountId: String? /// The input prefix path for the S3 bucket that contains the DICOM files to be imported. 
public let inputS3Uri: String /// The import job name. @@ -1449,10 +1451,11 @@ extension MedicalImaging { /// The output prefix of the S3 bucket to upload the results of the DICOM import job. public let outputS3Uri: String - public init(clientToken: String = StartDICOMImportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, inputS3Uri: String, jobName: String? = nil, outputS3Uri: String) { + public init(clientToken: String = StartDICOMImportJobRequest.idempotencyToken(), dataAccessRoleArn: String, datastoreId: String, inputOwnerAccountId: String? = nil, inputS3Uri: String, jobName: String? = nil, outputS3Uri: String) { self.clientToken = clientToken self.dataAccessRoleArn = dataAccessRoleArn self.datastoreId = datastoreId + self.inputOwnerAccountId = inputOwnerAccountId self.inputS3Uri = inputS3Uri self.jobName = jobName self.outputS3Uri = outputS3Uri @@ -1464,6 +1467,7 @@ extension MedicalImaging { try container.encode(self.clientToken, forKey: .clientToken) try container.encode(self.dataAccessRoleArn, forKey: .dataAccessRoleArn) request.encodePath(self.datastoreId, key: "datastoreId") + try container.encodeIfPresent(self.inputOwnerAccountId, forKey: .inputOwnerAccountId) try container.encode(self.inputS3Uri, forKey: .inputS3Uri) try container.encodeIfPresent(self.jobName, forKey: .jobName) try container.encode(self.outputS3Uri, forKey: .outputS3Uri) @@ -1477,6 +1481,9 @@ extension MedicalImaging { try self.validate(self.dataAccessRoleArn, name: "dataAccessRoleArn", parent: name, min: 20) try self.validate(self.dataAccessRoleArn, name: "dataAccessRoleArn", parent: name, pattern: "^arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+$") try self.validate(self.datastoreId, name: "datastoreId", parent: name, pattern: "^[0-9a-z]{32}$") + try self.validate(self.inputOwnerAccountId, name: "inputOwnerAccountId", parent: name, max: 12) + try self.validate(self.inputOwnerAccountId, name: "inputOwnerAccountId", parent: name, min: 12) + try self.validate(self.inputOwnerAccountId, name: "inputOwnerAccountId", parent: name, pattern: "^\\d+$") try self.validate(self.inputS3Uri, name: "inputS3Uri", parent: name, max: 1024) try self.validate(self.inputS3Uri, name: "inputS3Uri", parent: name, min: 1) try self.validate(self.inputS3Uri, name: "inputS3Uri", parent: name, pattern: "^s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?$") @@ -1491,6 +1498,7 @@ extension MedicalImaging { private enum CodingKeys: String, CodingKey { case clientToken = "clientToken" case dataAccessRoleArn = "dataAccessRoleArn" + case inputOwnerAccountId = "inputOwnerAccountId" case inputS3Uri = "inputS3Uri" case jobName = "jobName" case outputS3Uri = "outputS3Uri" diff --git a/Sources/Soto/Services/Neptune/Neptune_api.swift b/Sources/Soto/Services/Neptune/Neptune_api.swift index 10cef362f3..85a5f04f7a 100644 --- a/Sources/Soto/Services/Neptune/Neptune_api.swift +++ b/Sources/Soto/Services/Neptune/Neptune_api.swift @@ -81,9 +81,9 @@ public struct Neptune: AWSService { "us-east-2": "rds-fips.us-east-2.amazonaws.com", "us-gov-east-1": "rds.us-gov-east-1.amazonaws.com", "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com", - "us-iso-east-1": "rds-fips.us-iso-east-1.c2s.ic.gov", - "us-iso-west-1": "rds-fips.us-iso-west-1.c2s.ic.gov", - "us-isob-east-1": "rds-fips.us-isob-east-1.sc2s.sgov.gov", + "us-iso-east-1": "rds.us-iso-east-1.c2s.ic.gov", + "us-iso-west-1": "rds.us-iso-west-1.c2s.ic.gov", + "us-isob-east-1": "rds.us-isob-east-1.sc2s.sgov.gov", "us-west-1": "rds-fips.us-west-1.amazonaws.com", "us-west-2": 
"rds-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift b/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift index 504849eca1..30dc6c2d2b 100644 --- a/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift +++ b/Sources/Soto/Services/NetworkManager/NetworkManager_shapes.swift @@ -26,6 +26,18 @@ import Foundation extension NetworkManager { // MARK: Enums + public enum AttachmentErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case maximumNoEncapLimitExceeded = "MAXIMUM_NO_ENCAP_LIMIT_EXCEEDED" + case subnetDuplicatedInAvailabilityZone = "SUBNET_DUPLICATED_IN_AVAILABILITY_ZONE" + case subnetNoFreeAddresses = "SUBNET_NO_FREE_ADDRESSES" + case subnetNoIpv6Cidrs = "SUBNET_NO_IPV6_CIDRS" + case subnetNotFound = "SUBNET_NOT_FOUND" + case subnetUnsupportedAvailabilityZone = "SUBNET_UNSUPPORTED_AVAILABILITY_ZONE" + case vpcNotFound = "VPC_NOT_FOUND" + case vpnConnectionNotFound = "VPN_CONNECTION_NOT_FOUND" + public var description: String { return self.rawValue } + } + public enum AttachmentState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" case creating = "CREATING" @@ -80,6 +92,7 @@ extension NetworkManager { case coreNetworkConfiguration = "CORE_NETWORK_CONFIGURATION" case coreNetworkEdge = "CORE_NETWORK_EDGE" case coreNetworkSegment = "CORE_NETWORK_SEGMENT" + case networkFunctionGroup = "NETWORK_FUNCTION_GROUP" case segmentActionsConfiguration = "SEGMENT_ACTIONS_CONFIGURATION" case segmentsConfiguration = "SEGMENTS_CONFIGURATION" public var description: String { return self.rawValue } @@ -93,6 +106,16 @@ extension NetworkManager { public var description: String { return self.rawValue } } + public enum ConnectPeerErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case edgeLocationNoFreeIps = "EDGE_LOCATION_NO_FREE_IPS" + case edgeLocationPeerDuplicate = "EDGE_LOCATION_PEER_DUPLICATE" + case invalidInsideCidrBlock = "INVALID_INSIDE_CIDR_BLOCK" + case ipOutsideSubnetCidrRange = "IP_OUTSIDE_SUBNET_CIDR_RANGE" + case noAssociatedCidrBlock = "NO_ASSOCIATED_CIDR_BLOCK" + case subnetNotFound = "SUBNET_NOT_FOUND" + public var description: String { return self.rawValue } + } + public enum ConnectPeerState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" case creating = "CREATING" @@ -175,6 +198,16 @@ extension NetworkManager { public var description: String { return self.rawValue } } + public enum PeeringErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case edgeLocationPeerDuplicate = "EDGE_LOCATION_PEER_DUPLICATE" + case internalError = "INTERNAL_ERROR" + case invalidTransitGatewayState = "INVALID_TRANSIT_GATEWAY_STATE" + case missingRequiredPermissions = "MISSING_PERMISSIONS" + case transitGatewayNotFound = "TRANSIT_GATEWAY_NOT_FOUND" + case transitGatewayPeersLimitExceeded = "TRANSIT_GATEWAY_PEERS_LIMIT_EXCEEDED" + public var description: String { return self.rawValue } + } + public enum PeeringState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" case creating = "CREATING" @@ -224,6 +257,7 @@ extension NetworkManager { public enum RouteTableType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case coreNetworkSegment = "CORE_NETWORK_SEGMENT" + case networkFunctionGroup = "NETWORK_FUNCTION_GROUP" 
case transitGatewayRouteTable = "TRANSIT_GATEWAY_ROUTE_TABLE" public var description: String { return self.rawValue } } @@ -234,6 +268,18 @@ extension NetworkManager { public var description: String { return self.rawValue } } + public enum SegmentActionServiceInsertion: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case sendTo = "send-to" + case sendVia = "send-via" + public var description: String { return self.rawValue } + } + + public enum SendViaMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dualHop = "dual-hop" + case singleHop = "single-hop" + public var description: String { return self.rawValue } + } + public enum SiteState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "AVAILABLE" case deleting = "DELETING" @@ -579,8 +625,14 @@ extension NetworkManager { public let createdAt: Date? /// The Region where the edge is located. public let edgeLocation: String? + /// Describes the error associated with the attachment request. + public let lastModificationErrors: [AttachmentError]? + /// The name of the network function group. + public let networkFunctionGroupName: String? /// The ID of the attachment account owner. public let ownerAccountId: String? + /// Describes a proposed change to a network function group associated with the attachment. + public let proposedNetworkFunctionGroupChange: ProposedNetworkFunctionGroupChange? /// The attachment to move from one segment to another. public let proposedSegmentChange: ProposedSegmentChange? /// The attachment resource ARN. @@ -594,7 +646,7 @@ extension NetworkManager { /// The timestamp when the attachment was last updated. public let updatedAt: Date? - public init(attachmentId: String? = nil, attachmentPolicyRuleNumber: Int? = nil, attachmentType: AttachmentType? = nil, coreNetworkArn: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, edgeLocation: String? = nil, ownerAccountId: String? = nil, proposedSegmentChange: ProposedSegmentChange? = nil, resourceArn: String? = nil, segmentName: String? = nil, state: AttachmentState? = nil, tags: [Tag]? = nil, updatedAt: Date? = nil) { + public init(attachmentId: String? = nil, attachmentPolicyRuleNumber: Int? = nil, attachmentType: AttachmentType? = nil, coreNetworkArn: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, edgeLocation: String? = nil, lastModificationErrors: [AttachmentError]? = nil, networkFunctionGroupName: String? = nil, ownerAccountId: String? = nil, proposedNetworkFunctionGroupChange: ProposedNetworkFunctionGroupChange? = nil, proposedSegmentChange: ProposedSegmentChange? = nil, resourceArn: String? = nil, segmentName: String? = nil, state: AttachmentState? = nil, tags: [Tag]? = nil, updatedAt: Date? 
= nil) { self.attachmentId = attachmentId self.attachmentPolicyRuleNumber = attachmentPolicyRuleNumber self.attachmentType = attachmentType @@ -602,7 +654,10 @@ extension NetworkManager { self.coreNetworkId = coreNetworkId self.createdAt = createdAt self.edgeLocation = edgeLocation + self.lastModificationErrors = lastModificationErrors + self.networkFunctionGroupName = networkFunctionGroupName self.ownerAccountId = ownerAccountId + self.proposedNetworkFunctionGroupChange = proposedNetworkFunctionGroupChange self.proposedSegmentChange = proposedSegmentChange self.resourceArn = resourceArn self.segmentName = segmentName @@ -619,7 +674,10 @@ extension NetworkManager { case coreNetworkId = "CoreNetworkId" case createdAt = "CreatedAt" case edgeLocation = "EdgeLocation" + case lastModificationErrors = "LastModificationErrors" + case networkFunctionGroupName = "NetworkFunctionGroupName" case ownerAccountId = "OwnerAccountId" + case proposedNetworkFunctionGroupChange = "ProposedNetworkFunctionGroupChange" case proposedSegmentChange = "ProposedSegmentChange" case resourceArn = "ResourceArn" case segmentName = "SegmentName" @@ -629,6 +687,31 @@ extension NetworkManager { } } + public struct AttachmentError: AWSDecodableShape { + /// The error code for the attachment request. + public let code: AttachmentErrorCode? + /// The message associated with the error code. + public let message: String? + /// The ID of the attachment request. + public let requestId: String? + /// The ARN of the requested attachment resource. + public let resourceArn: String? + + public init(code: AttachmentErrorCode? = nil, message: String? = nil, requestId: String? = nil, resourceArn: String? = nil) { + self.code = code + self.message = message + self.requestId = requestId + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case code = "Code" + case message = "Message" + case requestId = "RequestId" + case resourceArn = "ResourceArn" + } + } + public struct Bandwidth: AWSEncodableShape & AWSDecodableShape { /// Download speed in Mbps. public let downloadSpeed: Int? @@ -706,20 +789,23 @@ extension NetworkManager { public let createdAt: Date? /// The Connect peer Regions where edges are located. public let edgeLocation: String? + /// Describes the error associated with the attachment request. + public let lastModificationErrors: [ConnectPeerError]? /// The state of the Connect peer. public let state: ConnectPeerState? - /// The subnet ARN for the Connect peer. + /// The subnet ARN for the Connect peer. This only applies when the protocol is NO_ENCAP. public let subnetArn: String? /// The list of key-value tags associated with the Connect peer. public let tags: [Tag]? - public init(configuration: ConnectPeerConfiguration? = nil, connectAttachmentId: String? = nil, connectPeerId: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, edgeLocation: String? = nil, state: ConnectPeerState? = nil, subnetArn: String? = nil, tags: [Tag]? = nil) { + public init(configuration: ConnectPeerConfiguration? = nil, connectAttachmentId: String? = nil, connectPeerId: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, edgeLocation: String? = nil, lastModificationErrors: [ConnectPeerError]? = nil, state: ConnectPeerState? = nil, subnetArn: String? = nil, tags: [Tag]?
= nil) { self.configuration = configuration self.connectAttachmentId = connectAttachmentId self.connectPeerId = connectPeerId self.coreNetworkId = coreNetworkId self.createdAt = createdAt self.edgeLocation = edgeLocation + self.lastModificationErrors = lastModificationErrors self.state = state self.subnetArn = subnetArn self.tags = tags @@ -732,6 +818,7 @@ extension NetworkManager { case coreNetworkId = "CoreNetworkId" case createdAt = "CreatedAt" case edgeLocation = "EdgeLocation" + case lastModificationErrors = "LastModificationErrors" case state = "State" case subnetArn = "SubnetArn" case tags = "Tags" @@ -821,6 +908,31 @@ extension NetworkManager { } } + public struct ConnectPeerError: AWSDecodableShape { + /// The error code for the Connect peer request. + public let code: ConnectPeerErrorCode? + /// The message associated with the error code. + public let message: String? + /// The ID of the Connect peer request. + public let requestId: String? + /// The ARN of the requested Connect peer resource. + public let resourceArn: String? + + public init(code: ConnectPeerErrorCode? = nil, message: String? = nil, requestId: String? = nil, resourceArn: String? = nil) { + self.code = code + self.message = message + self.requestId = requestId + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case code = "Code" + case message = "Message" + case requestId = "RequestId" + case resourceArn = "ResourceArn" + } + } + public struct ConnectPeerSummary: AWSDecodableShape { /// The ID of a Connect peer attachment. public let connectAttachmentId: String? @@ -949,6 +1061,8 @@ extension NetworkManager { public let edges: [CoreNetworkEdge]? /// The ID of the global network that your core network is a part of. public let globalNetworkId: String? + /// The network function groups associated with a core network. + public let networkFunctionGroups: [CoreNetworkNetworkFunctionGroup]? /// The segments within a core network. public let segments: [CoreNetworkSegment]? /// The current state of a core network. @@ -956,13 +1070,14 @@ extension NetworkManager { /// The list of key-value tags associated with a core network. public let tags: [Tag]? - public init(coreNetworkArn: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, description: String? = nil, edges: [CoreNetworkEdge]? = nil, globalNetworkId: String? = nil, segments: [CoreNetworkSegment]? = nil, state: CoreNetworkState? = nil, tags: [Tag]? = nil) { + public init(coreNetworkArn: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, description: String? = nil, edges: [CoreNetworkEdge]? = nil, globalNetworkId: String? = nil, networkFunctionGroups: [CoreNetworkNetworkFunctionGroup]? = nil, segments: [CoreNetworkSegment]? = nil, state: CoreNetworkState? = nil, tags: [Tag]? = nil) { self.coreNetworkArn = coreNetworkArn self.coreNetworkId = coreNetworkId self.createdAt = createdAt self.description = description self.edges = edges self.globalNetworkId = globalNetworkId + self.networkFunctionGroups = networkFunctionGroups self.segments = segments self.state = state self.tags = tags @@ -975,6 +1090,7 @@ extension NetworkManager { case description = "Description" case edges = "Edges" case globalNetworkId = "GlobalNetworkId" + case networkFunctionGroups = "NetworkFunctionGroups" case segments = "Segments" case state = "State" case tags = "Tags" @@ -1054,13 +1170,16 @@ extension NetworkManager { public let cidr: String? /// The edge location for the core network change event. 
public let edgeLocation: String? + /// The changed network function group name. + public let networkFunctionGroupName: String? /// The segment name if the change event is associated with a segment. public let segmentName: String? - public init(attachmentId: String? = nil, cidr: String? = nil, edgeLocation: String? = nil, segmentName: String? = nil) { + public init(attachmentId: String? = nil, cidr: String? = nil, edgeLocation: String? = nil, networkFunctionGroupName: String? = nil, segmentName: String? = nil) { self.attachmentId = attachmentId self.cidr = cidr self.edgeLocation = edgeLocation + self.networkFunctionGroupName = networkFunctionGroupName self.segmentName = segmentName } @@ -1068,6 +1187,7 @@ extension NetworkManager { case attachmentId = "AttachmentId" case cidr = "Cidr" case edgeLocation = "EdgeLocation" + case networkFunctionGroupName = "NetworkFunctionGroupName" case segmentName = "SegmentName" } } @@ -1083,18 +1203,24 @@ extension NetworkManager { public let edgeLocations: [String]? /// The inside IP addresses used for core network change values. public let insideCidrBlocks: [String]? + /// The network function group name if the change event is associated with a network function group. + public let networkFunctionGroupName: String? /// The names of the segments in a core network. public let segmentName: String? + /// Describes the service insertion action. + public let serviceInsertionActions: [ServiceInsertionAction]? /// The shared segments for a core network change value. public let sharedSegments: [String]? - public init(asn: Int64? = nil, cidr: String? = nil, destinationIdentifier: String? = nil, edgeLocations: [String]? = nil, insideCidrBlocks: [String]? = nil, segmentName: String? = nil, sharedSegments: [String]? = nil) { + public init(asn: Int64? = nil, cidr: String? = nil, destinationIdentifier: String? = nil, edgeLocations: [String]? = nil, insideCidrBlocks: [String]? = nil, networkFunctionGroupName: String? = nil, segmentName: String? = nil, serviceInsertionActions: [ServiceInsertionAction]? = nil, sharedSegments: [String]? = nil) { self.asn = asn self.cidr = cidr self.destinationIdentifier = destinationIdentifier self.edgeLocations = edgeLocations self.insideCidrBlocks = insideCidrBlocks + self.networkFunctionGroupName = networkFunctionGroupName self.segmentName = segmentName + self.serviceInsertionActions = serviceInsertionActions self.sharedSegments = sharedSegments } @@ -1104,7 +1230,9 @@ extension NetworkManager { case destinationIdentifier = "DestinationIdentifier" case edgeLocations = "EdgeLocations" case insideCidrBlocks = "InsideCidrBlocks" + case networkFunctionGroupName = "NetworkFunctionGroupName" case segmentName = "SegmentName" + case serviceInsertionActions = "ServiceInsertionActions" case sharedSegments = "SharedSegments" } } @@ -1130,6 +1258,58 @@ extension NetworkManager { } } + public struct CoreNetworkNetworkFunctionGroup: AWSDecodableShape { + /// The core network edge locations. + public let edgeLocations: [String]? + /// The name of the network function group. + public let name: String? + /// The segments associated with the network function group. + public let segments: ServiceInsertionSegments? + + public init(edgeLocations: [String]? = nil, name: String? = nil, segments: ServiceInsertionSegments? 
= nil) { + self.edgeLocations = edgeLocations + self.name = name + self.segments = segments + } + + private enum CodingKeys: String, CodingKey { + case edgeLocations = "EdgeLocations" + case name = "Name" + case segments = "Segments" + } + } + + public struct CoreNetworkNetworkFunctionGroupIdentifier: AWSEncodableShape { + /// The ID of the core network. + public let coreNetworkId: String? + /// The location for the core network edge. + public let edgeLocation: String? + /// The network function group name. + public let networkFunctionGroupName: String? + + public init(coreNetworkId: String? = nil, edgeLocation: String? = nil, networkFunctionGroupName: String? = nil) { + self.coreNetworkId = coreNetworkId + self.edgeLocation = edgeLocation + self.networkFunctionGroupName = networkFunctionGroupName + } + + public func validate(name: String) throws { + try self.validate(self.coreNetworkId, name: "coreNetworkId", parent: name, max: 50) + try self.validate(self.coreNetworkId, name: "coreNetworkId", parent: name, pattern: "^core-network-([0-9a-f]{8,17})$") + try self.validate(self.edgeLocation, name: "edgeLocation", parent: name, max: 63) + try self.validate(self.edgeLocation, name: "edgeLocation", parent: name, min: 1) + try self.validate(self.edgeLocation, name: "edgeLocation", parent: name, pattern: "^[\\s\\S]*$") + try self.validate(self.networkFunctionGroupName, name: "networkFunctionGroupName", parent: name, max: 256) + try self.validate(self.networkFunctionGroupName, name: "networkFunctionGroupName", parent: name, pattern: "^[\\s\\S]*$") + } + + private enum CodingKeys: String, CodingKey { + case coreNetworkId = "CoreNetworkId" + case edgeLocation = "EdgeLocation" + case networkFunctionGroupName = "NetworkFunctionGroupName" + } + } + public struct CoreNetworkPolicy: AWSDecodableShape { /// Whether a core network policy is the current LIVE policy or the most recently submitted policy. public let alias: CoreNetworkPolicyAlias? @@ -1376,19 +1556,19 @@ extension NetworkManager { } public struct CreateConnectPeerRequest: AWSEncodableShape { - /// The Connect peer BGP options. + /// The Connect peer BGP options. This only applies when the protocol is GRE. public let bgpOptions: BgpOptions? /// The client token associated with the request. public let clientToken: String? /// The ID of the connection attachment. public let connectAttachmentId: String - /// A Connect peer core network address. + /// A Connect peer core network address. This only applies when the protocol is GRE. public let coreNetworkAddress: String? /// The inside IP addresses used for BGP peering. public let insideCidrBlocks: [String]? /// The Connect peer address. public let peerAddress: String - /// The subnet ARN for the Connect peer. + /// The subnet ARN for the Connect peer. This only applies when the protocol is NO_ENCAP. public let subnetArn: String? /// The tags associated with the peer request. public let tags: [Tag]? @@ -2830,6 +3010,23 @@ extension NetworkManager { } } + public struct EdgeOverride: AWSDecodableShape { + /// The list of edge locations. + public let edgeSets: [[String]]? + /// The edge that should be used when overriding the current edge order. + public let useEdge: String? + + public init(edgeSets: [[String]]? = nil, useEdge: String?
= nil) { + self.edgeSets = edgeSets + self.useEdge = useEdge + } + + private enum CodingKeys: String, CodingKey { + case edgeSets = "EdgeSets" + case useEdge = "UseEdge" + } + } + public struct ExecuteCoreNetworkChangeSetRequest: AWSEncodableShape { /// The ID of a core network. public let coreNetworkId: String @@ -3513,7 +3710,7 @@ extension NetworkManager { public let maxResults: Int? /// The token for the next page of results. public let nextToken: String? - /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: connection device link site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection + /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: attachment connect-peer connection core-network device link peering site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection public let resourceType: String? public init(globalNetworkId: String, maxResults: Int? = nil, nextToken: String? = nil, resourceType: String? = nil) { @@ -3580,7 +3777,7 @@ extension NetworkManager { public let registeredGatewayArn: String? /// The ARN of the gateway. public let resourceArn: String? - /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: connection device link site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection + /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: attachment connect-peer connection core-network device link peering site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection public let resourceType: String? public init(accountId: String? = nil, awsRegion: String? = nil, coreNetworkId: String? = nil, globalNetworkId: String, maxResults: Int? = nil, nextToken: String? = nil, registeredGatewayArn: String? = nil, resourceArn: String? = nil, resourceType: String? = nil) { @@ -3669,7 +3866,7 @@ extension NetworkManager { public let registeredGatewayArn: String? /// The ARN of the resource. public let resourceArn: String? - /// The resource type. The following are the supported resource types for Direct Connect: dxcon - The definition model is Connection. dx-gateway - The definition model is DirectConnectGateway. dx-vif - The definition model is VirtualInterface. The following are the supported resource types for Network Manager: connection - The definition model is Connection. device - The definition model is Device. link - The definition model is Link. site - The definition model is Site. The following are the supported resource types for Amazon VPC: customer-gateway - The definition model is CustomerGateway. 
transit-gateway - The definition model is TransitGateway. transit-gateway-attachment - The definition model is TransitGatewayAttachment. transit-gateway-connect-peer - The definition model is TransitGatewayConnectPeer. transit-gateway-route-table - The definition model is TransitGatewayRouteTable. vpn-connection - The definition model is VpnConnection. + /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: attachment connect-peer connection core-network device link peering site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection public let resourceType: String? public init(accountId: String? = nil, awsRegion: String? = nil, coreNetworkId: String? = nil, globalNetworkId: String, maxResults: Int? = nil, nextToken: String? = nil, registeredGatewayArn: String? = nil, resourceArn: String? = nil, resourceType: String? = nil) { @@ -3880,7 +4077,7 @@ extension NetworkManager { public let registeredGatewayArn: String? /// The ARN of the resource. public let resourceArn: String? - /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: connection device link site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection + /// The resource type. The following are the supported resource types: connect-peer transit-gateway-connect-peer vpn-connection public let resourceType: String? public init(accountId: String? = nil, awsRegion: String? = nil, coreNetworkId: String? = nil, globalNetworkId: String, maxResults: Int? = nil, nextToken: String? = nil, registeredGatewayArn: String? = nil, resourceArn: String? = nil, resourceType: String? = nil) { @@ -4851,6 +5048,19 @@ extension NetworkManager { } } + public struct NetworkFunctionGroup: AWSDecodableShape { + /// The name of the network function group. + public let name: String? + + public init(name: String? = nil) { + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + } + } + public struct NetworkResource: AWSDecodableShape { /// The Amazon Web Services account ID. public let accountId: String? @@ -4870,7 +5080,7 @@ extension NetworkManager { public let resourceArn: String? /// The ID of the resource. public let resourceId: String? - /// The resource type. The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: connection device link site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection + /// The resource type. 
The following are the supported resource types for Direct Connect: dxcon dx-gateway dx-vif The following are the supported resource types for Network Manager: attachment connect-peer connection core-network device link peering site The following are the supported resource types for Amazon VPC: customer-gateway transit-gateway transit-gateway-attachment transit-gateway-connect-peer transit-gateway-route-table vpn-connection public let resourceType: String? /// The tags. public let tags: [Tag]? @@ -4988,6 +5198,8 @@ extension NetworkManager { public let coreNetworkAttachmentId: String? /// The edge location for the network destination. public let edgeLocation: String? + /// The network function group name associated with the destination. + public let networkFunctionGroupName: String? /// The ID of the resource. public let resourceId: String? /// The resource type. @@ -4997,9 +5209,10 @@ extension NetworkManager { /// The ID of the transit gateway attachment. public let transitGatewayAttachmentId: String? - public init(coreNetworkAttachmentId: String? = nil, edgeLocation: String? = nil, resourceId: String? = nil, resourceType: String? = nil, segmentName: String? = nil, transitGatewayAttachmentId: String? = nil) { + public init(coreNetworkAttachmentId: String? = nil, edgeLocation: String? = nil, networkFunctionGroupName: String? = nil, resourceId: String? = nil, resourceType: String? = nil, segmentName: String? = nil, transitGatewayAttachmentId: String? = nil) { self.coreNetworkAttachmentId = coreNetworkAttachmentId self.edgeLocation = edgeLocation + self.networkFunctionGroupName = networkFunctionGroupName self.resourceId = resourceId self.resourceType = resourceType self.segmentName = segmentName @@ -5009,6 +5222,7 @@ extension NetworkManager { private enum CodingKeys: String, CodingKey { case coreNetworkAttachmentId = "CoreNetworkAttachmentId" case edgeLocation = "EdgeLocation" + case networkFunctionGroupName = "NetworkFunctionGroupName" case resourceId = "ResourceId" case resourceType = "ResourceType" case segmentName = "SegmentName" @@ -5116,6 +5330,8 @@ extension NetworkManager { public let createdAt: Date? /// The edge location for the peer. public let edgeLocation: String? + /// Describes the error associated with the Connect peer request. + public let lastModificationErrors: [PeeringError]? /// The ID of the account owner. public let ownerAccountId: String? /// The ID of the peering attachment. @@ -5129,11 +5345,12 @@ extension NetworkManager { /// The list of key-value tags associated with the peering. public let tags: [Tag]? - public init(coreNetworkArn: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, edgeLocation: String? = nil, ownerAccountId: String? = nil, peeringId: String? = nil, peeringType: PeeringType? = nil, resourceArn: String? = nil, state: PeeringState? = nil, tags: [Tag]? = nil) { + public init(coreNetworkArn: String? = nil, coreNetworkId: String? = nil, createdAt: Date? = nil, edgeLocation: String? = nil, lastModificationErrors: [PeeringError]? = nil, ownerAccountId: String? = nil, peeringId: String? = nil, peeringType: PeeringType? = nil, resourceArn: String? = nil, state: PeeringState? = nil, tags: [Tag]? 
= nil) { self.coreNetworkArn = coreNetworkArn self.coreNetworkId = coreNetworkId self.createdAt = createdAt self.edgeLocation = edgeLocation + self.lastModificationErrors = lastModificationErrors self.ownerAccountId = ownerAccountId self.peeringId = peeringId self.peeringType = peeringType @@ -5147,6 +5364,7 @@ extension NetworkManager { case coreNetworkId = "CoreNetworkId" case createdAt = "CreatedAt" case edgeLocation = "EdgeLocation" + case lastModificationErrors = "LastModificationErrors" case ownerAccountId = "OwnerAccountId" case peeringId = "PeeringId" case peeringType = "PeeringType" @@ -5156,6 +5374,69 @@ extension NetworkManager { } } + public struct PeeringError: AWSDecodableShape { + /// The error code for the peering request. + public let code: PeeringErrorCode? + /// The message associated with the error code. + public let message: String? + /// Provides additional information about missing permissions for the peering error. + public let missingPermissionsContext: PermissionsErrorContext? + /// The ID of the Peering request. + public let requestId: String? + /// The ARN of the requested peering resource. + public let resourceArn: String? + + public init(code: PeeringErrorCode? = nil, message: String? = nil, missingPermissionsContext: PermissionsErrorContext? = nil, requestId: String? = nil, resourceArn: String? = nil) { + self.code = code + self.message = message + self.missingPermissionsContext = missingPermissionsContext + self.requestId = requestId + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case code = "Code" + case message = "Message" + case missingPermissionsContext = "MissingPermissionsContext" + case requestId = "RequestId" + case resourceArn = "ResourceArn" + } + } + + public struct PermissionsErrorContext: AWSDecodableShape { + /// The missing permissions. + public let missingPermission: String? + + public init(missingPermission: String? = nil) { + self.missingPermission = missingPermission + } + + private enum CodingKeys: String, CodingKey { + case missingPermission = "MissingPermission" + } + } + + public struct ProposedNetworkFunctionGroupChange: AWSDecodableShape { + /// The proposed new attachment policy rule number for the network function group. + public let attachmentPolicyRuleNumber: Int? + /// The proposed name change for the network function group name. + public let networkFunctionGroupName: String? + /// The list of proposed changes to the key-value tags associated with the network function group. + public let tags: [Tag]? + + public init(attachmentPolicyRuleNumber: Int? = nil, networkFunctionGroupName: String? = nil, tags: [Tag]? = nil) { + self.attachmentPolicyRuleNumber = attachmentPolicyRuleNumber + self.networkFunctionGroupName = networkFunctionGroupName + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case attachmentPolicyRuleNumber = "AttachmentPolicyRuleNumber" + case networkFunctionGroupName = "NetworkFunctionGroupName" + case tags = "Tags" + } + } + public struct ProposedSegmentChange: AWSDecodableShape { /// The rule number in the policy document that applies to this change. public let attachmentPolicyRuleNumber: Int? @@ -5545,28 +5826,75 @@ extension NetworkManager { } public struct RouteTableIdentifier: AWSEncodableShape { + /// The route table identifier associated with the network function group. + public let coreNetworkNetworkFunctionGroup: CoreNetworkNetworkFunctionGroupIdentifier? /// The segment edge in a core network. 
public let coreNetworkSegmentEdge: CoreNetworkSegmentEdgeIdentifier? /// The ARN of the transit gateway route table for the attachment request. For example, "TransitGatewayRouteTableArn": "arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table/tgw-rtb-9876543210123456". public let transitGatewayRouteTableArn: String? - public init(coreNetworkSegmentEdge: CoreNetworkSegmentEdgeIdentifier? = nil, transitGatewayRouteTableArn: String? = nil) { + public init(coreNetworkNetworkFunctionGroup: CoreNetworkNetworkFunctionGroupIdentifier? = nil, coreNetworkSegmentEdge: CoreNetworkSegmentEdgeIdentifier? = nil, transitGatewayRouteTableArn: String? = nil) { + self.coreNetworkNetworkFunctionGroup = coreNetworkNetworkFunctionGroup self.coreNetworkSegmentEdge = coreNetworkSegmentEdge self.transitGatewayRouteTableArn = transitGatewayRouteTableArn } public func validate(name: String) throws { + try self.coreNetworkNetworkFunctionGroup?.validate(name: "\(name).coreNetworkNetworkFunctionGroup") try self.coreNetworkSegmentEdge?.validate(name: "\(name).coreNetworkSegmentEdge") try self.validate(self.transitGatewayRouteTableArn, name: "transitGatewayRouteTableArn", parent: name, max: 500) try self.validate(self.transitGatewayRouteTableArn, name: "transitGatewayRouteTableArn", parent: name, pattern: "^[\\s\\S]*$") } private enum CodingKeys: String, CodingKey { + case coreNetworkNetworkFunctionGroup = "CoreNetworkNetworkFunctionGroup" case coreNetworkSegmentEdge = "CoreNetworkSegmentEdge" case transitGatewayRouteTableArn = "TransitGatewayRouteTableArn" } } + public struct ServiceInsertionAction: AWSDecodableShape { + /// The action the service insertion takes for traffic. send-via sends east-west traffic between attachments. send-to sends north-south traffic to the security appliance, and then from that to either the Internet or to an on-premises location. + public let action: SegmentActionServiceInsertion? + /// Describes the mode packets take for the send-via action. This is not used when the action is send-to. dual-hop packets traverse attachments in both the source and destination core network edges. This mode requires that an inspection attachment must be present in all Regions of the service insertion-enabled segments. For single-hop, packets traverse a single intermediate inserted attachment. You can use EdgeOverride to specify a specific edge to use. + public let mode: SendViaMode? + /// The list of network function groups and any edge overrides for the chosen service insertion action. Used for both send-to and send-via. + public let via: Via? + /// The list of destination segments if the service insertion action is send-via. + public let whenSentTo: WhenSentTo? + + public init(action: SegmentActionServiceInsertion? = nil, mode: SendViaMode? = nil, via: Via? = nil, whenSentTo: WhenSentTo? = nil) { + self.action = action + self.mode = mode + self.via = via + self.whenSentTo = whenSentTo + } + + private enum CodingKeys: String, CodingKey { + case action = "Action" + case mode = "Mode" + case via = "Via" + case whenSentTo = "WhenSentTo" + } + } + + public struct ServiceInsertionSegments: AWSDecodableShape { + /// The list of segments associated with the send-to action. + public let sendTo: [String]? + /// The list of segments associated with the send-via action. + public let sendVia: [String]? + + public init(sendTo: [String]? = nil, sendVia: [String]?
= nil) { + self.sendTo = sendTo + self.sendVia = sendVia + } + + private enum CodingKeys: String, CodingKey { + case sendTo = "SendTo" + case sendVia = "SendVia" + } + } + public struct Site: AWSDecodableShape { /// The date and time that the site was created. public let createdAt: Date? @@ -6397,6 +6725,23 @@ extension NetworkManager { } } + public struct Via: AWSDecodableShape { + /// The list of network function groups associated with the service insertion action. + public let networkFunctionGroups: [NetworkFunctionGroup]? + /// Describes any edge overrides. An edge override is a specific edge to be used for traffic. + public let withEdgeOverrides: [EdgeOverride]? + + public init(networkFunctionGroups: [NetworkFunctionGroup]? = nil, withEdgeOverrides: [EdgeOverride]? = nil) { + self.networkFunctionGroups = networkFunctionGroups + self.withEdgeOverrides = withEdgeOverrides + } + + private enum CodingKeys: String, CodingKey { + case networkFunctionGroups = "NetworkFunctionGroups" + case withEdgeOverrides = "WithEdgeOverrides" + } + } + public struct VpcAttachment: AWSDecodableShape { /// Provides details about the VPC attachment. public let attachment: Attachment? @@ -6434,6 +6779,19 @@ extension NetworkManager { case ipv6Support = "Ipv6Support" } } + + public struct WhenSentTo: AWSDecodableShape { + /// The list of destination segments when the service insertion action is send-to. + public let whenSentToSegmentsList: [String]? + + public init(whenSentToSegmentsList: [String]? = nil) { + self.whenSentToSegmentsList = whenSentToSegmentsList + } + + private enum CodingKeys: String, CodingKey { + case whenSentToSegmentsList = "WhenSentToSegmentsList" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/OSIS/OSIS_shapes.swift b/Sources/Soto/Services/OSIS/OSIS_shapes.swift index 6474d19c7d..89e1cff9d8 100644 --- a/Sources/Soto/Services/OSIS/OSIS_shapes.swift +++ b/Sources/Soto/Services/OSIS/OSIS_shapes.swift @@ -56,6 +56,12 @@ extension OSIS { public var description: String { return self.rawValue } } + public enum VpcEndpointManagement: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case customer = "CUSTOMER" + case service = "SERVICE" + public var description: String { return self.rawValue } + } + public enum VpcEndpointServiceName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case opensearchServerless = "OPENSEARCH_SERVERLESS" public var description: String { return self.rawValue } @@ -127,7 +133,7 @@ extension OSIS { } public struct CloudWatchLogDestination: AWSEncodableShape & AWSDecodableShape { - /// The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/OpenSearchService/IngestionService/my-pipeline. + /// The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing log group or create a new one. For example, /aws/vendedlogs/OpenSearchService/pipelines. public let logGroup: String public init(logGroup: String) { @@ -247,7 +253,7 @@ extension OSIS { } public struct EncryptionAtRestOptions: AWSEncodableShape & AWSDecodableShape { - /// The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion. By default, data is encrypted using an AWS owned key. + /// The ARN of the KMS key used to encrypt buffer data. By default, data is encrypted using an Amazon Web Services owned key. 
public let kmsKeyArn: String public init(kmsKeyArn: String) { @@ -267,15 +273,23 @@ extension OSIS { public struct GetPipelineBlueprintRequest: AWSEncodableShape { /// The name of the blueprint to retrieve. public let blueprintName: String + /// The format of the blueprint to retrieve. + public let format: String? - public init(blueprintName: String) { + public init(blueprintName: String, format: String? = nil) { self.blueprintName = blueprintName + self.format = format } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.blueprintName, key: "BlueprintName") + request.encodeQuery(self.format, key: "format") + } + + public func validate(name: String) throws { + try self.validate(self.format, name: "format", parent: name, pattern: "^(YAML|JSON)$") } private enum CodingKeys: CodingKey {} @@ -284,13 +298,17 @@ extension OSIS { public struct GetPipelineBlueprintResponse: AWSDecodableShape { /// The requested blueprint in YAML format. public let blueprint: PipelineBlueprint? + /// The format of the blueprint. + public let format: String? - public init(blueprint: PipelineBlueprint? = nil) { + public init(blueprint: PipelineBlueprint? = nil, format: String? = nil) { self.blueprint = blueprint + self.format = format } private enum CodingKeys: String, CodingKey { case blueprint = "Blueprint" + case format = "Format" } } @@ -331,7 +349,7 @@ extension OSIS { } public struct GetPipelineRequest: AWSEncodableShape { - /// The name of the pipeline to get information about. + /// The name of the pipeline. public let pipelineName: String public init(pipelineName: String) { @@ -489,6 +507,8 @@ extension OSIS { public let bufferOptions: BufferOptions? /// The date and time when the pipeline was created. public let createdAt: Date? + /// Destinations to which the pipeline writes data. + public let destinations: [PipelineDestination]? public let encryptionAtRestOptions: EncryptionAtRestOptions? /// The ingestion endpoints for the pipeline, which you can send data to. public let ingestEndpointUrls: [String]? @@ -506,7 +526,7 @@ extension OSIS { public let pipelineConfigurationBody: String? /// The name of the pipeline. public let pipelineName: String? - /// A list of VPC endpoints that OpenSearch Ingestion has created to other AWS services. + /// A list of VPC endpoints that OpenSearch Ingestion has created to other Amazon Web Services services. public let serviceVpcEndpoints: [ServiceVpcEndpoint]? /// The current status of the pipeline. public let status: PipelineStatus? @@ -516,10 +536,13 @@ extension OSIS { public let tags: [Tag]? /// The VPC interface endpoints that have access to the pipeline. public let vpcEndpoints: [VpcEndpoint]? + /// The VPC endpoint service name for the pipeline. + public let vpcEndpointService: String? - public init(bufferOptions: BufferOptions? = nil, createdAt: Date? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, ingestEndpointUrls: [String]? = nil, lastUpdatedAt: Date? = nil, logPublishingOptions: LogPublishingOptions? = nil, maxUnits: Int? = nil, minUnits: Int? = nil, pipelineArn: String? = nil, pipelineConfigurationBody: String? = nil, pipelineName: String? = nil, serviceVpcEndpoints: [ServiceVpcEndpoint]? = nil, status: PipelineStatus? = nil, statusReason: PipelineStatusReason? = nil, tags: [Tag]? = nil, vpcEndpoints: [VpcEndpoint]? = nil) { + public init(bufferOptions: BufferOptions? = nil, createdAt: Date?
= nil, destinations: [PipelineDestination]? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, ingestEndpointUrls: [String]? = nil, lastUpdatedAt: Date? = nil, logPublishingOptions: LogPublishingOptions? = nil, maxUnits: Int? = nil, minUnits: Int? = nil, pipelineArn: String? = nil, pipelineConfigurationBody: String? = nil, pipelineName: String? = nil, serviceVpcEndpoints: [ServiceVpcEndpoint]? = nil, status: PipelineStatus? = nil, statusReason: PipelineStatusReason? = nil, tags: [Tag]? = nil, vpcEndpoints: [VpcEndpoint]? = nil, vpcEndpointService: String? = nil) { self.bufferOptions = bufferOptions self.createdAt = createdAt + self.destinations = destinations self.encryptionAtRestOptions = encryptionAtRestOptions self.ingestEndpointUrls = ingestEndpointUrls self.lastUpdatedAt = lastUpdatedAt @@ -534,11 +557,13 @@ extension OSIS { self.statusReason = statusReason self.tags = tags self.vpcEndpoints = vpcEndpoints + self.vpcEndpointService = vpcEndpointService } private enum CodingKeys: String, CodingKey { case bufferOptions = "BufferOptions" case createdAt = "CreatedAt" + case destinations = "Destinations" case encryptionAtRestOptions = "EncryptionAtRestOptions" case ingestEndpointUrls = "IngestEndpointUrls" case lastUpdatedAt = "LastUpdatedAt" @@ -553,36 +578,86 @@ extension OSIS { case statusReason = "StatusReason" case tags = "Tags" case vpcEndpoints = "VpcEndpoints" + case vpcEndpointService = "VpcEndpointService" } } public struct PipelineBlueprint: AWSDecodableShape { /// The name of the blueprint. public let blueprintName: String? + /// A description of the blueprint. + public let displayDescription: String? + /// The display name of the blueprint. + public let displayName: String? /// The YAML configuration of the blueprint. public let pipelineConfigurationBody: String? + /// The name of the service that the blueprint is associated with. + public let service: String? + /// The use case that the blueprint relates to. + public let useCase: String? - public init(blueprintName: String? = nil, pipelineConfigurationBody: String? = nil) { + public init(blueprintName: String? = nil, displayDescription: String? = nil, displayName: String? = nil, pipelineConfigurationBody: String? = nil, service: String? = nil, useCase: String? = nil) { self.blueprintName = blueprintName + self.displayDescription = displayDescription + self.displayName = displayName self.pipelineConfigurationBody = pipelineConfigurationBody + self.service = service + self.useCase = useCase } private enum CodingKeys: String, CodingKey { case blueprintName = "BlueprintName" + case displayDescription = "DisplayDescription" + case displayName = "DisplayName" case pipelineConfigurationBody = "PipelineConfigurationBody" + case service = "Service" + case useCase = "UseCase" } } public struct PipelineBlueprintSummary: AWSDecodableShape { /// The name of the blueprint. public let blueprintName: String? - - public init(blueprintName: String? = nil) { + /// A description of the blueprint. + public let displayDescription: String? + /// The display name of the blueprint. + public let displayName: String? + /// The name of the service that the blueprint is associated with. + public let service: String? + /// The use case that the blueprint relates to. + public let useCase: String? + + public init(blueprintName: String? = nil, displayDescription: String? = nil, displayName: String? = nil, service: String? = nil, useCase: String? 
= nil) { self.blueprintName = blueprintName + self.displayDescription = displayDescription + self.displayName = displayName + self.service = service + self.useCase = useCase } private enum CodingKeys: String, CodingKey { case blueprintName = "BlueprintName" + case displayDescription = "DisplayDescription" + case displayName = "DisplayName" + case service = "Service" + case useCase = "UseCase" + } + } + + public struct PipelineDestination: AWSDecodableShape { + /// The endpoint receiving data from the pipeline. + public let endpoint: String? + /// The name of the service receiving data from the pipeline. + public let serviceName: String? + + public init(endpoint: String? = nil, serviceName: String? = nil) { + self.endpoint = endpoint + self.serviceName = serviceName + } + + private enum CodingKeys: String, CodingKey { + case endpoint = "Endpoint" + case serviceName = "ServiceName" } } @@ -602,6 +677,8 @@ extension OSIS { public struct PipelineSummary: AWSDecodableShape { /// The date and time when the pipeline was created. public let createdAt: Date? + /// A list of destinations to which the pipeline writes data. + public let destinations: [PipelineDestination]? /// The date and time when the pipeline was last updated. public let lastUpdatedAt: Date? /// The maximum pipeline capacity, in Ingestion Compute Units (ICUs). @@ -618,8 +695,9 @@ extension OSIS { /// A list of tags associated with the given pipeline. public let tags: [Tag]? - public init(createdAt: Date? = nil, lastUpdatedAt: Date? = nil, maxUnits: Int? = nil, minUnits: Int? = nil, pipelineArn: String? = nil, pipelineName: String? = nil, status: PipelineStatus? = nil, statusReason: PipelineStatusReason? = nil, tags: [Tag]? = nil) { + public init(createdAt: Date? = nil, destinations: [PipelineDestination]? = nil, lastUpdatedAt: Date? = nil, maxUnits: Int? = nil, minUnits: Int? = nil, pipelineArn: String? = nil, pipelineName: String? = nil, status: PipelineStatus? = nil, statusReason: PipelineStatusReason? = nil, tags: [Tag]? = nil) { self.createdAt = createdAt + self.destinations = destinations self.lastUpdatedAt = lastUpdatedAt self.maxUnits = maxUnits self.minUnits = minUnits @@ -632,6 +710,7 @@ extension OSIS { private enum CodingKeys: String, CodingKey { case createdAt = "CreatedAt" + case destinations = "Destinations" case lastUpdatedAt = "LastUpdatedAt" case maxUnits = "MaxUnits" case minUnits = "MinUnits" @@ -646,7 +725,7 @@ extension OSIS { public struct ServiceVpcEndpoint: AWSDecodableShape { /// The name of the service for which a VPC endpoint was created. public let serviceName: VpcEndpointServiceName? - /// The ID of the VPC endpoint that was created. + /// The unique identifier of the VPC endpoint that was created. public let vpcEndpointId: String? public init(serviceName: VpcEndpointServiceName? = nil, vpcEndpointId: String? = nil) { @@ -945,6 +1024,27 @@ extension OSIS { } } + public struct VpcAttachmentOptions: AWSEncodableShape & AWSDecodableShape { + /// Whether a VPC is attached to the pipeline. + public let attachToVpc: Bool + /// The CIDR block to be reserved for OpenSearch Ingestion to create elastic network interfaces (ENIs). + public let cidrBlock: String? + + public init(attachToVpc: Bool, cidrBlock: String? 
= nil) { + self.attachToVpc = attachToVpc + self.cidrBlock = cidrBlock + } + + public func validate(name: String) throws { + try self.validate(self.cidrBlock, name: "cidrBlock", parent: name, pattern: "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(3[0-2]|[12]?[0-9])$") + } + + private enum CodingKeys: String, CodingKey { + case attachToVpc = "AttachToVpc" + case cidrBlock = "CidrBlock" + } + } + public struct VpcEndpoint: AWSDecodableShape { /// The unique identifier of the endpoint. public let vpcEndpointId: String? @@ -971,10 +1071,16 @@ extension OSIS { public let securityGroupIds: [String]? /// A list of subnet IDs associated with the VPC endpoint. public let subnetIds: [String] + /// Options for attaching a VPC to a pipeline. + public let vpcAttachmentOptions: VpcAttachmentOptions? + /// Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline. + public let vpcEndpointManagement: VpcEndpointManagement? - public init(securityGroupIds: [String]? = nil, subnetIds: [String]) { + public init(securityGroupIds: [String]? = nil, subnetIds: [String], vpcAttachmentOptions: VpcAttachmentOptions? = nil, vpcEndpointManagement: VpcEndpointManagement? = nil) { self.securityGroupIds = securityGroupIds self.subnetIds = subnetIds + self.vpcAttachmentOptions = vpcAttachmentOptions + self.vpcEndpointManagement = vpcEndpointManagement } public func validate(name: String) throws { @@ -992,11 +1098,14 @@ extension OSIS { } try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 12) try self.validate(self.subnetIds, name: "subnetIds", parent: name, min: 1) + try self.vpcAttachmentOptions?.validate(name: "\(name).vpcAttachmentOptions") } private enum CodingKeys: String, CodingKey { case securityGroupIds = "SecurityGroupIds" case subnetIds = "SubnetIds" + case vpcAttachmentOptions = "VpcAttachmentOptions" + case vpcEndpointManagement = "VpcEndpointManagement" } } } @@ -1008,6 +1117,7 @@ public struct OSISErrorType: AWSErrorType { enum Code: String { case accessDeniedException = "AccessDeniedException" case conflictException = "ConflictException" + case disabledOperationException = "DisabledOperationException" case internalException = "InternalException" case invalidPaginationTokenException = "InvalidPaginationTokenException" case limitExceededException = "LimitExceededException" @@ -1038,6 +1148,8 @@ public struct OSISErrorType: AWSErrorType { public static var accessDeniedException: Self { .init(.accessDeniedException) } /// The client attempted to remove a resource that is currently in use. public static var conflictException: Self { .init(.conflictException) } + /// Exception is thrown when an operation has been disabled. + public static var disabledOperationException: Self { .init(.disabledOperationException) } /// The request failed because of an unknown error, exception, or failure (the failure is internal to the service). public static var internalException: Self { .init(.internalException) } /// An invalid pagination token provided in the request. 
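Illustrative use of the new OSIS blueprint format parameter and VPC endpoint management options above. This is a minimal sketch, not part of the generated patch: the blueprint name, resource IDs, region, and the preconfigured client are assumptions, while the initializers mirror the fields added in this diff.

import Soto  // umbrella module assumed; adjust to the library product your target depends on

// Minimal sketch: fetch a blueprint as JSON and build the new VPC endpoint options.
// `client` is an existing, already-configured AWSClient (setup omitted).
func osisExample(client: AWSClient) async throws {
    let osis = OSIS(client: client, region: .useast1)

    // New `format` query parameter: request the blueprint as JSON instead of the default YAML.
    let response = try await osis.getPipelineBlueprint(
        .init(blueprintName: "example-blueprint", format: "JSON")
    )
    print(response.format ?? "unknown")

    // New VPC options: reserve a CIDR block for ingestion ENIs and keep the VPC
    // endpoint customer-managed; typically supplied when creating or updating a pipeline.
    let vpcOptions = OSIS.VpcOptions(
        securityGroupIds: ["sg-0123456789abcdef0"],
        subnetIds: ["subnet-0123456789abcdef0"],
        vpcAttachmentOptions: .init(attachToVpc: true, cidrBlock: "10.0.0.0/24"),
        vpcEndpointManagement: .customer
    )
    _ = vpcOptions
}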
diff --git a/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift b/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift index 550e38fe29..0931c1001e 100644 --- a/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift +++ b/Sources/Soto/Services/OpenSearch/OpenSearch_shapes.swift @@ -92,6 +92,12 @@ extension OpenSearch { public var description: String { return self.rawValue } } + public enum DataSourceStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case disabled = "DISABLED" + public var description: String { return self.rawValue } + } + public enum DeploymentStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case completed = "COMPLETED" case eligible = "ELIGIBLE" @@ -211,6 +217,23 @@ extension OpenSearch { public var description: String { return self.rawValue } } + public enum NaturalLanguageQueryGenerationCurrentState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disableComplete = "DISABLE_COMPLETE" + case disableFailed = "DISABLE_FAILED" + case disableInProgress = "DISABLE_IN_PROGRESS" + case enableComplete = "ENABLE_COMPLETE" + case enableFailed = "ENABLE_FAILED" + case enableInProgress = "ENABLE_IN_PROGRESS" + case notEnabled = "NOT_ENABLED" + public var description: String { return self.rawValue } + } + + public enum NaturalLanguageQueryGenerationDesiredState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum NodeStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "Active" case notAvailable = "NotAvailable" @@ -504,6 +527,48 @@ extension OpenSearch { // MARK: Shapes + public struct AIMLOptionsInput: AWSEncodableShape { + /// Container for parameters required for natural language query generation on the specified domain. + public let naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsInput? + + public init(naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsInput? = nil) { + self.naturalLanguageQueryGenerationOptions = naturalLanguageQueryGenerationOptions + } + + private enum CodingKeys: String, CodingKey { + case naturalLanguageQueryGenerationOptions = "NaturalLanguageQueryGenerationOptions" + } + } + + public struct AIMLOptionsOutput: AWSDecodableShape { + /// Container for parameters required for natural language query generation on the specified domain. + public let naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsOutput? + + public init(naturalLanguageQueryGenerationOptions: NaturalLanguageQueryGenerationOptionsOutput? = nil) { + self.naturalLanguageQueryGenerationOptions = naturalLanguageQueryGenerationOptions + } + + private enum CodingKeys: String, CodingKey { + case naturalLanguageQueryGenerationOptions = "NaturalLanguageQueryGenerationOptions" + } + } + + public struct AIMLOptionsStatus: AWSDecodableShape { + /// Machine learning options on the specified domain. + public let options: AIMLOptionsOutput? + public let status: OptionStatus? + + public init(options: AIMLOptionsOutput? = nil, status: OptionStatus? 
= nil) { + self.options = options + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case options = "Options" + case status = "Status" + } + } + public struct AWSDomainInformation: AWSEncodableShape & AWSDecodableShape { /// Name of the domain. public let domainName: String @@ -717,14 +782,17 @@ extension OpenSearch { public let enabled: Bool? /// True if the internal user database is enabled. public let internalUserDatabaseEnabled: Bool? + /// Container for information about the JWT configuration of the Amazon OpenSearch Service. + public let jwtOptions: JWTOptionsOutput? /// Container for information about the SAML configuration for OpenSearch Dashboards. public let samlOptions: SAMLOptionsOutput? - public init(anonymousAuthDisableDate: Date? = nil, anonymousAuthEnabled: Bool? = nil, enabled: Bool? = nil, internalUserDatabaseEnabled: Bool? = nil, samlOptions: SAMLOptionsOutput? = nil) { + public init(anonymousAuthDisableDate: Date? = nil, anonymousAuthEnabled: Bool? = nil, enabled: Bool? = nil, internalUserDatabaseEnabled: Bool? = nil, jwtOptions: JWTOptionsOutput? = nil, samlOptions: SAMLOptionsOutput? = nil) { self.anonymousAuthDisableDate = anonymousAuthDisableDate self.anonymousAuthEnabled = anonymousAuthEnabled self.enabled = enabled self.internalUserDatabaseEnabled = internalUserDatabaseEnabled + self.jwtOptions = jwtOptions self.samlOptions = samlOptions } @@ -733,6 +801,7 @@ extension OpenSearch { case anonymousAuthEnabled = "AnonymousAuthEnabled" case enabled = "Enabled" case internalUserDatabaseEnabled = "InternalUserDatabaseEnabled" + case jwtOptions = "JWTOptions" case samlOptions = "SAMLOptions" } } @@ -744,20 +813,24 @@ extension OpenSearch { public let enabled: Bool? /// True to enable the internal user database. public let internalUserDatabaseEnabled: Bool? + /// Container for information about the JWT configuration of the Amazon OpenSearch Service. + public let jwtOptions: JWTOptionsInput? /// Container for information about the master user. public let masterUserOptions: MasterUserOptions? /// Container for information about the SAML configuration for OpenSearch Dashboards. public let samlOptions: SAMLOptionsInput? - public init(anonymousAuthEnabled: Bool? = nil, enabled: Bool? = nil, internalUserDatabaseEnabled: Bool? = nil, masterUserOptions: MasterUserOptions? = nil, samlOptions: SAMLOptionsInput? = nil) { + public init(anonymousAuthEnabled: Bool? = nil, enabled: Bool? = nil, internalUserDatabaseEnabled: Bool? = nil, jwtOptions: JWTOptionsInput? = nil, masterUserOptions: MasterUserOptions? = nil, samlOptions: SAMLOptionsInput? = nil) { self.anonymousAuthEnabled = anonymousAuthEnabled self.enabled = enabled self.internalUserDatabaseEnabled = internalUserDatabaseEnabled + self.jwtOptions = jwtOptions self.masterUserOptions = masterUserOptions self.samlOptions = samlOptions } public func validate(name: String) throws { + try self.jwtOptions?.validate(name: "\(name).jwtOptions") try self.masterUserOptions?.validate(name: "\(name).masterUserOptions") try self.samlOptions?.validate(name: "\(name).samlOptions") } @@ -766,6 +839,7 @@ extension OpenSearch { case anonymousAuthEnabled = "AnonymousAuthEnabled" case enabled = "Enabled" case internalUserDatabaseEnabled = "InternalUserDatabaseEnabled" + case jwtOptions = "JWTOptions" case masterUserOptions = "MasterUserOptions" case samlOptions = "SAMLOptions" } @@ -1503,6 +1577,8 @@ extension OpenSearch { public let advancedOptions: [String: String]? /// Options for fine-grained access control. 
public let advancedSecurityOptions: AdvancedSecurityOptionsInput? + /// Options for all machine learning features for the specified domain. + public let aimlOptions: AIMLOptionsInput? /// Options for Auto-Tune. public let autoTuneOptions: AutoTuneOptionsInput? /// Container for the cluster configuration of a domain. @@ -1536,10 +1612,11 @@ extension OpenSearch { /// Container for the values required to configure VPC access domains. If you don't specify these values, OpenSearch Service creates the domain with a public endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC. public let vpcOptions: VPCOptions? - public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, autoTuneOptions: AutoTuneOptionsInput? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, tagList: [Tag]? = nil, vpcOptions: VPCOptions? = nil) { + public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, aimlOptions: AIMLOptionsInput? = nil, autoTuneOptions: AutoTuneOptionsInput? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, tagList: [Tag]? = nil, vpcOptions: VPCOptions? = nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.autoTuneOptions = autoTuneOptions self.clusterConfig = clusterConfig self.cognitoOptions = cognitoOptions @@ -1585,6 +1662,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case autoTuneOptions = "AutoTuneOptions" case clusterConfig = "ClusterConfig" case cognitoOptions = "CognitoOptions" @@ -1800,17 +1878,21 @@ extension OpenSearch { public let description: String? /// The name of the data source. public let name: String? + /// The status of the data source. + public let status: DataSourceStatus? - public init(dataSourceType: DataSourceType? = nil, description: String? = nil, name: String? = nil) { + public init(dataSourceType: DataSourceType? = nil, description: String? = nil, name: String? = nil, status: DataSourceStatus? 
= nil) { self.dataSourceType = dataSourceType self.description = description self.name = name + self.status = status } private enum CodingKeys: String, CodingKey { case dataSourceType = "DataSourceType" case description = "Description" case name = "Name" + case status = "Status" } } @@ -2801,6 +2883,8 @@ extension OpenSearch { public let advancedOptions: AdvancedOptionsStatus? /// Container for fine-grained access control settings for the domain. public let advancedSecurityOptions: AdvancedSecurityOptionsStatus? + /// Container for parameters required to enable all machine learning features. + public let aimlOptions: AIMLOptionsStatus? /// Container for Auto-Tune settings for the domain. public let autoTuneOptions: AutoTuneOptionsStatus? /// Container for information about the progress of an existing configuration change. @@ -2834,10 +2918,11 @@ extension OpenSearch { /// The current VPC options for the domain and the status of any updates to their configuration. public let vpcOptions: VPCDerivedInfoStatus? - public init(accessPolicies: AccessPoliciesStatus? = nil, advancedOptions: AdvancedOptionsStatus? = nil, advancedSecurityOptions: AdvancedSecurityOptionsStatus? = nil, autoTuneOptions: AutoTuneOptionsStatus? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfigStatus? = nil, cognitoOptions: CognitoOptionsStatus? = nil, domainEndpointOptions: DomainEndpointOptionsStatus? = nil, ebsOptions: EBSOptionsStatus? = nil, encryptionAtRestOptions: EncryptionAtRestOptionsStatus? = nil, engineVersion: VersionStatus? = nil, ipAddressType: IPAddressTypeStatus? = nil, logPublishingOptions: LogPublishingOptionsStatus? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptionsStatus? = nil, offPeakWindowOptions: OffPeakWindowOptionsStatus? = nil, snapshotOptions: SnapshotOptionsStatus? = nil, softwareUpdateOptions: SoftwareUpdateOptionsStatus? = nil, vpcOptions: VPCDerivedInfoStatus? = nil) { + public init(accessPolicies: AccessPoliciesStatus? = nil, advancedOptions: AdvancedOptionsStatus? = nil, advancedSecurityOptions: AdvancedSecurityOptionsStatus? = nil, aimlOptions: AIMLOptionsStatus? = nil, autoTuneOptions: AutoTuneOptionsStatus? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfigStatus? = nil, cognitoOptions: CognitoOptionsStatus? = nil, domainEndpointOptions: DomainEndpointOptionsStatus? = nil, ebsOptions: EBSOptionsStatus? = nil, encryptionAtRestOptions: EncryptionAtRestOptionsStatus? = nil, engineVersion: VersionStatus? = nil, ipAddressType: IPAddressTypeStatus? = nil, logPublishingOptions: LogPublishingOptionsStatus? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptionsStatus? = nil, offPeakWindowOptions: OffPeakWindowOptionsStatus? = nil, snapshotOptions: SnapshotOptionsStatus? = nil, softwareUpdateOptions: SoftwareUpdateOptionsStatus? = nil, vpcOptions: VPCDerivedInfoStatus? 
= nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.autoTuneOptions = autoTuneOptions self.changeProgressDetails = changeProgressDetails self.clusterConfig = clusterConfig @@ -2860,6 +2945,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case autoTuneOptions = "AutoTuneOptions" case changeProgressDetails = "ChangeProgressDetails" case clusterConfig = "ClusterConfig" @@ -3102,6 +3188,8 @@ extension OpenSearch { public let advancedOptions: [String: String]? /// Settings for fine-grained access control. public let advancedSecurityOptions: AdvancedSecurityOptions? + /// Container for parameters required to enable all machine learning features. + public let aimlOptions: AIMLOptionsOutput? /// The Amazon Resource Name (ARN) of the domain. For more information, see IAM identifiers in the AWS Identity and Access Management User Guide. public let arn: String /// Auto-Tune settings for the domain. @@ -3118,7 +3206,7 @@ extension OpenSearch { public let deleted: Bool? /// Additional options for the domain endpoint, such as whether to require HTTPS for all traffic. public let domainEndpointOptions: DomainEndpointOptions? - /// The DualStack Hosted Zone Id for the domain. + /// The dual stack hosted zone ID for the domain. public let domainEndpointV2HostedZoneId: String? /// Unique identifier for the domain. public let domainId: String @@ -3161,10 +3249,11 @@ extension OpenSearch { /// The VPC configuration for the domain. public let vpcOptions: VPCDerivedInfo? - public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptions? = nil, arn: String, autoTuneOptions: AutoTuneOptionsOutput? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfig, cognitoOptions: CognitoOptions? = nil, created: Bool? = nil, deleted: Bool? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainEndpointV2HostedZoneId: String? = nil, domainId: String, domainName: String, domainProcessingStatus: DomainProcessingStatusType? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, endpoint: String? = nil, endpoints: [String: String]? = nil, endpointV2: String? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, processing: Bool? = nil, serviceSoftwareOptions: ServiceSoftwareOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, upgradeProcessing: Bool? = nil, vpcOptions: VPCDerivedInfo? = nil) { + public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptions? = nil, aimlOptions: AIMLOptionsOutput? = nil, arn: String, autoTuneOptions: AutoTuneOptionsOutput? = nil, changeProgressDetails: ChangeProgressDetails? = nil, clusterConfig: ClusterConfig, cognitoOptions: CognitoOptions? = nil, created: Bool? = nil, deleted: Bool? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainEndpointV2HostedZoneId: String? 
= nil, domainId: String, domainName: String, domainProcessingStatus: DomainProcessingStatusType? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, endpoint: String? = nil, endpoints: [String: String]? = nil, endpointV2: String? = nil, engineVersion: String? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, modifyingProperties: [ModifyingProperties]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, processing: Bool? = nil, serviceSoftwareOptions: ServiceSoftwareOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, upgradeProcessing: Bool? = nil, vpcOptions: VPCDerivedInfo? = nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.arn = arn self.autoTuneOptions = autoTuneOptions self.changeProgressDetails = changeProgressDetails @@ -3200,6 +3289,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case arn = "ARN" case autoTuneOptions = "AutoTuneOptions" case changeProgressDetails = "ChangeProgressDetails" @@ -3518,17 +3608,21 @@ extension OpenSearch { public let description: String? /// The name of the data source. public let name: String? + /// The status of the data source. + public let status: DataSourceStatus? - public init(dataSourceType: DataSourceType? = nil, description: String? = nil, name: String? = nil) { + public init(dataSourceType: DataSourceType? = nil, description: String? = nil, name: String? = nil, status: DataSourceStatus? = nil) { self.dataSourceType = dataSourceType self.description = description self.name = name + self.status = status } private enum CodingKeys: String, CodingKey { case dataSourceType = "DataSourceType" case description = "Description" case name = "Name" + case status = "Status" } } @@ -3872,6 +3966,63 @@ extension OpenSearch { } } + public struct JWTOptionsInput: AWSEncodableShape { + /// True to enable JWT authentication and authorization for a domain. + public let enabled: Bool? + /// Element of the JWT assertion used by the cluster to verify JWT signatures. + public let publicKey: String? + /// Element of the JWT assertion to use for roles. + public let rolesKey: String? + /// Element of the JWT assertion to use for the user name. + public let subjectKey: String? + + public init(enabled: Bool? = nil, publicKey: String? = nil, rolesKey: String? = nil, subjectKey: String? = nil) { + self.enabled = enabled + self.publicKey = publicKey + self.rolesKey = rolesKey + self.subjectKey = subjectKey + } + + public func validate(name: String) throws { + try self.validate(self.rolesKey, name: "rolesKey", parent: name, max: 64) + try self.validate(self.rolesKey, name: "rolesKey", parent: name, min: 1) + try self.validate(self.subjectKey, name: "subjectKey", parent: name, max: 64) + try self.validate(self.subjectKey, name: "subjectKey", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + case publicKey = "PublicKey" + case rolesKey = "RolesKey" + case subjectKey = "SubjectKey" + } + } + + public struct JWTOptionsOutput: AWSDecodableShape { + /// True if JWT use is enabled. + public let enabled: Bool? 
+ /// The key used to verify the signature of incoming JWT requests. + public let publicKey: String? + /// The key used for matching the JWT roles attribute. + public let rolesKey: String? + /// The key used for matching the JWT subject attribute. + public let subjectKey: String? + + public init(enabled: Bool? = nil, publicKey: String? = nil, rolesKey: String? = nil, subjectKey: String? = nil) { + self.enabled = enabled + self.publicKey = publicKey + self.rolesKey = rolesKey + self.subjectKey = subjectKey + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + case publicKey = "PublicKey" + case rolesKey = "RolesKey" + case subjectKey = "SubjectKey" + } + } + public struct Limits: AWSDecodableShape { /// List of additional limits that are specific to a given instance type for each of its instance roles. public let additionalLimits: [AdditionalLimit]? @@ -4526,6 +4677,36 @@ extension OpenSearch { } } + public struct NaturalLanguageQueryGenerationOptionsInput: AWSEncodableShape { + /// The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED. + public let desiredState: NaturalLanguageQueryGenerationDesiredState? + + public init(desiredState: NaturalLanguageQueryGenerationDesiredState? = nil) { + self.desiredState = desiredState + } + + private enum CodingKeys: String, CodingKey { + case desiredState = "DesiredState" + } + } + + public struct NaturalLanguageQueryGenerationOptionsOutput: AWSDecodableShape { + /// The current state of the natural language query generation feature, indicating completion, in progress, or failure. + public let currentState: NaturalLanguageQueryGenerationCurrentState? + /// The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED. + public let desiredState: NaturalLanguageQueryGenerationDesiredState? + + public init(currentState: NaturalLanguageQueryGenerationCurrentState? = nil, desiredState: NaturalLanguageQueryGenerationDesiredState? = nil) { + self.currentState = currentState + self.desiredState = desiredState + } + + private enum CodingKeys: String, CodingKey { + case currentState = "CurrentState" + case desiredState = "DesiredState" + } + } + public struct NodeToNodeEncryptionOptions: AWSEncodableShape & AWSDecodableShape { /// True to enable node-to-node encryption. public let enabled: Bool? @@ -5539,12 +5720,15 @@ extension OpenSearch { public let domainName: String /// The name of the data source to modify. public let name: String + /// The status of the data source update. + public let status: DataSourceStatus? - public init(dataSourceType: DataSourceType, description: String? = nil, domainName: String, name: String) { + public init(dataSourceType: DataSourceType, description: String? = nil, domainName: String, name: String, status: DataSourceStatus? 
= nil) { self.dataSourceType = dataSourceType self.description = description self.domainName = domainName self.name = name + self.status = status } public func encode(to encoder: Encoder) throws { @@ -5554,6 +5738,7 @@ extension OpenSearch { try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.domainName, key: "DomainName") request.encodePath(self.name, key: "Name") + try container.encodeIfPresent(self.status, forKey: .status) } public func validate(name: String) throws { @@ -5571,6 +5756,7 @@ extension OpenSearch { private enum CodingKeys: String, CodingKey { case dataSourceType = "DataSourceType" case description = "Description" + case status = "Status" } } @@ -5594,6 +5780,8 @@ extension OpenSearch { public let advancedOptions: [String: String]? /// Options for fine-grained access control. public let advancedSecurityOptions: AdvancedSecurityOptionsInput? + /// Options for all machine learning features for the specified domain. + public let aimlOptions: AIMLOptionsInput? /// Options for Auto-Tune. public let autoTuneOptions: AutoTuneOptions? /// Changes that you want to make to the cluster configuration, such as the instance type and number of EC2 instances. @@ -5627,10 +5815,11 @@ extension OpenSearch { /// Options to specify the subnets and security groups for a VPC endpoint. For more information, see Launching your Amazon OpenSearch Service domains using a VPC. public let vpcOptions: VPCOptions? - public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, autoTuneOptions: AutoTuneOptions? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, dryRun: Bool? = nil, dryRunMode: DryRunMode? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, vpcOptions: VPCOptions? = nil) { + public init(accessPolicies: String? = nil, advancedOptions: [String: String]? = nil, advancedSecurityOptions: AdvancedSecurityOptionsInput? = nil, aimlOptions: AIMLOptionsInput? = nil, autoTuneOptions: AutoTuneOptions? = nil, clusterConfig: ClusterConfig? = nil, cognitoOptions: CognitoOptions? = nil, domainEndpointOptions: DomainEndpointOptions? = nil, domainName: String, dryRun: Bool? = nil, dryRunMode: DryRunMode? = nil, ebsOptions: EBSOptions? = nil, encryptionAtRestOptions: EncryptionAtRestOptions? = nil, ipAddressType: IPAddressType? = nil, logPublishingOptions: [LogType: LogPublishingOption]? = nil, nodeToNodeEncryptionOptions: NodeToNodeEncryptionOptions? = nil, offPeakWindowOptions: OffPeakWindowOptions? = nil, snapshotOptions: SnapshotOptions? = nil, softwareUpdateOptions: SoftwareUpdateOptions? = nil, vpcOptions: VPCOptions? 
= nil) { self.accessPolicies = accessPolicies self.advancedOptions = advancedOptions self.advancedSecurityOptions = advancedSecurityOptions + self.aimlOptions = aimlOptions self.autoTuneOptions = autoTuneOptions self.clusterConfig = clusterConfig self.cognitoOptions = cognitoOptions @@ -5655,6 +5844,7 @@ extension OpenSearch { try container.encodeIfPresent(self.accessPolicies, forKey: .accessPolicies) try container.encodeIfPresent(self.advancedOptions, forKey: .advancedOptions) try container.encodeIfPresent(self.advancedSecurityOptions, forKey: .advancedSecurityOptions) + try container.encodeIfPresent(self.aimlOptions, forKey: .aimlOptions) try container.encodeIfPresent(self.autoTuneOptions, forKey: .autoTuneOptions) try container.encodeIfPresent(self.clusterConfig, forKey: .clusterConfig) try container.encodeIfPresent(self.cognitoOptions, forKey: .cognitoOptions) @@ -5694,6 +5884,7 @@ extension OpenSearch { case accessPolicies = "AccessPolicies" case advancedOptions = "AdvancedOptions" case advancedSecurityOptions = "AdvancedSecurityOptions" + case aimlOptions = "AIMLOptions" case autoTuneOptions = "AutoTuneOptions" case clusterConfig = "ClusterConfig" case cognitoOptions = "CognitoOptions" diff --git a/Sources/Soto/Services/OpsWorks/OpsWorks_api.swift b/Sources/Soto/Services/OpsWorks/OpsWorks_api.swift index 7a12f97698..cad9fcf25d 100644 --- a/Sources/Soto/Services/OpsWorks/OpsWorks_api.swift +++ b/Sources/Soto/Services/OpsWorks/OpsWorks_api.swift @@ -19,9 +19,7 @@ /// Service object for interacting with AWS OpsWorks service. /// -/// AWS OpsWorks Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error codes. AWS OpsWorks Stacks is an application management service that provides an integrated experience for overseeing the complete application lifecycle. For information about this product, go to the AWS OpsWorks details page. -/// SDKs and CLI The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see: AWS CLI AWS SDK for Java AWS SDK for .NET AWS SDK for PHP 2 AWS SDK for Ruby AWS SDK for Node.js AWS SDK for Python(Boto) -/// Endpoints AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created. opsworks.us-east-1.amazonaws.com opsworks.us-east-2.amazonaws.com opsworks.us-west-1.amazonaws.com opsworks.us-west-2.amazonaws.com opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console) opsworks.eu-west-1.amazonaws.com opsworks.eu-west-2.amazonaws.com opsworks.eu-west-3.amazonaws.com opsworks.eu-central-1.amazonaws.com opsworks.ap-northeast-1.amazonaws.com opsworks.ap-northeast-2.amazonaws.com opsworks.ap-south-1.amazonaws.com opsworks.ap-southeast-1.amazonaws.com opsworks.ap-southeast-2.amazonaws.com opsworks.sa-east-1.amazonaws.com Chef Versions When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions. You can specify Chef 12, 11.10, or 11.4 for your Linux stack. 
We recommend migrating your existing Linux stacks to Chef 12 as soon as possible. +/// OpsWorks Welcome to the OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and usage examples for OpsWorks Stacks actions and data types, including common parameters and error codes. OpsWorks Stacks is an application management service that provides an integrated experience for managing the complete application lifecycle. For information about OpsWorks, see the OpsWorks information page. SDKs and CLI Use the OpsWorks Stacks API by using the Command Line Interface (CLI) or by using one of the Amazon Web Services SDKs to implement applications in your preferred language. For more information, see: CLI SDK for Java SDK for .NET SDK for PHP SDK for Ruby Amazon Web Services SDK for Node.js SDK for Python (Boto) Endpoints OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks can only be accessed or managed within the endpoint in which they are created. opsworks.us-east-1.amazonaws.com opsworks.us-east-2.amazonaws.com opsworks.us-west-1.amazonaws.com opsworks.us-west-2.amazonaws.com opsworks.ca-central-1.amazonaws.com (API only; not available in the Amazon Web Services Management Console) opsworks.eu-west-1.amazonaws.com opsworks.eu-west-2.amazonaws.com opsworks.eu-west-3.amazonaws.com opsworks.eu-central-1.amazonaws.com opsworks.ap-northeast-1.amazonaws.com opsworks.ap-northeast-2.amazonaws.com opsworks.ap-south-1.amazonaws.com opsworks.ap-southeast-1.amazonaws.com opsworks.ap-southeast-2.amazonaws.com opsworks.sa-east-1.amazonaws.com Chef Versions When you call CreateStack, CloneStack, or UpdateStack we recommend you use the ConfigurationManager parameter to specify the Chef version. The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information, see Chef Versions. You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible. public struct OpsWorks: AWSService { // MARK: Member variables @@ -77,7 +75,7 @@ public struct OpsWorks: AWSService { // MARK: API Calls - /// Assign a registered instance to a layer. You can assign registered on-premises instances to any layer type. You can assign registered Amazon EC2 instances only to custom layers. You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an AWS Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Assign a registered instance to a layer. You can assign registered on-premises instances to any layer type. You can assign registered Amazon EC2 instances only to custom layers. You cannot use this action with instances that were created with OpsWorks Stacks. Required Permissions: To use this action, an Identity and Access Management (IAM) user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. 
@Sendable public func assignInstance(_ input: AssignInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -90,7 +88,7 @@ public struct OpsWorks: AWSService { ) } - /// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must first be registered with the stack by calling RegisterVolume. After you register the volume, you must call UpdateVolume to specify a mount point before calling AssignVolume. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func assignVolume(_ input: AssignVolumeRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -103,7 +101,7 @@ public struct OpsWorks: AWSService { ) } - /// Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Associates one of the stack's registered Elastic IP addresses with a specified instance. The address must first be registered with the stack by calling RegisterElasticIp. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func associateElasticIp(_ input: AssociateElasticIpRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -116,7 +114,7 @@ public struct OpsWorks: AWSService { ) } - /// Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. For more information, see Elastic Load Balancing. You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see Elastic Load Balancing Developer Guide. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Attaches an Elastic Load Balancing load balancer to a specified layer. OpsWorks Stacks does not support Application Load Balancer. 
You can only use Classic Load Balancer with OpsWorks Stacks. For more information, see Elastic Load Balancing. You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For more information, see the Elastic Load Balancing Developer Guide. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func attachElasticLoadBalancer(_ input: AttachElasticLoadBalancerRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -181,7 +179,7 @@ public struct OpsWorks: AWSService { ) } - /// Creates a layer. For more information, see How to Create a Layer. You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Creates a layer. For more information, see How to Create a Layer. You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack does not have an existing layer of that type. A stack can have at most one instance of each noncustom layer; if you attempt to create a second instance, CreateLayer fails. A stack can have an arbitrary number of custom layers, so you can call CreateLayer as many times as you like for that layer type. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func createLayer(_ input: CreateLayerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateLayerResult { return try await self.client.execute( @@ -220,7 +218,7 @@ public struct OpsWorks: AWSService { ) } - /// Deletes a specified app. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Deletes a specified app. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func deleteApp(_ input: DeleteAppRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -233,7 +231,7 @@ public struct OpsWorks: AWSService { ) } - /// Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it. For more information, see Deleting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. 
For more information on user permissions, see Managing User Permissions. + /// Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it. For more information, see Deleting Instances. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func deleteInstance(_ input: DeleteInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -246,7 +244,7 @@ public struct OpsWorks: AWSService { ) } - /// Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Deletes a specified layer. You must first stop and then delete all associated instances or unassign registered instances. For more information, see How to Delete a Layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func deleteLayer(_ input: DeleteLayerRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -272,7 +270,7 @@ public struct OpsWorks: AWSService { ) } - /// Deletes a user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. + /// Deletes a user profile. Required Permissions: To use this action, an IAM user must have an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. @Sendable public func deleteUserProfile(_ input: DeleteUserProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -285,7 +283,7 @@ public struct OpsWorks: AWSService { ) } - /// Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html. + /// Deregisters a specified Amazon ECS cluster from a stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html. @Sendable public func deregisterEcsCluster(_ input: DeregisterEcsClusterRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -298,7 +296,7 @@ public struct OpsWorks: AWSService { ) } - /// Deregisters a specified Elastic IP address. 
The address can then be registered by another stack. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Deregisters a specified Elastic IP address. The address can be registered by another stack after it is deregistered. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func deregisterElasticIp(_ input: DeregisterElasticIpRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -311,7 +309,7 @@ public struct OpsWorks: AWSService { ) } - /// Deregister a registered Amazon EC2 or on-premises instance. This action removes the instance from the stack and returns it to your control. This action cannot be used with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Deregister an instance from OpsWorks Stacks. The instance can be a registered instance (Amazon EC2 or on-premises) or an instance created with OpsWorks. This action removes the instance from the stack and returns it to your control. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func deregisterInstance(_ input: DeregisterInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -350,7 +348,7 @@ public struct OpsWorks: AWSService { ) } - /// Describes the available AWS OpsWorks Stacks agent versions. You must specify a stack ID or a configuration manager. DescribeAgentVersions returns a list of available agent versions for the specified stack or configuration manager. + /// Describes the available OpsWorks Stacks agent versions. You must specify a stack ID or a configuration manager. DescribeAgentVersions returns a list of available agent versions for the specified stack or configuration manager. @Sendable public func describeAgentVersions(_ input: DescribeAgentVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAgentVersionsResult { return try await self.client.execute( @@ -363,7 +361,7 @@ public struct OpsWorks: AWSService { ) } - /// Requests a description of a specified set of apps. This call accepts only one resource-identifying parameter. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. + /// Requests a description of a specified set of apps. This call accepts only one resource-identifying parameter. 
Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. @Sendable public func describeApps(_ input: DescribeAppsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAppsResult { return try await self.client.execute( @@ -402,7 +400,7 @@ public struct OpsWorks: AWSService { ) } - /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter. + /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter. @Sendable public func describeEcsClusters(_ input: DescribeEcsClustersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEcsClustersResult { return try await self.client.execute( @@ -492,7 +490,7 @@ public struct OpsWorks: AWSService { ) } - /// Describes the operating systems that are supported by AWS OpsWorks Stacks. + /// Describes the operating systems that are supported by OpsWorks Stacks. @Sendable public func describeOperatingSystems(logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeOperatingSystemsResponse { return try await self.client.execute( @@ -543,7 +541,7 @@ public struct OpsWorks: AWSService { ) } - /// Describes AWS OpsWorks Stacks service errors. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter. + /// Describes OpsWorks Stacks service errors. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter. @Sendable public func describeServiceErrors(_ input: DescribeServiceErrorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeServiceErrorsResult { return try await self.client.execute( @@ -634,7 +632,7 @@ public struct OpsWorks: AWSService { ) } - /// Detaches a specified Elastic Load Balancing instance from its layer. 
Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Detaches a specified Elastic Load Balancing instance from its layer. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func detachElasticLoadBalancer(_ input: DetachElasticLoadBalancerRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -738,7 +736,7 @@ public struct OpsWorks: AWSService { ) } - /// Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack. We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the AWS OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the AWS CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an AWS OpsWorks Stacks Stack. Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Registers instances that were created outside of OpsWorks Stacks with a specified stack. We do not recommend using this action to register instances. The complete registration operation includes two tasks: installing the OpsWorks Stacks agent on the instance, and registering the instance with the stack. RegisterInstance handles only the second step. You should instead use the CLI register command, which performs the entire registration operation. For more information, see Registering an Instance with an OpsWorks Stacks Stack. Registered instances have the same requirements as instances that are created by using the CreateInstance API. For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance type. For more information about requirements for instances that you want to register, see Preparing the Instance. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func registerInstance(_ input: RegisterInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterInstanceResult { return try await self.client.execute( @@ -777,7 +775,7 @@ public struct OpsWorks: AWSService { ) } - /// Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances. 
To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Specify the load-based auto scaling configuration for a specified layer. For more information, see Managing Load with Time-based and Load-based Instances. To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func setLoadBasedAutoScaling(_ input: SetLoadBasedAutoScalingRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -790,7 +788,7 @@ public struct OpsWorks: AWSService { ) } - /// Specifies a user's permissions. For more information, see Security and Permissions. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Specifies a user's permissions. For more information, see Security and Permissions. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func setPermission(_ input: SetPermissionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -868,7 +866,7 @@ public struct OpsWorks: AWSService { ) } - /// Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more information about how tagging works, see Tags in the AWS OpsWorks User Guide. + /// Apply cost-allocation tags to a specified stack or layer in OpsWorks Stacks. For more information about how tagging works, see Tags in the OpsWorks User Guide. @Sendable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -881,7 +879,7 @@ public struct OpsWorks: AWSService { ) } - /// Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. You cannot use this action with instances that were created with AWS OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. + /// Unassigns a registered instance from all layers that are using the instance. The instance remains in the stack as an unassigned instance, and can be assigned to another layer as needed. 
You cannot use this action with instances that were created with OpsWorks Stacks. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack or an attached policy that explicitly grants permissions. For more information about user permissions, see Managing User Permissions. @Sendable public func unassignInstance(_ input: UnassignInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1024,7 +1022,7 @@ public struct OpsWorks: AWSService { ) } - /// Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. + /// Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management. Required Permissions: To use this action, an IAM user must have a Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information on user permissions, see Managing User Permissions. @Sendable public func updateVolume(_ input: UpdateVolumeRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1051,7 +1049,7 @@ extension OpsWorks { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension OpsWorks { - /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter. + /// Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID, you can use the MaxResults and NextToken parameters to paginate the response. However, OpsWorks Stacks currently supports only one cluster per layer, so the result set has a maximum of one element. Required Permissions: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack or an attached policy that explicitly grants permission. For more information about user permissions, see Managing User Permissions. This call accepts only one resource-identifying parameter. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/OpsWorks/OpsWorks_shapes.swift b/Sources/Soto/Services/OpsWorks/OpsWorks_shapes.swift index 46aabfc183..201953f5f9 100644 --- a/Sources/Soto/Services/OpsWorks/OpsWorks_shapes.swift +++ b/Sources/Soto/Services/OpsWorks/OpsWorks_shapes.swift @@ -294,7 +294,7 @@ extension OpsWorks { public let domains: [String]? /// Whether to enable SSL for the app. public let enableSsl: Bool? - /// An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables. 
There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variable names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases, but if you do exceed it, you will cause an exception (API) with an "Environment: is too large (maximum is 20 KB)" message. + /// An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables. There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variable names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases, but if you do exceed it, you will cause an exception (API) with an "Environment: is too large (maximum is 20 KB)" message. public let environment: [EnvironmentVariable]? /// The app name. public let name: String? @@ -411,15 +411,15 @@ extension OpsWorks { } public struct AutoScalingThresholds: AWSEncodableShape & AWSDecodableShape { - /// Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have AWS OpsWorks Stacks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing AWS OpsWorks Stacks to Act on Your Behalf. + /// Custom CloudWatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names, which are case sensitive and must be in the same region as the stack. To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. You can either have OpsWorks Stacks update the role for you when you first use this feature or you can edit the role manually. For more information, see Allowing OpsWorks Stacks to Act on Your Behalf. public let alarms: [String]? /// The CPU utilization threshold, as a percent of the available CPU. A value of -1 disables the threshold. public let cpuThreshold: Double? - /// The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics and suppress additional scaling events. For example, AWS OpsWorks Stacks adds new instances following an upscaling event but the instances won't start reducing the load until they have been booted and configured. There is no point in raising additional scaling events during that operation, which typically takes several minutes. IgnoreMetricsTime allows you to direct AWS OpsWorks Stacks to suppress scaling events long enough to get the new instances online. + /// The amount of time (in minutes) after a scaling event occurs that OpsWorks Stacks should ignore metrics and suppress additional scaling events. For example, OpsWorks Stacks adds new instances following an upscaling event but the instances won't start reducing the load until they have been booted and configured. There is no point in raising additional scaling events during that operation, which typically takes several minutes. IgnoreMetricsTime allows you to direct OpsWorks Stacks to suppress scaling events long enough to get the new instances online. 
public let ignoreMetricsTime: Int? /// The number of instances to add or remove when the load exceeds a threshold. public let instanceCount: Int? - /// The load threshold. A value of -1 disables the threshold. For more information about how load is computed, see Load (computing). + /// The load threshold. A value of -1 disables the threshold. For more information about how load is computed, see Load (computing). public let loadThreshold: Double? /// The memory utilization threshold, as a percent of the available memory. A value of -1 disables the threshold. public let memoryThreshold: Double? @@ -455,7 +455,7 @@ extension OpsWorks { } public struct BlockDeviceMapping: AWSEncodableShape & AWSDecodableShape { - /// The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and AWS OpsWorks Stacks will provide the correct device name. + /// The device name that is exposed to the instance, such as /dev/sdh. For the root device, you can use the explicit device name or you can set this parameter to ROOT_DEVICE and OpsWorks Stacks will provide the correct device name. public let deviceName: String? /// An EBSBlockDevice that defines how to configure an Amazon EBS volume when the instance is launched. public let ebs: EbsBlockDevice? @@ -497,7 +497,7 @@ extension OpsWorks { } public struct CloneStackRequest: AWSEncodableShape { - /// The default AWS OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances. The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. + /// The default OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks automatically installs that version on the stack's instances. The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. public let agentVersion: String? /// A list of stack attributes and values as key/value pairs to be added to the cloned stack. public let attributes: [StackAttributesKeys: String]? @@ -507,39 +507,39 @@ extension OpsWorks { public let cloneAppIds: [String]? /// Whether to clone the source stack's permissions. public let clonePermissions: Bool? 
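// Illustrative sketch only, not part of the generated sources: wiring the AutoScalingThresholds
// shape documented above into SetLoadBasedAutoScaling. The layer ID is a placeholder and only a
// subset of the threshold fields is set; the remaining fields default to nil.
func enableLoadBasedScaling(_ opsWorks: OpsWorks, layerId: String) async throws {
    // Add two instances when CPU stays above 80%; remove one after it stays below 20% for 10 minutes.
    let upScaling = OpsWorks.AutoScalingThresholds(cpuThreshold: 80, ignoreMetricsTime: 5, instanceCount: 2)
    let downScaling = OpsWorks.AutoScalingThresholds(cpuThreshold: 20, instanceCount: 1, thresholdsWaitTime: 10)
    let request = OpsWorks.SetLoadBasedAutoScalingRequest(
        downScaling: downScaling,
        enable: true,
        layerId: layerId,
        upScaling: upScaling
    )
    try await opsWorks.setLoadBasedAutoScaling(request)
}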
- /// The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. + /// The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. public let configurationManager: StackConfigurationManager? - /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes. + /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes. public let customCookbooksSource: Source? - /// A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes + /// A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes public let customJson: String? /// The cloned stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description. public let defaultAvailabilityZone: String? - /// The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. + /// The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. public let defaultInstanceProfileArn: String? - /// The stack's operating system, which must be set to one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. The default option is the parent stack's operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems. You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux. 
+ /// The stack's operating system, which must be set to one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. The default option is the parent stack's operating system. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems. You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux. public let defaultOs: String? /// The default root device type. This value is used by default for all instances in the cloned stack, but you can override it when you create an instance. For more information, see Storage for the Root Device. public let defaultRootDeviceType: RootDeviceType? - /// A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. + /// A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. public let defaultSshKeyName: String? /// The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description. public let defaultSubnetId: String? /// The stack's host name theme, with spaces are replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are: Baked_Goods Clouds Europe_Cities Fruits Greek_Deities_and_Titans Legendary_creatures_from_Japan Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. public let hostnameTheme: String? - /// The cloned stack name. + /// The cloned stack name. 
Stack names can be a maximum of 64 characters. public let name: String? - /// The cloned stack AWS region, such as "ap-northeast-2". For more information about AWS regions, see Regions and Endpoints. + /// The cloned stack Amazon Web Services Region, such as ap-northeast-2. For more information about Amazon Web Services Regions, see Regions and Endpoints. public let region: String? - /// The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers. You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly. + /// The stack Identity and Access Management (IAM) role, which allows OpsWorks Stacks to work with Amazon Web Services resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. If you create a stack by using the OpsWorks Stacks console, it creates the role for you. You can obtain an existing stack's IAM ARN programmatically by calling DescribePermissions. For more information about IAM ARNs, see Using Identifiers. You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly. public let serviceRoleArn: String /// The source stack ID. public let sourceStackId: String /// Whether to use custom cookbooks. public let useCustomCookbooks: Bool? - /// Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack. + /// Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers. OpsWorks Stacks provides a standard set of security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings: True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting).
You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group. False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack. public let useOpsworksSecurityGroups: Bool? - /// The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later. If your account supports EC2 Classic, the default value is no VPC. If your account does not support EC2 Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note the following: It must belong to a VPC in your account that is in the specified region. You must specify a value for DefaultSubnetId. For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms. + /// The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All instances are launched into this VPC, and you cannot change the ID later. If your account supports EC2 Classic, the default value is no VPC. If your account does not support EC2 Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note the following: It must belong to a VPC in your account that is in the specified region. You must specify a value for DefaultSubnetId. For more information about how to use OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2 Classic, see Supported Platforms. public let vpcId: String? public init(agentVersion: String? = nil, attributes: [StackAttributesKeys: String]? = nil, chefConfiguration: ChefConfiguration? = nil, cloneAppIds: [String]? = nil, clonePermissions: Bool? = nil, configurationManager: StackConfigurationManager? = nil, customCookbooksSource: Source? = nil, customJson: String? = nil, defaultAvailabilityZone: String? = nil, defaultInstanceProfileArn: String? = nil, defaultOs: String? = nil, defaultRootDeviceType: RootDeviceType? = nil, defaultSshKeyName: String? = nil, defaultSubnetId: String? = nil, hostnameTheme: String? = nil, name: String? = nil, region: String? = nil, serviceRoleArn: String, sourceStackId: String, useCustomCookbooks: Bool? 
= nil, useOpsworksSecurityGroups: Bool? = nil, vpcId: String? = nil) { @@ -634,9 +634,9 @@ extension OpsWorks { public let datetimeFormat: String? /// Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8. Encodings supported by Python codecs.decode() can be used here. public let encoding: CloudWatchLogsEncoding? - /// Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log*). Only the latest file is pushed to CloudWatch Logs, based on file modification time. We recommend that you use wild card characters to specify a series of files of the same type, such as access_log.2014-06-01-01, access_log.2014-06-01-02, and so on by using a pattern like access_log.*. Don't use a wildcard to match multiple file types, such as access_log_80 and access_log_443. To specify multiple, different file types, add another log stream entry to the configuration file, so that each log file type is stored in a different log group. Zipped files are not supported. + /// Specifies log files that you want to push to CloudWatch Logs. File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log*). Only the latest file is pushed to CloudWatch Logs, based on file modification time. We recommend that you use wild card characters to specify a series of files of the same type, such as access_log.2014-06-01-01, access_log.2014-06-01-02, and so on by using a pattern like access_log.*. Don't use a wildcard to match multiple file types, such as access_log_80 and access_log_443. To specify multiple, different file types, add another log stream entry to the configuration file, so that each log file type is stored in a different log group. Zipped files are not supported. public let file: String? - /// Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as '1', '2-5'. The default value is '1', meaning the first line is used to calculate the fingerprint. Fingerprint lines are not sent to CloudWatch Logs unless all specified lines are available. + /// Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, such as '1', '2-5'. The default value is '1', meaning the first line is used to calculate the fingerprint. Fingerprint lines are not sent to CloudWatch Logs unless all specified lines are available. public let fileFingerprintLines: String? /// Specifies where to start to read data (start_of_file or end_of_file). The default is start_of_file. This setting is only used if there is no state persisted for that log stream. public let initialPosition: CloudWatchLogsInitialPosition? @@ -748,7 +748,7 @@ extension OpsWorks { public let sslConfiguration: SslConfiguration? /// The stack ID. public let stackId: String - /// The app type. Each supported type is associated with a particular layer. For example, PHP applications are associated with a PHP layer. AWS OpsWorks Stacks deploys an application to those instances that are members of the corresponding layer. If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other. + /// The app type. Each supported type is associated with a particular layer. For example, PHP applications are associated with a PHP layer. 
OpsWorks Stacks deploys an application to those instances that are members of the corresponding layer. If your app isn't one of the standard types, or you prefer to implement your own Deploy recipes, specify other. public let type: AppType public init(appSource: Source? = nil, attributes: [AppAttributesKeys: String]? = nil, dataSources: [DataSource]? = nil, description: String? = nil, domains: [String]? = nil, enableSsl: Bool? = nil, environment: [EnvironmentVariable]? = nil, name: String, shortname: String? = nil, sslConfiguration: SslConfiguration? = nil, stackId: String, type: AppType) { @@ -802,7 +802,7 @@ extension OpsWorks { public let command: DeploymentCommand /// A user-defined comment. public let comment: String? - /// A string that contains user-defined, custom JSON. You can use this parameter to override some corresponding default stack configuration JSON values. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes and Overriding Attributes With Custom JSON. + /// A string that contains user-defined, custom JSON. You can use this parameter to override some corresponding default stack configuration JSON values. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes and Overriding Attributes With Custom JSON. public let customJson: String? /// The instance IDs for the deployment targets. public let instanceIds: [String]? @@ -846,7 +846,7 @@ extension OpsWorks { } public struct CreateInstanceRequest: AWSEncodableShape { - /// The default AWS OpsWorks Stacks agent version. You have the following options: INHERIT - Use the stack's default agent version setting. version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, edit the instance configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the instance. The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + /// The default OpsWorks Stacks agent version. You have the following options: INHERIT - Use the stack's default agent version setting. version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, edit the instance configuration and specify a new version. OpsWorks Stacks installs that version on the instance. The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. public let agentVersion: String? /// A custom AMI ID to be used to create the instance. The AMI should be based on one of the supported operating systems. For more information, see Using Custom AMIs. If you specify a custom AMI, you must set Os to Custom. public let amiId: String? @@ -860,16 +860,16 @@ extension OpsWorks { public let blockDeviceMappings: [BlockDeviceMapping]? /// Whether to create an Amazon EBS-optimized instance. public let ebsOptimized: Bool? 
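// Illustrative sketch only, not part of the generated sources: creating and starting an instance
// with the CreateInstanceRequest shape defined here. The stack ID, layer ID, host name, and OS
// string are placeholders.
func launchInstance(_ opsWorks: OpsWorks, stackId: String, layerId: String) async throws -> String? {
    let request = OpsWorks.CreateInstanceRequest(
        hostname: "web1",
        instanceType: "t2.micro",
        layerIds: [layerId],
        os: "Amazon Linux 2",
        stackId: stackId
    )
    let result = try await opsWorks.createInstance(request)
    // A newly created instance is stopped; start it explicitly.
    if let instanceId = result.instanceId {
        try await opsWorks.startInstance(OpsWorks.StartInstanceRequest(instanceId: instanceId))
    }
    return result.instanceId
}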
- /// The instance host name. + /// The instance host name. The following are character limits for instance host names. Linux-based instances: 63 characters Windows-based instances: 15 characters public let hostname: String? /// Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. We strongly recommend using the default value of true to ensure that your instances have the latest security updates. public let installUpdatesOnBoot: Bool? - /// The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table. + /// The instance type, such as t2.micro. For a list of supported instance types, open the stack in the console, choose Instances, and choose + Instance. The Size list contains the currently supported types. For more information, see Instance Families and Types. The parameter values that you use to specify the various types are in the API Name column of the Available Instance Types table. public let instanceType: String /// An array that contains the instance's layer IDs. public let layerIds: [String] - /// The instance's operating system, which must be set to one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, - /// Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. For more information about the supported operating systems, see AWS OpsWorks Stacks Operating Systems. The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about supported operating systems, see Operating SystemsFor more information about how to use custom AMIs with AWS OpsWorks Stacks, see Using Custom AMIs. + /// The instance's operating system, which must be set to one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. 
CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, + /// Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. Not all operating systems are supported with all versions of Chef. For more information about the supported operating systems, see OpsWorks Stacks Operating Systems. The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the CreateInstance action's AmiId parameter to specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about how to use custom AMIs with OpsWorks Stacks, see Using Custom AMIs. public let os: String? /// The instance root device type. For more information, see Storage for the Root Device. public let rootDeviceType: RootDeviceType? @@ -877,9 +877,9 @@ extension OpsWorks { public let sshKeyName: String? /// The stack ID. public let stackId: String - /// The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct AWS OpsWorks Stacks to launch the instance in a different subnet. + /// The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct OpsWorks Stacks to launch the instance in a different subnet. public let subnetId: String? - /// The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are valid values for this parameter: dedicated, default, or host. Because there are costs associated with changes in tenancy options, we recommend that you research tenancy options before choosing them for your instances. For more information about dedicated hosts, see Dedicated Hosts Overview and Amazon EC2 Dedicated Hosts. For more information about dedicated instances, see Dedicated Instances and Amazon EC2 Dedicated Instances. + /// The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are valid values for this parameter: dedicated, default, or host. Because there are costs associated with changes in tenancy options, we recommend that you research tenancy options before choosing them for your instances. For more information about dedicated hosts, see Dedicated Hosts Overview and Amazon EC2 Dedicated Hosts. For more information about dedicated instances, see Dedicated Instances and Amazon EC2 Dedicated Instances. public let tenancy: String? /// The instance's virtualization type, paravirtual or hvm. public let virtualizationType: String? @@ -947,11 +947,11 @@ extension OpsWorks { public let autoAssignElasticIps: Bool? /// For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer. public let autoAssignPublicIps: Bool? - /// Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream. + /// Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream. public let cloudWatchLogsConfiguration: CloudWatchLogsConfiguration? 
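// Illustrative sketch only, not part of the generated sources: creating a custom layer that
// streams a log file to CloudWatch Logs through the cloudWatchLogsConfiguration property above.
// The stack ID, log group name, and file path are placeholders.
func createCustomLayer(_ opsWorks: OpsWorks, stackId: String) async throws -> String? {
    // A wildcard selects a series of files of the same type; zipped files are not supported.
    let logs = OpsWorks.CloudWatchLogsConfiguration(
        enabled: true,
        logStreams: [OpsWorks.CloudWatchLogsLogStream(file: "/var/log/nginx/access.log*", logGroupName: "app-access-log")]
    )
    let request = OpsWorks.CreateLayerRequest(
        cloudWatchLogsConfiguration: logs,
        name: "App Servers",
        shortname: "app-servers", // custom layer short names have a 32-character limit
        stackId: stackId,
        type: .custom
    )
    let result = try await opsWorks.createLayer(request)
    return result.layerId
}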
/// The ARN of an IAM profile to be used for the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers. public let customInstanceProfileArn: String? - /// A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI. + /// A JSON-formatted string containing custom stack configuration and deployment attributes to be installed on the layer's instances. For more information, see Using Custom JSON. This feature is supported as of version 1.7.42 of the CLI. public let customJson: String? /// A LayerCustomRecipes object that specifies the layer custom recipes. public let customRecipes: Recipes? @@ -963,15 +963,15 @@ extension OpsWorks { public let installUpdatesOnBoot: Bool? /// A LifeCycleEventConfiguration object that you can use to configure the Shutdown event to specify an execution timeout and enable or disable Elastic Load Balancer connection draining. public let lifecycleEventConfiguration: LifecycleEventConfiguration? - /// The layer name, which is used by the console. + /// The layer name, which is used by the console. Layer names can be a maximum of 32 characters. public let name: String /// An array of Package objects that describes the layer packages. public let packages: [String]? - /// For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'. The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference. + /// For custom layers only, use this parameter to specify the layer's short name, which is used internally by OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 32 characters, which are limited to the alphanumeric characters, '-', '_', and '.'. Built-in layer short names are defined by OpsWorks Stacks. For more information, see the Layer Reference. public let shortname: String /// The layer stack ID. public let stackId: String - /// The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 stacks. + /// The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 stacks. public let type: LayerType /// Whether to use Amazon EBS-optimized instances. public let useEbsOptimizedInstances: Bool? @@ -1035,43 +1035,43 @@ extension OpsWorks { } public struct CreateStackRequest: AWSEncodableShape { - /// The default AWS OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances. 
The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. + /// The default OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks installs that version on the stack's instances. The default setting is the most recent release of the agent. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. public let agentVersion: String? /// One or more user-defined key-value pairs to be added to the stack attributes. public let attributes: [StackAttributesKeys: String]? /// A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack. public let chefConfiguration: ChefConfiguration? - /// The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. + /// The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. public let configurationManager: StackConfigurationManager? /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes. public let customCookbooksSource: Source? - /// A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. + /// A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. public let customJson: String? /// The stack's default Availability Zone, which must be in the specified region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see the VpcId parameter description. 
public let defaultAvailabilityZone: String? /// The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. public let defaultInstanceProfileArn: String - /// The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs. The default option is the current Amazon Linux version. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems. + /// The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information, see Using Custom AMIs. The default option is the current Amazon Linux version. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems. public let defaultOs: String? /// The default root device type. This value is the default for all instances in the stack, but you can override it when you create an instance. The default option is instance-store. For more information, see Storage for the Root Device. public let defaultRootDeviceType: RootDeviceType? - /// A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. + /// A default Amazon EC2 key pair name. The default value is none. 
If you specify a key pair name, OpsWorks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. public let defaultSshKeyName: String? /// The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description. public let defaultSubnetId: String? /// The stack's host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are: Baked_Goods Clouds Europe_Cities Fruits Greek_Deities_and_Titans Legendary_creatures_from_Japan Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. public let hostnameTheme: String? - /// The stack name. + /// The stack name. Stack names can be a maximum of 64 characters. public let name: String - /// The stack's AWS region, such as ap-south-1. For more information about Amazon regions, see Regions and Endpoints. In the AWS CLI, this API maps to the --stack-region parameter. If the --stack-region parameter and the AWS CLI common parameter --region are set to the same value, the stack uses a regional endpoint. If the --stack-region parameter is not set, but the AWS CLI --region parameter is, this also results in a stack with a regional endpoint. However, if the --region parameter is set to us-east-1, and the --stack-region parameter is set to one of the following, then the stack uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1, ap-southeast-2. In this case, the actual API endpoint of the stack is in us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint that is closest to where you manage AWS, we recommend that you use regional endpoints for new stacks. The AWS CLI common --region parameter always specifies a regional API endpoint; it cannot be used to specify a classic AWS OpsWorks Stacks region. + /// The stack's Amazon Web Services Region, such as ap-south-1. For more information about Amazon Web Services Regions, see Regions and Endpoints. In the CLI, this API maps to the --stack-region parameter. If the --stack-region parameter and the CLI common parameter --region are set to the same value, the stack uses a regional endpoint. If the --stack-region parameter is not set, but the CLI --region parameter is, this also results in a stack with a regional endpoint. However, if the --region parameter is set to us-east-1, and the --stack-region parameter is set to one of the following, then the stack uses a legacy or classic region: us-west-1, us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1, ap-southeast-2. 
In this case, the actual API endpoint of the stack is in us-east-1. Only the preceding regions are supported as classic regions in the us-east-1 API endpoint. Because it is a best practice to choose the regional endpoint that is closest to where you manage Amazon Web Services, we recommend that you use regional endpoints for new stacks. The CLI common --region parameter always specifies a regional API endpoint; it cannot be used to specify a classic OpsWorks Stacks region. public let region: String - /// The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers. + /// The stack's IAM role, which allows OpsWorks Stacks to work with Amazon Web Services resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an existing IAM role. For more information about IAM ARNs, see Using Identifiers. public let serviceRoleArn: String /// Whether the stack uses custom cookbooks. public let useCustomCookbooks: Bool? - /// Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack. + /// Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers. OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. With UseOpsworksSecurityGroups you can instead provide your own custom security groups. UseOpsworksSecurityGroups has the following settings: True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group. False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack. public let useOpsworksSecurityGroups: Bool? - /// The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later. 
If your account supports EC2-Classic, the default value is no VPC. If your account does not support EC2-Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note the following: It must belong to a VPC in your account that is in the specified region. You must specify a value for DefaultSubnetId. For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2-Classic, see Supported Platforms. + /// The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later. If your account supports EC2-Classic, the default value is no VPC. If your account does not support EC2-Classic, the default value is the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified either the DefaultAvailabilityZone or the DefaultSubnetId parameter only, OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks sets these parameters to the first valid Availability Zone for the specified region and the corresponding default VPC subnet ID, respectively. If you specify a nondefault VPC ID, note the following: It must belong to a VPC in your account that is in the specified region. You must specify a value for DefaultSubnetId. For more information about how to use OpsWorks Stacks with a VPC, see Running a Stack in a VPC. For more information about default VPC and EC2-Classic, see Supported Platforms. public let vpcId: String? public init(agentVersion: String? = nil, attributes: [StackAttributesKeys: String]? = nil, chefConfiguration: ChefConfiguration? = nil, configurationManager: StackConfigurationManager? = nil, customCookbooksSource: Source? = nil, customJson: String? = nil, defaultAvailabilityZone: String? = nil, defaultInstanceProfileArn: String, defaultOs: String? = nil, defaultRootDeviceType: RootDeviceType? = nil, defaultSshKeyName: String? = nil, defaultSubnetId: String? = nil, hostnameTheme: String? = nil, name: String, region: String, serviceRoleArn: String, useCustomCookbooks: Bool? = nil, useOpsworksSecurityGroups: Bool? = nil, vpcId: String? = nil) { @@ -1139,7 +1139,7 @@ extension OpsWorks { public let iamUserArn: String /// The user's public SSH key. public let sshPublicKey: String? - /// The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks Stacks generates one from the IAM user name. + /// The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example, my.name is changed to myname. If you do not specify an SSH user name, OpsWorks Stacks generates one from the IAM user name. public let sshUsername: String? 
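The CreateStackRequest initializer shown above lists its parameters alphabetically, with defaultInstanceProfileArn, name, region, and serviceRoleArn required. A hedged sketch of creating a Chef 12 stack in a VPC follows, reusing the opsworks client assumed in the earlier sketch; all ARNs, IDs, and names are placeholders.

import SotoOpsWorks

// Sketch only: create a Chef 12 stack in a VPC. All ARNs, IDs, and names are placeholders.
let createStack = OpsWorks.CreateStackRequest(
    configurationManager: OpsWorks.StackConfigurationManager(name: "Chef", version: "12"),
    defaultInstanceProfileArn: "arn:aws:iam::111122223333:instance-profile/aws-opsworks-ec2-role",
    defaultOs: "Amazon Linux 2",
    defaultSubnetId: "subnet-0123456789abcdef0",  // required because vpcId is set
    name: "MyStack",                              // stack names can be a maximum of 64 characters
    region: "us-west-2",                          // maps to the --stack-region CLI parameter
    serviceRoleArn: "arn:aws:iam::111122223333:role/aws-opsworks-service-role",
    useOpsworksSecurityGroups: true,              // keep the built-in per-layer security groups
    vpcId: "vpc-0123456789abcdef0"
)
let stackId = try await opsworks.createStack(createStack).stackId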
public init(allowSelfManagement: Bool? = nil, iamUserArn: String, sshPublicKey: String? = nil, sshUsername: String? = nil) { @@ -1275,7 +1275,7 @@ extension OpsWorks { public let completedAt: String? /// Date when the deployment was created. public let createdAt: String? - /// A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration attribute values for stack or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. + /// A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration attribute values for stack or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. public let customJson: String? /// The deployment ID. public let deploymentId: String? @@ -1322,9 +1322,9 @@ extension OpsWorks { } public struct DeploymentCommand: AWSEncodableShape & AWSDecodableShape { - /// The arguments of those commands that take arguments. It should be set to a JSON object with the following format: {"arg_name1" : ["value1", "value2", ...], "arg_name2" : ["value1", "value2", ...], ...} The update_dependencies command takes two arguments: upgrade_os_to - Specifies the desired Amazon Linux version for instances whose OS you want to upgrade, such as Amazon Linux 2016.09. You must also set the allow_reboot argument to true. allow_reboot - Specifies whether to allow AWS OpsWorks Stacks to reboot the instances if necessary, after installing the updates. This argument can be set to either true or false. The default value is false. For example, to upgrade an instance to Amazon Linux 2016.09, set Args to the following. { "upgrade_os_to":["Amazon Linux 2016.09"], "allow_reboot":["true"] } + /// The arguments of those commands that take arguments. It should be set to a JSON object with the following format: {"arg_name1" : ["value1", "value2", ...], "arg_name2" : ["value1", "value2", ...], ...} The update_dependencies command takes two arguments: upgrade_os_to - Specifies the Amazon Linux version that you want instances to run, such as Amazon Linux 2. You must also set the allow_reboot argument to true. allow_reboot - Specifies whether to allow OpsWorks Stacks to reboot the instances if necessary, after installing the updates. This argument can be set to either true or false. The default value is false. For example, to upgrade an instance to Amazon Linux 2018.03, set Args to the following. { "upgrade_os_to":["Amazon Linux 2018.03"], "allow_reboot":["true"] } public let args: [String: [String]]? - /// Specifies the operation. You can specify only one command. For stacks, the following commands are available: execute_recipes: Execute one or more recipes. To specify the recipes, set an Args parameter named recipes to the list of recipes to be executed. For example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. install_dependencies: Install the stack's dependencies. update_custom_cookbooks: Update the stack's custom cookbooks. update_dependencies: Update the stack's dependencies. The update_dependencies and install_dependencies commands are supported only for Linux instances. 
You can run the commands successfully on Windows instances, but they do nothing. For apps, the following commands are available: deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. Set Args to {"migrate":["true"]} to migrate the database. The default setting is {"migrate":["false"]}. rollback Roll the app back to the previous version. When you update an app, AWS OpsWorks Stacks stores the previous version, up to a maximum of five versions. You can use this command to roll an app back as many as four versions. start: Start the app's web or application server. stop: Stop the app's web or application server. restart: Restart the app's web or application server. undeploy: Undeploy the app. + /// Specifies the operation. You can specify only one command. For stacks, the following commands are available: execute_recipes: Execute one or more recipes. To specify the recipes, set an Args parameter named recipes to the list of recipes to be executed. For example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. install_dependencies: Install the stack's dependencies. update_custom_cookbooks: Update the stack's custom cookbooks. update_dependencies: Update the stack's dependencies. The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing. For apps, the following commands are available: deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. Set Args to {"migrate":["true"]} to migrate the database. The default setting is {"migrate":["false"]}. rollback Roll the app back to the previous version. When you update an app, OpsWorks Stacks stores the previous version, up to a maximum of five versions. You can use this command to roll an app back as many as four versions. start: Start the app's web or application server. stop: Stop the app's web or application server. restart: Restart the app's web or application server. undeploy: Undeploy the app. public let name: DeploymentCommandName public init(args: [String: [String]]? = nil, name: DeploymentCommandName) { @@ -1391,7 +1391,7 @@ extension OpsWorks { } public struct DeregisterVolumeRequest: AWSEncodableShape { - /// The AWS OpsWorks Stacks volume ID, which is the GUID that AWS OpsWorks Stacks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID. + /// The OpsWorks Stacks volume ID, which is the GUID that OpsWorks Stacks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID. public let volumeId: String public init(volumeId: String) { @@ -1421,7 +1421,7 @@ extension OpsWorks { } public struct DescribeAgentVersionsResult: AWSDecodableShape { - /// The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, not the abbreviated number used by the console. + /// The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, not the abbreviated number used by the console. public let agentVersions: [AgentVersion]? public init(agentVersions: [AgentVersion]? = nil) { @@ -1754,7 +1754,7 @@ extension OpsWorks { } public struct DescribePermissionsRequest: AWSEncodableShape { - /// The user's IAM ARN. This can also be a federated user's ARN. For more information about IAM ARNs, see Using Identifiers. + /// The user's IAM ARN. 
This can also be a federated user's ARN. For more information about IAM ARNs, see Using Identifiers. public let iamUserArn: String? /// The stack ID. public let stackId: String? @@ -1820,7 +1820,7 @@ extension OpsWorks { public struct DescribeRdsDbInstancesRequest: AWSEncodableShape { /// An array containing the ARNs of the instances to be described. public let rdsDbInstanceArns: [String]? - /// The ID of the stack with which the instances are registered. The operation returns descriptions of all registered Amazon RDS instances. + /// The ID of the stack with which the instances are registered. The operation returns descriptions of all registered Amazon RDS instances. public let stackId: String public init(rdsDbInstanceArns: [String]? = nil, stackId: String) { @@ -1895,7 +1895,7 @@ extension OpsWorks { } public struct DescribeStackProvisioningParametersResult: AWSDecodableShape { - /// The AWS OpsWorks Stacks agent installer's URL. + /// The OpsWorks Stacks agent installer's URL. public let agentInstallerUrl: String? /// An embedded object that contains the provisioning parameters. public let parameters: [String: String]? @@ -1938,7 +1938,7 @@ extension OpsWorks { } public struct DescribeStacksRequest: AWSEncodableShape { - /// An array of stack IDs that specify the stacks to be described. If you omit this parameter, DescribeStacks returns a description of every stack. + /// An array of stack IDs that specify the stacks to be described. If you omit this parameter, and have permissions to get information about all stacks, DescribeStacks returns a description of every stack. If the IAM policy that is attached to an IAM user limits the DescribeStacks action to specific stack ARNs, this parameter is required, and the user must specify a stack ARN that is allowed by the policy. Otherwise, DescribeStacks returns an AccessDenied error. public let stackIds: [String]? public init(stackIds: [String]? = nil) { @@ -2092,7 +2092,7 @@ extension OpsWorks { public let snapshotId: String? /// The volume size, in GiB. For more information, see EbsBlockDevice. public let volumeSize: Int? - /// The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD,and standard for Magnetic volumes. If you specify the io1 volume type, you must also specify a value for the Iops attribute. The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. AWS uses the default volume size (in GiB) specified in the AMI attributes to set IOPS to 50 x (volume size). + /// The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD, and standard for Magnetic volumes. If you specify the io1 volume type, you must also specify a value for the Iops attribute. The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. Amazon Web Services uses the default volume size (in GiB) specified in the AMI attributes to set IOPS to 50 x (volume size). public let volumeType: VolumeType? public init(deleteOnTermination: Bool? = nil, iops: Int? = nil, snapshotId: String? = nil, volumeSize: Int? = nil, volumeType: VolumeType? = nil) { @@ -2144,9 +2144,9 @@ extension OpsWorks { public let instanceId: String? /// The IP address. public let ip: String? - /// The name. + /// The name, which can be a maximum of 32 characters. public let name: String? - /// The AWS region.
For more information, see Regions and Endpoints. + /// The Amazon Web Services Region. For more information, see Regions and Endpoints. public let region: String? public init(domain: String? = nil, instanceId: String? = nil, ip: String? = nil, name: String? = nil, region: String? = nil) { @@ -2171,15 +2171,15 @@ extension OpsWorks { public let availabilityZones: [String]? /// The instance's public DNS name. public let dnsName: String? - /// A list of the EC2 instances that the Elastic Load Balancing instance is managing traffic for. + /// A list of the EC2 instances for which the Elastic Load Balancing instance is managing traffic. public let ec2InstanceIds: [String]? - /// The Elastic Load Balancing instance's name. + /// The Elastic Load Balancing instance name. public let elasticLoadBalancerName: String? - /// The ID of the layer that the instance is attached to. + /// The ID of the layer to which the instance is attached. public let layerId: String? - /// The instance's AWS region. + /// The instance's Amazon Web Services Region. public let region: String? - /// The ID of the stack that the instance is associated with. + /// The ID of the stack with which the instance is associated. public let stackId: String? /// A list of subnet IDs, if the stack is running in a VPC. public let subnetIds: [String]? @@ -2212,11 +2212,11 @@ extension OpsWorks { } public struct EnvironmentVariable: AWSEncodableShape & AWSDecodableShape { - /// (Required) The environment variable's name, which can consist of up to 64 characters and must be specified. The name can contain upper- and lowercase letters, numbers, and underscores (_), but it must start with a letter or underscore. + /// (Required) The environment variable's name, which can consist of up to 64 characters and must be specified. The name can contain upper- and lowercase letters, numbers, and underscores (_), but it must start with a letter or underscore. public let key: String - /// (Optional) Whether the variable's value will be returned by the DescribeApps action. To conceal an environment variable's value, set Secure to true. DescribeApps then returns *****FILTERED***** instead of the actual value. The default value for Secure is false. + /// (Optional) Whether the variable's value is returned by the DescribeApps action. To hide an environment variable's value, set Secure to true. DescribeApps returns *****FILTERED***** instead of the actual value. The default value for Secure is false. public let secure: Bool? - /// (Optional) The environment variable's value, which can be left empty. If you specify a value, it can contain up to 256 characters, which must all be printable. + /// (Optional) The environment variable's value, which can be left empty. If you specify a value, it can contain up to 256 characters, which must all be printable. public let value: String public init(key: String, secure: Bool? = nil, value: String) { @@ -2263,9 +2263,9 @@ extension OpsWorks { } public struct GrantAccessRequest: AWSEncodableShape { - /// The instance's AWS OpsWorks Stacks ID. + /// The instance's OpsWorks Stacks ID. public let instanceId: String - /// The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, he or she automatically will be logged out. + /// The length of time (in minutes) that the grant is valid. 
When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, they are logged out. public let validForInMinutes: Int? public init(instanceId: String, validForInMinutes: Int? = nil) { @@ -2322,17 +2322,17 @@ extension OpsWorks { public let ecsClusterArn: String? /// For container instances, the instance's ARN. public let ecsContainerInstanceArn: String? - /// The instance Elastic IP address . + /// The instance Elastic IP address. public let elasticIp: String? - /// The instance host name. + /// The instance host name. The following are character limits for instance host names. Linux-based instances: 63 characters Windows-based instances: 15 characters public let hostname: String? /// For registered instances, the infrastructure class: ec2 or on-premises. public let infrastructureClass: String? - /// Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. We strongly recommend using the default value of true, to ensure that your instances have the latest security updates. + /// Whether to install operating system and package updates when the instance boots. The default value is true. If this value is set to false, you must update instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. We strongly recommend using the default value of true to ensure that your instances have the latest security updates. public let installUpdatesOnBoot: Bool? /// The instance ID. public let instanceId: String? - /// The ARN of the instance's IAM profile. For more information about IAM ARNs, see Using Identifiers. + /// The ARN of the instance's IAM profile. For more information about IAM ARNs, see Using Identifiers. public let instanceProfileArn: String? /// The instance type, such as t2.micro. public let instanceType: String? @@ -2354,7 +2354,7 @@ extension OpsWorks { public let publicIp: String? /// For registered instances, who performed the registration. public let registeredBy: String? - /// The instance's reported AWS OpsWorks Stacks agent version. + /// The instance's reported OpsWorks Stacks agent version. public let reportedAgentVersion: String? /// For registered instances, the reported operating system. public let reportedOs: ReportedOs? @@ -2579,7 +2579,7 @@ extension OpsWorks { public struct Layer: AWSDecodableShape { /// The Amazon Resource Number (ARN) of a layer. public let arn: String? - /// The layer attributes. For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value For an ECS Cluster layer, AWS OpsWorks Stacks the EcsClusterArn attribute is set to the cluster's ARN. + /// The layer attributes. For the HaproxyStatsPassword, MysqlRootPassword, and GangliaPassword attributes, OpsWorks Stacks returns *****FILTERED***** instead of the actual value. For an ECS Cluster layer, the EcsClusterArn attribute is set to the cluster's ARN. public let attributes: [LayerAttributesKeys: String]? /// Whether to automatically assign an Elastic IP address to the layer's instances.
For more information, see How to Edit a Layer. public let autoAssignElasticIps: Bool? @@ -2597,7 +2597,7 @@ extension OpsWorks { public let customRecipes: Recipes? /// An array containing the layer's custom security group IDs. public let customSecurityGroupIds: [String]? - /// AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, AWS OpsWorks Stacks runs a set of standard recipes for each event. You can also provide custom recipes for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events. To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder. + /// OpsWorks Stacks supports five lifecycle events: setup, configuration, deploy, undeploy, and shutdown. For each layer, OpsWorks Stacks runs a set of standard recipes for each event. You can also provide custom recipes for any or all layers and events. OpsWorks Stacks runs custom event recipes after the standard recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of the five events. To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder. public let defaultRecipes: Recipes? /// An array containing the layer's security group names. public let defaultSecurityGroupNames: [String]? @@ -2609,7 +2609,7 @@ extension OpsWorks { public let layerId: String? /// A LifeCycleEventConfiguration object that specifies the Shutdown event configuration. public let lifecycleEventConfiguration: LifecycleEventConfiguration? - /// The layer name. + /// The layer name. Layer names can be a maximum of 32 characters. public let name: String? /// An array of Package objects that describe the layer's packages. public let packages: [String]? @@ -2691,9 +2691,9 @@ extension OpsWorks { } public struct ListTagsRequest: AWSEncodableShape { - /// Do not use. A validation exception occurs if you add a MaxResults parameter to a ListTagsRequest call. + /// Do not use. A validation exception occurs if you add a MaxResults parameter to a ListTagsRequest call. public let maxResults: Int? - /// Do not use. A validation exception occurs if you add a NextToken parameter to a ListTagsRequest call. + /// Do not use. A validation exception occurs if you add a NextToken parameter to a ListTagsRequest call. public let nextToken: String? /// The stack or layer's Amazon Resource Number (ARN). public let resourceArn: String @@ -2729,13 +2729,13 @@ extension OpsWorks { } public struct LoadBasedAutoScalingConfiguration: AWSDecodableShape { - /// An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when AWS OpsWorks Stacks reduces the number of instances. + /// An AutoScalingThresholds object that describes the downscaling configuration, which defines how and when OpsWorks Stacks reduces the number of instances. public let downScaling: AutoScalingThresholds? /// Whether load-based auto scaling is enabled for the layer. 
public let enable: Bool? /// The layer ID. public let layerId: String? - /// An AutoScalingThresholds object that describes the upscaling configuration, which defines how and when AWS OpsWorks Stacks increases the number of instances. + /// An AutoScalingThresholds object that describes the upscaling configuration, which defines how and when OpsWorks Stacks increases the number of instances. public let upScaling: AutoScalingThresholds? public init(downScaling: AutoScalingThresholds? = nil, enable: Bool? = nil, layerId: String? = nil, upScaling: AutoScalingThresholds? = nil) { @@ -2754,11 +2754,11 @@ extension OpsWorks { } public struct OperatingSystem: AWSDecodableShape { - /// Supported configuration manager name and versions for an AWS OpsWorks Stacks operating system. + /// Supported configuration manager name and versions for an OpsWorks Stacks operating system. public let configurationManagers: [OperatingSystemConfigurationManager]? - /// The ID of a supported operating system, such as Amazon Linux 2018.03. + /// The ID of a supported operating system, such as Amazon Linux 2. public let id: String? - /// The name of the operating system, such as Amazon Linux 2018.03. + /// The name of the operating system, such as Amazon Linux 2. public let name: String? /// A short name for the operating system manufacturer. public let reportedName: String? @@ -2812,7 +2812,7 @@ extension OpsWorks { public let allowSsh: Bool? /// Whether the user can use sudo. public let allowSudo: Bool? - /// The Amazon Resource Name (ARN) for an AWS Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers. + /// The Amazon Resource Name (ARN) for an Identity and Access Management (IAM) role. For more information about IAM ARNs, see Using Identifiers. public let iamUserArn: String? /// The user's permission level, which must be the following: deny show deploy manage iam_only For more information on the permissions associated with these levels, see Managing User Permissions public let level: String? @@ -2900,19 +2900,19 @@ extension OpsWorks { public struct RdsDbInstance: AWSDecodableShape { /// The instance's address. public let address: String? - /// The DB instance identifier. + /// The database instance identifier. public let dbInstanceIdentifier: String? - /// AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. + /// OpsWorks Stacks returns *****FILTERED***** instead of the actual value. public let dbPassword: String? /// The master user name. public let dbUser: String? /// The instance's database engine. public let engine: String? - /// Set to true if AWS OpsWorks Stacks is unable to discover the Amazon RDS instance. AWS OpsWorks Stacks attempts to discover the instance only once. If this value is set to true, you must deregister the instance, and then register it again. + /// Set to true if OpsWorks Stacks is unable to discover the Amazon RDS instance. OpsWorks Stacks attempts to discover the instance only once. If this value is set to true, you must deregister the instance, and then register it again. public let missingOnRds: Bool? /// The instance's ARN. public let rdsDbInstanceArn: String? - /// The instance's AWS region. + /// The instance's Amazon Web Services Region. public let region: String? /// The ID of the stack with which the instance is registered. public let stackId: String? @@ -3045,7 +3045,7 @@ extension OpsWorks { } public struct RegisterInstanceRequest: AWSEncodableShape { - /// The instance's hostname. 
+ /// The instance's host name. The following are character limits for instance host names. Linux-based instances: 63 characters Windows-based instances: 15 characters public let hostname: String? /// An InstanceIdentity object that contains the instance's identity. public let instanceIdentity: InstanceIdentity? @@ -3082,7 +3082,7 @@ extension OpsWorks { } public struct RegisterInstanceResult: AWSDecodableShape { - /// The registered instance's AWS OpsWorks Stacks ID. + /// The registered instance's OpsWorks Stacks ID. public let instanceId: String? public init(instanceId: String? = nil) { @@ -3229,13 +3229,13 @@ extension OpsWorks { } public struct SetLoadBasedAutoScalingRequest: AWSEncodableShape { - /// An AutoScalingThresholds object with the downscaling threshold configuration. If the load falls below these thresholds for a specified amount of time, AWS OpsWorks Stacks stops a specified number of instances. + /// An AutoScalingThresholds object with the downscaling threshold configuration. If the load falls below these thresholds for a specified amount of time, OpsWorks Stacks stops a specified number of instances. public let downScaling: AutoScalingThresholds? /// Enables load-based auto scaling for the layer. public let enable: Bool? /// The layer ID. public let layerId: String - /// An AutoScalingThresholds object with the upscaling threshold configuration. If the load exceeds these thresholds for a specified amount of time, AWS OpsWorks Stacks starts a specified number of instances. + /// An AutoScalingThresholds object with the upscaling threshold configuration. If the load exceeds these thresholds for a specified amount of time, OpsWorks Stacks starts a specified number of instances. public let upScaling: AutoScalingThresholds? public init(downScaling: AutoScalingThresholds? = nil, enable: Bool? = nil, layerId: String, upScaling: AutoScalingThresholds? = nil) { @@ -3307,7 +3307,7 @@ extension OpsWorks { public struct ShutdownEventConfiguration: AWSEncodableShape & AWSDecodableShape { /// Whether to enable Elastic Load Balancing connection draining. For more information, see Connection Draining public let delayUntilElbConnectionsDrained: Bool? - /// The time, in seconds, that AWS OpsWorks Stacks will wait after triggering a Shutdown event before shutting down an instance. + /// The time, in seconds, that OpsWorks Stacks waits after triggering a Shutdown event before shutting down an instance. public let executionTimeout: Int? public init(delayUntilElbConnectionsDrained: Bool? = nil, executionTimeout: Int? = nil) { @@ -3322,15 +3322,15 @@ extension OpsWorks { } public struct Source: AWSEncodableShape & AWSDecodableShape { - /// When included in a request, the parameter depends on the repository type. For Amazon S3 bundles, set Password to the appropriate IAM secret access key. For HTTP bundles and Subversion repositories, set Password to the password. For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html. In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. + /// When included in a request, the parameter depends on the repository type. For Amazon S3 bundles, set Password to the appropriate IAM secret access key. For HTTP bundles and Subversion repositories, set Password to the password. For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html. 
In responses, OpsWorks Stacks returns *****FILTERED***** instead of the actual value. public let password: String? - /// The application's version. AWS OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed. + /// The application's version. OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed. public let revision: String? - /// In requests, the repository's SSH key. In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value. + /// In requests, the repository's SSH key. In responses, OpsWorks Stacks returns *****FILTERED***** instead of the actual value. public let sshKey: String? /// The repository type. public let type: SourceType? - /// The source URL. The following is an example of an Amazon S3 source URL: https://s3.amazonaws.com/opsworks-demo-bucket/opsworks_cookbook_demo.tar.gz. + /// The source URL. The following is an example of an Amazon S3 source URL: https://s3.amazonaws.com/opsworks-demo-bucket/opsworks_cookbook_demo.tar.gz. public let url: String? /// This parameter depends on the repository type. For Amazon S3 bundles, set Username to the appropriate IAM access key ID. For HTTP bundles, Git repositories, and Subversion repositories, set Username to the user name. public let username: String? @@ -3390,7 +3390,7 @@ extension OpsWorks { public let createdAt: String? /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes. public let customCookbooksSource: Source? - /// A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. + /// A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information on custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. public let customJson: String? /// The stack's default Availability Zone. For more information, see Regions and Endpoints. public let defaultAvailabilityZone: String? @@ -3400,23 +3400,23 @@ extension OpsWorks { public let defaultOs: String? /// The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device. public let defaultRootDeviceType: RootDeviceType? - /// A default Amazon EC2 key pair for the stack's instances. You can override this value when you create or update an instance. + /// A default Amazon EC2 key pair for the stack's instances. You can override this value when you create or update an instance. 
public let defaultSshKeyName: String? /// The default subnet ID; applicable only if the stack is running in a VPC. public let defaultSubnetId: String? /// The stack host name theme, with spaces replaced by underscores. public let hostnameTheme: String? - /// The stack name. + /// The stack name. Stack names can be a maximum of 64 characters. public let name: String? - /// The stack AWS region, such as "ap-northeast-2". For more information about AWS regions, see Regions and Endpoints. + /// The stack Amazon Web Services Region, such as ap-northeast-2. For more information about Amazon Web Services Regions, see Regions and Endpoints. public let region: String? - /// The stack AWS Identity and Access Management (IAM) role. + /// The stack Identity and Access Management (IAM) role. public let serviceRoleArn: String? /// The stack ID. public let stackId: String? /// Whether the stack uses custom cookbooks. public let useCustomCookbooks: Bool? - /// Whether the stack automatically associates the AWS OpsWorks Stacks built-in security groups with the stack's layers. + /// Whether the stack automatically associates the OpsWorks Stacks built-in security groups with the stack's layers. public let useOpsworksSecurityGroups: Bool? /// The VPC ID; applicable only if the stack is running in a VPC. public let vpcId: String? @@ -3473,9 +3473,9 @@ extension OpsWorks { } public struct StackConfigurationManager: AWSEncodableShape & AWSDecodableShape { - /// The name. This parameter must be set to "Chef". + /// The name. This parameter must be set to Chef. public let name: String? - /// The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4. + /// The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 12. public let version: String? public init(name: String? = nil, version: String? = nil) { @@ -3549,7 +3549,7 @@ extension OpsWorks { } public struct StopInstanceRequest: AWSEncodableShape { - /// Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, adding the Force parameter to the StopInstances API call disassociates the AWS OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the AWS OpsWorks Stacks instance with a new one. + /// Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, adding the Force parameter to the StopInstances API call disassociates the OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the OpsWorks Stacks instance with a new one. public let force: Bool? /// The instance ID. public let instanceId: String @@ -3581,7 +3581,7 @@ extension OpsWorks { public struct TagResourceRequest: AWSEncodableShape { /// The stack or layer's Amazon Resource Number (ARN). public let resourceArn: String - /// A map that contains tag keys and tag values that are attached to a stack or layer. The key cannot be empty. The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . 
_ : / The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : / Leading and trailing white spaces are trimmed from both the key and value. A maximum of 40 tags is allowed for any resource. + /// A map that contains tag keys and tag values that are attached to a stack or layer. The key cannot be empty. The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : / The value can be a maximum of 255 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : / Leading and trailing white spaces are trimmed from both the key and value. A maximum of 40 tags is allowed for any resource. public let tags: [String: String] public init(resourceArn: String, tags: [String: String]) { @@ -3596,13 +3596,13 @@ extension OpsWorks { } public struct TemporaryCredential: AWSDecodableShape { - /// The instance's AWS OpsWorks Stacks ID. + /// The instance's OpsWorks Stacks ID. public let instanceId: String? /// The password. public let password: String? /// The user name. public let username: String? - /// The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they will be automatically logged out. + /// The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they are automatically logged out. public let validForInMinutes: Int? public init(instanceId: String? = nil, password: String? = nil, username: String? = nil, validForInMinutes: Int? = nil) { @@ -3695,7 +3695,7 @@ extension OpsWorks { public let domains: [String]? /// Whether SSL is enabled for the app. public let enableSsl: Bool? - /// An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances.For more information, see Environment Variables. There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, "Environment: is too large (maximum is 20 KB)." If you have specified one or more environment variables, you cannot modify the stack's Chef version. + /// An array of EnvironmentVariable objects that specify environment variables to be associated with the app. After you deploy the app, these variables are defined on the associated app server instances. For more information, see Environment Variables. There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, "Environment: is too large (maximum is 20 KB)." If you have specified one or more environment variables, you cannot modify the stack's Chef version. public let environment: [EnvironmentVariable]?
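The environment documentation above explains how EnvironmentVariable objects are attached to an app and how Secure values are filtered. A short sketch using the EnvironmentVariable(key:secure:value:) initializer documented in this file follows; the app ID, variable names, and values are placeholders, and it assumes the same opsworks client as the earlier sketches.

import SotoOpsWorks

// Sketch only: attach environment variables to an app, marking one as Secure so that
// DescribeApps returns *****FILTERED***** instead of the value. Placeholders throughout.
let environment: [OpsWorks.EnvironmentVariable] = [
    OpsWorks.EnvironmentVariable(key: "RAILS_ENV", value: "production"),
    OpsWorks.EnvironmentVariable(key: "DATABASE_PASSWORD", secure: true, value: "example-only")
]
let updateApp = OpsWorks.UpdateAppRequest(appId: "33333333-example-app-id", environment: environment)
try await opsworks.updateApp(updateApp)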
/// The app name. public let name: String? @@ -3736,7 +3736,7 @@ extension OpsWorks { public struct UpdateElasticIpRequest: AWSEncodableShape { /// The IP address for which you want to update the name. public let elasticIp: String - /// The new name. + /// The new name, which can be a maximum of 32 characters. public let name: String? public init(elasticIp: String, name: String? = nil) { @@ -3751,9 +3751,9 @@ extension OpsWorks { } public struct UpdateInstanceRequest: AWSEncodableShape { - /// The default AWS OpsWorks Stacks agent version. You have the following options: INHERIT - Use the stack's default agent version setting. version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the instance. The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + /// The default OpsWorks Stacks agent version. You have the following options: INHERIT - Use the stack's default agent version setting. version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. OpsWorks Stacks installs that version on the instance. The default setting is INHERIT. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. public let agentVersion: String? - /// The ID of the AMI that was used to create the instance. The value of this parameter must be the same AMI ID that the instance is already using. You cannot apply a new AMI to an instance by running UpdateInstance. UpdateInstance does not work on instances that are using custom AMIs. + /// The ID of the AMI that was used to create the instance. The value of this parameter must be the same AMI ID that the instance is already using. You cannot apply a new AMI to an instance by running UpdateInstance. UpdateInstance does not work on instances that are using custom AMIs. public let amiId: String? /// The instance architecture. Instance types do not necessarily support both architectures. For a list of the architectures that are supported by the different instance types, see Instance Families and Types. public let architecture: Architecture? @@ -3761,7 +3761,7 @@ extension OpsWorks { public let autoScalingType: AutoScalingType? /// This property cannot be updated. public let ebsOptimized: Bool? - /// The instance host name. + /// The instance host name. The following are character limits for instance host names. Linux-based instances: 63 characters Windows-based instances: 15 characters public let hostname: String? /// Whether to install operating system and package updates when the instance boots. The default value is true. To control when updates are installed, set this value to false. You must then update your instances manually by using CreateDeployment to run the update_dependencies stack command or by manually running yum (Amazon Linux) or apt-get (Ubuntu) on the instances. 
We strongly recommend using the default value of true, to ensure that your instances have the latest security updates. public let installUpdatesOnBoot: Bool? @@ -3771,7 +3771,7 @@ extension OpsWorks { public let instanceType: String? /// The instance's layer IDs. public let layerIds: [String]? - /// The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems. The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux. + /// The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI. A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems. The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux. public let os: String? /// The instance's Amazon EC2 key name. public let sshKeyName: String? @@ -3814,7 +3814,7 @@ extension OpsWorks { public let autoAssignElasticIps: Bool? /// For stacks that are running in a VPC, whether to automatically assign a public IP address to the layer's instances. For more information, see How to Edit a Layer. public let autoAssignPublicIps: Bool? - /// Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream. 
+ /// Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream. public let cloudWatchLogsConfiguration: CloudWatchLogsConfiguration? /// The ARN of an IAM profile to be used for all of the layer's EC2 instances. For more information about IAM ARNs, see Using Identifiers. public let customInstanceProfileArn: String? @@ -3831,11 +3831,11 @@ extension OpsWorks { /// The layer ID. public let layerId: String public let lifecycleEventConfiguration: LifecycleEventConfiguration? - /// The layer name, which is used by the console. + /// The layer name, which is used by the console. Layer names can be a maximum of 32 characters. public let name: String? /// An array of Package objects that describe the layer's packages. public let packages: [String]? - /// For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters and must be in the following format: /\A[a-z0-9\-\_\.]+\Z/. The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference + /// For custom layers only, use this parameter to specify the layer's short name, which is used internally by OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 32 characters and must be in the following format: /\A[a-z0-9\-\_\.]+\Z/. Built-in layer short names are defined by OpsWorks Stacks. For more information, see the Layer reference in the OpsWorks User Guide. public let shortname: String? /// Whether to use Amazon EBS-optimized instances. public let useEbsOptimizedInstances: Bool? @@ -3918,33 +3918,33 @@ extension OpsWorks { } public struct UpdateStackRequest: AWSEncodableShape { - /// The default AWS OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances. The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. + /// The default OpsWorks Stacks agent version. You have the following options: Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available. Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks installs that version on the stack's instances. The default setting is LATEST. To specify an agent version, you must use the complete version number, not the abbreviated number shown on the console. For a list of available agent version numbers, call DescribeAgentVersions. 
AgentVersion cannot be set to Chef 12.2. You can also specify an agent version when you create or update an instance, which overrides the stack's default setting. public let agentVersion: String? /// One or more user-defined key-value pairs to be added to the stack attributes. public let attributes: [StackAttributesKeys: String]? /// A ChefConfiguration object that specifies whether to enable Berkshelf and the Berkshelf version on Chef 11.10 stacks. For more information, see Create a New Stack. public let chefConfiguration: ChefConfiguration? - /// The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. + /// The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12. public let configurationManager: StackConfigurationManager? - /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes. + /// Contains the information required to retrieve an app or cookbook from a repository. For more information, see Adding Apps or Cookbooks and Recipes. public let customCookbooksSource: Source? - /// A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. + /// A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format: "{\"key1\": \"value1\", \"key2\": \"value2\",...}" For more information about custom JSON, see Use Custom JSON to Modify the Stack Configuration Attributes. public let customJson: String? /// The stack's default Availability Zone, which must be in the stack's region. For more information, see Regions and Endpoints. If you also specify a value for DefaultSubnetId, the subnet must be in the same zone. For more information, see CreateStack. public let defaultAvailabilityZone: String? /// The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances. For more information about IAM ARNs, see Using Identifiers. public let defaultInstanceProfileArn: String? - /// The stack's operating system, which must be set to one of the following: A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. 
For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. The default option is the stack's current operating system. For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems. + /// The stack's operating system, which must be set to one of the following: A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. CentOS Linux 7 Red Hat Enterprise Linux 7 A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs. The default option is the stack's current operating system. Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems. public let defaultOs: String? /// The default root device type. This value is used by default for all instances in the stack, but you can override it when you create an instance. For more information, see Storage for the Root Device. public let defaultRootDeviceType: RootDeviceType? - /// A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, AWS OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. + /// A default Amazon EC2 key-pair name. The default value is none. If you specify a key-pair name, OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH client to log in to the instance. For more information, see Using SSH to Communicate with an Instance and Managing SSH Access. You can override this setting by specifying a different key pair, or no key pair, when you create an instance. public let defaultSshKeyName: String? /// The stack's default VPC subnet ID. This parameter is required if you specify a value for the VpcId parameter. All instances are launched into this subnet unless you specify otherwise when you create the instance. If you also specify a value for DefaultAvailabilityZone, the subnet must be in that zone. For information on default values and when this parameter is required, see the VpcId parameter description. public let defaultSubnetId: String? - /// The stack's new host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. 
The other themes are: Baked_Goods Clouds Europe_Cities Fruits Greek_Deities_and_Titans Legendary_creatures_from_Japan Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. + /// The stack's new host name theme, with spaces replaced by underscores. The theme is used to generate host names for the stack's instances. By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the layer's short name. The other themes are: Baked_Goods Clouds Europe_Cities Fruits Greek_Deities_and_Titans Legendary_creatures_from_Japan Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats To obtain a generated host name, call GetHostNameSuggestion, which returns a host name based on the current theme. public let hostnameTheme: String? - /// The stack's new name. + /// The stack's new name. Stack names can be a maximum of 64 characters. public let name: String? /// Do not use this parameter. You cannot update a stack's service role. public let serviceRoleArn: String? @@ -3952,7 +3952,7 @@ extension OpsWorks { public let stackId: String /// Whether the stack uses custom cookbooks. public let useCustomCookbooks: Bool? - /// Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers. AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings: True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group. False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on. Custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack. + /// Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers. OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are associated with layers by default. UseOpsworksSecurityGroups allows you to provide your own custom security groups instead of using the built-in groups. UseOpsworksSecurityGroups has the following settings: True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group. False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on. Custom security groups are required only for those layers that need custom settings. For more information, see Create a New Stack. public let useOpsworksSecurityGroups: Bool? public init(agentVersion: String? 
= nil, attributes: [StackAttributesKeys: String]? = nil, chefConfiguration: ChefConfiguration? = nil, configurationManager: StackConfigurationManager? = nil, customCookbooksSource: Source? = nil, customJson: String? = nil, defaultAvailabilityZone: String? = nil, defaultInstanceProfileArn: String? = nil, defaultOs: String? = nil, defaultRootDeviceType: RootDeviceType? = nil, defaultSshKeyName: String? = nil, defaultSubnetId: String? = nil, hostnameTheme: String? = nil, name: String? = nil, serviceRoleArn: String? = nil, stackId: String, useCustomCookbooks: Bool? = nil, useOpsworksSecurityGroups: Bool? = nil) { @@ -4005,7 +4005,7 @@ extension OpsWorks { public let iamUserArn: String /// The user's new SSH public key. public let sshPublicKey: String? - /// The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, AWS OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, AWS OpsWorks Stacks generates one from the IAM user name. + /// The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example, my.name will be changed to myname. If you do not specify an SSH user name, OpsWorks Stacks generates one from the IAM user name. public let sshUsername: String? public init(allowSelfManagement: Bool? = nil, iamUserArn: String, sshPublicKey: String? = nil, sshUsername: String? = nil) { @@ -4026,7 +4026,7 @@ extension OpsWorks { public struct UpdateVolumeRequest: AWSEncodableShape { /// The new mount point. public let mountPoint: String? - /// The new name. + /// The new name. Volume names can be a maximum of 128 characters. public let name: String? /// The volume ID. public let volumeId: String @@ -4088,11 +4088,11 @@ extension OpsWorks { public let iops: Int? /// The volume mount point. For example, "/mnt/disk1". public let mountPoint: String? - /// The volume name. + /// The volume name. Volume names are a maximum of 128 characters. public let name: String? /// The RAID array ID. public let raidArrayId: String? - /// The AWS region. For more information about AWS regions, see Regions and Endpoints. + /// The Amazon Web Services Region. For more information about Amazon Web Services Regions, see Regions and Endpoints. public let region: String? /// The volume size. public let size: Int? @@ -4100,7 +4100,7 @@ extension OpsWorks { public let status: String? /// The volume ID. public let volumeId: String? - /// The volume type. For more information, see Amazon EBS Volume Types. standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB. io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB. gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB. st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB. sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB. + /// The volume type. For more information, see Amazon EBS Volume Types. standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB. io1 - Provisioned IOPS (SSD). 
PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB. gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB. st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB. sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB. public let volumeType: String? public init(availabilityZone: String? = nil, device: String? = nil, ec2VolumeId: String? = nil, encrypted: Bool? = nil, instanceId: String? = nil, iops: Int? = nil, mountPoint: String? = nil, name: String? = nil, raidArrayId: String? = nil, region: String? = nil, size: Int? = nil, status: String? = nil, volumeId: String? = nil, volumeType: String? = nil) { @@ -4151,7 +4151,7 @@ extension OpsWorks { public let raidLevel: Int? /// The volume size. public let size: Int - /// The volume type. For more information, see Amazon EBS Volume Types. standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB. io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB. gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB. st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB. sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB. + /// The volume type. For more information, see Amazon EBS Volume Types. standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB. io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB. gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB. st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB. sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB. public let volumeType: String? public init(encrypted: Bool? = nil, iops: Int? = nil, mountPoint: String, numberOfDisks: Int, raidLevel: Int? = nil, size: Int, volumeType: String? 
= nil) { diff --git a/Sources/Soto/Services/PI/PI_api.swift b/Sources/Soto/Services/PI/PI_api.swift index 158777d4e4..632ddba1fe 100644 --- a/Sources/Soto/Services/PI/PI_api.swift +++ b/Sources/Soto/Services/PI/PI_api.swift @@ -392,6 +392,7 @@ extension PI.GetResourceMetricsRequest: AWSPaginateToken { extension PI.ListAvailableResourceDimensionsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> PI.ListAvailableResourceDimensionsRequest { return .init( + authorizedActions: self.authorizedActions, identifier: self.identifier, maxResults: self.maxResults, metrics: self.metrics, diff --git a/Sources/Soto/Services/PI/PI_shapes.swift b/Sources/Soto/Services/PI/PI_shapes.swift index c7e1bd2220..09530d03b6 100644 --- a/Sources/Soto/Services/PI/PI_shapes.swift +++ b/Sources/Soto/Services/PI/PI_shapes.swift @@ -61,6 +61,13 @@ extension PI { public var description: String { return self.rawValue } } + public enum FineGrainedAction: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case describeDimensionKeys = "DescribeDimensionKeys" + case getDimensionKeyDetails = "GetDimensionKeyDetails" + case getResourceMetrics = "GetResourceMetrics" + public var description: String { return self.rawValue } + } + public enum PeriodAlignment: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case endTime = "END_TIME" case startTime = "START_TIME" @@ -320,13 +327,13 @@ extension PI { public func validate(name: String) throws { try self.additionalMetrics?.forEach { try validate($0, name: "additionalMetrics[]", parent: name, max: 256) - try validate($0, name: "additionalMetrics[]", parent: name, pattern: "\\S") + try validate($0, name: "additionalMetrics[]", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") } try self.validate(self.additionalMetrics, name: "additionalMetrics", parent: name, max: 30) try self.validate(self.additionalMetrics, name: "additionalMetrics", parent: name, min: 1) try self.filter?.forEach { try validate($0.key, name: "filter.key", parent: name, max: 256) - try validate($0.key, name: "filter.key", parent: name, pattern: "\\S") + try validate($0.key, name: "filter.key", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") try validate($0.value, name: "filter[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "filter[\"\($0.key)\"]", parent: name, pattern: "\\S") } @@ -418,12 +425,12 @@ extension PI { public func validate(name: String) throws { try self.dimensions?.forEach { try validate($0, name: "dimensions[]", parent: name, max: 256) - try validate($0, name: "dimensions[]", parent: name, pattern: "\\S") + try validate($0, name: "dimensions[]", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") } try self.validate(self.dimensions, name: "dimensions", parent: name, max: 10) try self.validate(self.dimensions, name: "dimensions", parent: name, min: 1) try self.validate(self.group, name: "group", parent: name, max: 256) - try self.validate(self.group, name: "group", parent: name, pattern: "\\S") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") try self.validate(self.limit, name: "limit", parent: name, max: 25) try self.validate(self.limit, name: "limit", parent: name, min: 1) } @@ -540,7 +547,7 @@ extension PI { try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9-]+$") try self.requestedDimensions?.forEach { try validate($0, name: "requestedDimensions[]", parent: name, max: 256) - try validate($0, name: 
"requestedDimensions[]", parent: name, pattern: "\\S") + try validate($0, name: "requestedDimensions[]", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") } try self.validate(self.requestedDimensions, name: "requestedDimensions", parent: name, max: 10) try self.validate(self.requestedDimensions, name: "requestedDimensions", parent: name, min: 1) @@ -800,6 +807,8 @@ extension PI { } public struct ListAvailableResourceDimensionsRequest: AWSEncodableShape { + /// The actions to discover the dimensions you are authorized to access. If you specify multiple actions, then the response will contain the dimensions common for all the actions. When you don't specify this request parameter or provide an empty list, the response contains all the available dimensions for the target database engine whether or not you are authorized to access them. + public let authorizedActions: [FineGrainedAction]? /// An immutable identifier for a data source that is unique within an Amazon Web Services Region. Performance Insights gathers metrics from this data source. To use an Amazon RDS DB instance as a data source, specify its DbiResourceId value. For example, specify db-ABCDEFGHIJKLMNOPQRSTU1VWZ. public let identifier: String /// The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved. @@ -811,7 +820,8 @@ extension PI { /// The Amazon Web Services service for which Performance Insights returns metrics. public let serviceType: ServiceType - public init(identifier: String, maxResults: Int? = nil, metrics: [String], nextToken: String? = nil, serviceType: ServiceType) { + public init(authorizedActions: [FineGrainedAction]? = nil, identifier: String, maxResults: Int? = nil, metrics: [String], nextToken: String? 
= nil, serviceType: ServiceType) { + self.authorizedActions = authorizedActions self.identifier = identifier self.maxResults = maxResults self.metrics = metrics @@ -820,13 +830,14 @@ extension PI { } public func validate(name: String) throws { + try self.validate(self.authorizedActions, name: "authorizedActions", parent: name, max: 3) try self.validate(self.identifier, name: "identifier", parent: name, max: 256) try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9-]+$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 25) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) try self.metrics.forEach { try validate($0, name: "metrics[]", parent: name, max: 256) - try validate($0, name: "metrics[]", parent: name, pattern: "\\S") + try validate($0, name: "metrics[]", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") } try self.validate(self.metrics, name: "metrics", parent: name, max: 5) try self.validate(self.metrics, name: "metrics", parent: name, min: 1) @@ -836,6 +847,7 @@ extension PI { } private enum CodingKeys: String, CodingKey { + case authorizedActions = "AuthorizedActions" case identifier = "Identifier" case maxResults = "MaxResults" case metrics = "Metrics" @@ -888,7 +900,7 @@ extension PI { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) try self.metricTypes.forEach { try validate($0, name: "metricTypes[]", parent: name, max: 256) - try validate($0, name: "metricTypes[]", parent: name, pattern: "\\S") + try validate($0, name: "metricTypes[]", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") } try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) @@ -1064,13 +1076,13 @@ extension PI { public func validate(name: String) throws { try self.filter?.forEach { try validate($0.key, name: "filter.key", parent: name, max: 256) - try validate($0.key, name: "filter.key", parent: name, pattern: "\\S") + try validate($0.key, name: "filter.key", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") try validate($0.value, name: "filter[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "filter[\"\($0.key)\"]", parent: name, pattern: "\\S") } try self.groupBy?.validate(name: "\(name).groupBy") try self.validate(self.metric, name: "metric", parent: name, max: 256) - try self.validate(self.metric, name: "metric", parent: name, pattern: "\\S") + try self.validate(self.metric, name: "metric", parent: name, pattern: "^[a-zA-Z0-9-_\\.:/*)( ]+$") } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift b/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift new file mode 100644 index 0000000000..66b90c683e --- /dev/null +++ b/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift @@ -0,0 +1,302 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. 
+// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS PcaConnectorScep service. +/// +/// Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change. Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide. +public struct PcaConnectorScep: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the PcaConnectorScep client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "PcaConnectorScep", + serviceIdentifier: "pca-connector-scep", + serviceProtocol: .restjson, + apiVersion: "2018-05-10", + endpoint: endpoint, + errorType: PcaConnectorScepErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// For general-purpose connectors. Creates a challenge password for the specified connector. The SCEP protocol uses a challenge password to authenticate a request before issuing a certificate from a certificate authority (CA). Your SCEP clients include the challenge password as part of their certificate request to Connector for SCEP. To retrieve the connector Amazon Resource Names (ARNs) for the connectors in your account, call ListConnectors. To create additional challenge passwords for the connector, call CreateChallenge again. We recommend frequently rotating your challenge passwords. + @Sendable + public func createChallenge(_ input: CreateChallengeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateChallengeResponse { + return try await self.client.execute( + operation: "CreateChallenge", + path: "/challenges", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Creates a SCEP connector. A SCEP connector links Amazon Web Services Private Certificate Authority to your SCEP-compatible devices and mobile device management (MDM) systems. Before you create a connector, you must complete a set of prerequisites, including creation of a private certificate authority (CA) to use with this connector. For more information, see Connector for SCEP prerequisites. 
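// A minimal sketch (illustrative, not generated code) of constructing the new client and creating
// a connector. It assumes an already-configured `AWSClient` named `client`, a private CA ARN in
// `caArn`, and the assumed `SotoPcaConnectorScep` import name.
import SotoPcaConnectorScep

func createScepConnector(client: AWSClient, caArn: String) async throws -> String? {
    let scep = PcaConnectorScep(client: client, region: .useast1)
    // A client token is generated automatically for idempotency when none is supplied.
    let response = try await scep.createConnector(.init(certificateAuthorityArn: caArn))
    return response.connectorArn
}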
+ @Sendable + public func createConnector(_ input: CreateConnectorRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConnectorResponse { + return try await self.client.execute( + operation: "CreateConnector", + path: "/connectors", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes the specified Challenge. + @Sendable + public func deleteChallenge(_ input: DeleteChallengeRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteChallenge", + path: "/challenges/{ChallengeArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes the specified Connector. This operation also deletes any challenges associated with the connector. + @Sendable + public func deleteConnector(_ input: DeleteConnectorRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteConnector", + path: "/connectors/{ConnectorArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the metadata for the specified Challenge. + @Sendable + public func getChallengeMetadata(_ input: GetChallengeMetadataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetChallengeMetadataResponse { + return try await self.client.execute( + operation: "GetChallengeMetadata", + path: "/challengeMetadata/{ChallengeArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the challenge password for the specified Challenge. + @Sendable + public func getChallengePassword(_ input: GetChallengePasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetChallengePasswordResponse { + return try await self.client.execute( + operation: "GetChallengePassword", + path: "/challengePasswords/{ChallengeArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves details about the specified Connector. Calling this action returns important details about the connector, such as the public SCEP URL where your clients can request certificates. + @Sendable + public func getConnector(_ input: GetConnectorRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConnectorResponse { + return try await self.client.execute( + operation: "GetConnector", + path: "/connectors/{ConnectorArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the challenge metadata for the specified ARN. + @Sendable + public func listChallengeMetadata(_ input: ListChallengeMetadataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListChallengeMetadataResponse { + return try await self.client.execute( + operation: "ListChallengeMetadata", + path: "/challengeMetadata", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Lists the connectors belonging to your Amazon Web Services account. 
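// A follow-on sketch (illustrative, not generated code) of reading the connector back once its
// ARN is known; `scep` and `connectorArn` are assumed to come from the previous example.
func printScepEndpoint(scep: PcaConnectorScep, connectorArn: String) async throws {
    let response = try await scep.getConnector(.init(connectorArn: connectorArn))
    // Once the connector is ACTIVE, SCEP clients request certificates from this public URL.
    if let endpoint = response.connector?.endpoint {
        print("SCEP endpoint:", endpoint)
    }
}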
+ @Sendable + public func listConnectors(_ input: ListConnectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListConnectorsResponse { + return try await self.client.execute( + operation: "ListConnectors", + path: "/connectors", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the tags associated with the specified resource. Tags are key-value pairs that you can use to categorize and manage your resources, for purposes like billing. For example, you might set the tag key to "customer" and the value to the customer name or ID. You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a resource. + @Sendable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + return try await self.client.execute( + operation: "ListTagsForResource", + path: "/tags/{ResourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Adds one or more tags to your resource. + @Sendable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "TagResource", + path: "/tags/{ResourceArn}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Removes one or more tags from your resource. + @Sendable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "UntagResource", + path: "/tags/{ResourceArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +} + +extension PcaConnectorScep { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are no public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: PcaConnectorScep, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension PcaConnectorScep { + /// Retrieves the challenge metadata for the specified ARN. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listChallengeMetadataPaginator( + _ input: ListChallengeMetadataRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listChallengeMetadata, + inputKey: \ListChallengeMetadataRequest.nextToken, + outputKey: \ListChallengeMetadataResponse.nextToken, + logger: logger + ) + } + + /// Lists the connectors belonging to your Amazon Web Services account. + /// Return PaginatorSequence for operation.
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listConnectorsPaginator( + _ input: ListConnectorsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listConnectors, + inputKey: \ListConnectorsRequest.nextToken, + outputKey: \ListConnectorsResponse.nextToken, + logger: logger + ) + } +} + +extension PcaConnectorScep.ListChallengeMetadataRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> PcaConnectorScep.ListChallengeMetadataRequest { + return .init( + connectorArn: self.connectorArn, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension PcaConnectorScep.ListConnectorsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> PcaConnectorScep.ListConnectorsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_shapes.swift b/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_shapes.swift new file mode 100644 index 0000000000..f3706fcb1d --- /dev/null +++ b/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_shapes.swift @@ -0,0 +1,774 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension PcaConnectorScep { + // MARK: Enums + + public enum ConnectorStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case creating = "CREATING" + case deleting = "DELETING" + case failed = "FAILED" + public var description: String { return self.rawValue } + } + + public enum ConnectorStatusReason: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case internalFailure = "INTERNAL_FAILURE" + case privatecaAccessDenied = "PRIVATECA_ACCESS_DENIED" + case privatecaInvalidState = "PRIVATECA_INVALID_STATE" + case privatecaResourceNotFound = "PRIVATECA_RESOURCE_NOT_FOUND" + public var description: String { return self.rawValue } + } + + public enum ConnectorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case generalPurpose = "GENERAL_PURPOSE" + case intune = "INTUNE" + public var description: String { return self.rawValue } + } + + // MARK: Shapes + + public struct Challenge: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the challenge. + public let arn: String? + /// The Amazon Resource Name (ARN) of the connector. + public let connectorArn: String? + /// The date and time that the challenge was created. + public let createdAt: Date? + /// The SCEP challenge password, in UUID format. + public let password: String? + /// The date and time that the challenge was updated.
+ public let updatedAt: Date? + + public init(arn: String? = nil, connectorArn: String? = nil, createdAt: Date? = nil, password: String? = nil, updatedAt: Date? = nil) { + self.arn = arn + self.connectorArn = connectorArn + self.createdAt = createdAt + self.password = password + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case connectorArn = "ConnectorArn" + case createdAt = "CreatedAt" + case password = "Password" + case updatedAt = "UpdatedAt" + } + } + + public struct ChallengeMetadata: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the challenge. + public let arn: String? + /// The Amazon Resource Name (ARN) of the connector. + public let connectorArn: String? + /// The date and time that the connector was created. + public let createdAt: Date? + /// The date and time that the connector was updated. + public let updatedAt: Date? + + public init(arn: String? = nil, connectorArn: String? = nil, createdAt: Date? = nil, updatedAt: Date? = nil) { + self.arn = arn + self.connectorArn = connectorArn + self.createdAt = createdAt + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case connectorArn = "ConnectorArn" + case createdAt = "CreatedAt" + case updatedAt = "UpdatedAt" + } + } + + public struct ChallengeMetadataSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the challenge. + public let arn: String? + /// The Amazon Resource Name (ARN) of the connector. + public let connectorArn: String? + /// The date and time that the challenge was created. + public let createdAt: Date? + /// The date and time that the challenge was updated. + public let updatedAt: Date? + + public init(arn: String? = nil, connectorArn: String? = nil, createdAt: Date? = nil, updatedAt: Date? = nil) { + self.arn = arn + self.connectorArn = connectorArn + self.createdAt = createdAt + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case connectorArn = "ConnectorArn" + case createdAt = "CreatedAt" + case updatedAt = "UpdatedAt" + } + } + + public struct Connector: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the connector. + public let arn: String? + /// The Amazon Resource Name (ARN) of the certificate authority associated with the connector. + public let certificateAuthorityArn: String? + /// The date and time that the connector was created. + public let createdAt: Date? + /// The connector's HTTPS public SCEP URL. + public let endpoint: String? + /// Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure MobileDeviceManagement, then the connector is for general-purpose use and this object is empty. + public let mobileDeviceManagement: MobileDeviceManagement? + /// Contains OpenID Connect (OIDC) parameters for use with Connector for SCEP for Microsoft Intune. For more information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune. + public let openIdConfiguration: OpenIdConfiguration? + /// The connector's status. + public let status: ConnectorStatus? + /// Information about why connector creation failed, if status is FAILED. + public let statusReason: ConnectorStatusReason? + /// The connector type. + public let type: ConnectorType? + /// The date and time that the connector was updated. + public let updatedAt: Date? + + public init(arn: String? = nil, certificateAuthorityArn: String? 
= nil, createdAt: Date? = nil, endpoint: String? = nil, mobileDeviceManagement: MobileDeviceManagement? = nil, openIdConfiguration: OpenIdConfiguration? = nil, status: ConnectorStatus? = nil, statusReason: ConnectorStatusReason? = nil, type: ConnectorType? = nil, updatedAt: Date? = nil) { + self.arn = arn + self.certificateAuthorityArn = certificateAuthorityArn + self.createdAt = createdAt + self.endpoint = endpoint + self.mobileDeviceManagement = mobileDeviceManagement + self.openIdConfiguration = openIdConfiguration + self.status = status + self.statusReason = statusReason + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case certificateAuthorityArn = "CertificateAuthorityArn" + case createdAt = "CreatedAt" + case endpoint = "Endpoint" + case mobileDeviceManagement = "MobileDeviceManagement" + case openIdConfiguration = "OpenIdConfiguration" + case status = "Status" + case statusReason = "StatusReason" + case type = "Type" + case updatedAt = "UpdatedAt" + } + } + + public struct ConnectorSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the connector. + public let arn: String? + /// The Amazon Resource Name (ARN) of the connector's associated certificate authority. + public let certificateAuthorityArn: String? + /// The date and time that the challenge was created. + public let createdAt: Date? + /// The connector's HTTPS public SCEP URL. + public let endpoint: String? + /// Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure MobileDeviceManagement, then the connector is for general-purpose use and this object is empty. + public let mobileDeviceManagement: MobileDeviceManagement? + /// Contains OpenID Connect (OIDC) parameters for use with Microsoft Intune. + public let openIdConfiguration: OpenIdConfiguration? + /// The connector's status. Status can be creating, active, deleting, or failed. + public let status: ConnectorStatus? + /// Information about why connector creation failed, if status is FAILED. + public let statusReason: ConnectorStatusReason? + /// The connector type. + public let type: ConnectorType? + /// The date and time that the challenge was updated. + public let updatedAt: Date? + + public init(arn: String? = nil, certificateAuthorityArn: String? = nil, createdAt: Date? = nil, endpoint: String? = nil, mobileDeviceManagement: MobileDeviceManagement? = nil, openIdConfiguration: OpenIdConfiguration? = nil, status: ConnectorStatus? = nil, statusReason: ConnectorStatusReason? = nil, type: ConnectorType? = nil, updatedAt: Date? 
= nil) { + self.arn = arn + self.certificateAuthorityArn = certificateAuthorityArn + self.createdAt = createdAt + self.endpoint = endpoint + self.mobileDeviceManagement = mobileDeviceManagement + self.openIdConfiguration = openIdConfiguration + self.status = status + self.statusReason = statusReason + self.type = type + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case certificateAuthorityArn = "CertificateAuthorityArn" + case createdAt = "CreatedAt" + case endpoint = "Endpoint" + case mobileDeviceManagement = "MobileDeviceManagement" + case openIdConfiguration = "OpenIdConfiguration" + case status = "Status" + case statusReason = "StatusReason" + case type = "Type" + case updatedAt = "UpdatedAt" + } + } + + public struct CreateChallengeRequest: AWSEncodableShape { + /// Custom string that can be used to distinguish between calls to the CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. Therefore, if you call CreateChallenge multiple times with the same client token within five minutes, Connector for SCEP recognizes that you are requesting only one challenge and will only respond with one. If you change the client token for each call, Connector for SCEP recognizes that you are requesting multiple challenge passwords. + public let clientToken: String? + /// The Amazon Resource Name (ARN) of the connector that you want to create a challenge for. + public let connectorArn: String + /// The key-value pairs to associate with the resource. + public let tags: [String: String]? + + public init(clientToken: String? = CreateChallengeRequest.idempotencyToken(), connectorArn: String, tags: [String: String]? = nil) { + self.clientToken = clientToken + self.connectorArn = connectorArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 64) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[!-~]+$") + try self.validate(self.connectorArn, name: "connectorArn", parent: name, max: 200) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, min: 5) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case connectorArn = "ConnectorArn" + case tags = "Tags" + } + } + + public struct CreateChallengeResponse: AWSDecodableShape { + /// Returns the challenge details for the specified connector. + public let challenge: Challenge? + + public init(challenge: Challenge? = nil) { + self.challenge = challenge + } + + private enum CodingKeys: String, CodingKey { + case challenge = "Challenge" + } + } + + public struct CreateConnectorRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Web Services Private Certificate Authority certificate authority to use with this connector. Due to security vulnerabilities present in the SCEP protocol, we recommend using a private CA that's dedicated for use with the connector. To retrieve the private CAs associated with your account, you can call ListCertificateAuthorities using the Amazon Web Services Private CA API. 
+ public let certificateAuthorityArn: String + /// Custom string that can be used to distinguish between calls to the CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. Therefore, if you call CreateChallenge multiple times with the same client token within five minutes, Connector for SCEP recognizes that you are requesting only one challenge and will only respond with one. If you change the client token for each call, Connector for SCEP recognizes that you are requesting multiple challenge passwords. + public let clientToken: String? + /// If you don't supply a value, by default Connector for SCEP creates a connector for general-purpose use. A general-purpose connector is designed to work with clients or endpoints that support the SCEP protocol, except Connector for SCEP for Microsoft Intune. With connectors for general-purpose use, you manage SCEP challenge passwords using Connector for SCEP. For information about considerations and limitations with using Connector for SCEP, see Considerations and Limitations. If you provide an IntuneConfiguration, Connector for SCEP creates a connector for use with Microsoft Intune, and you manage the challenge passwords using Microsoft Intune. For more information, see Using Connector for SCEP for Microsoft Intune. + public let mobileDeviceManagement: MobileDeviceManagement? + /// The key-value pairs to associate with the resource. + public let tags: [String: String]? + + public init(certificateAuthorityArn: String, clientToken: String? = CreateConnectorRequest.idempotencyToken(), mobileDeviceManagement: MobileDeviceManagement? = nil, tags: [String: String]? = nil) { + self.certificateAuthorityArn = certificateAuthorityArn + self.clientToken = clientToken + self.mobileDeviceManagement = mobileDeviceManagement + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.certificateAuthorityArn, name: "certificateAuthorityArn", parent: name, max: 200) + try self.validate(self.certificateAuthorityArn, name: "certificateAuthorityArn", parent: name, min: 5) + try self.validate(self.certificateAuthorityArn, name: "certificateAuthorityArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:acm-pca:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:certificate-authority\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 64) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[!-~]+$") + try self.mobileDeviceManagement?.validate(name: "\(name).mobileDeviceManagement") + } + + private enum CodingKeys: String, CodingKey { + case certificateAuthorityArn = "CertificateAuthorityArn" + case clientToken = "ClientToken" + case mobileDeviceManagement = "MobileDeviceManagement" + case tags = "Tags" + } + } + + public struct CreateConnectorResponse: AWSDecodableShape { + /// Returns the Amazon Resource Name (ARN) of the connector. + public let connectorArn: String? + + public init(connectorArn: String? = nil) { + self.connectorArn = connectorArn + } + + private enum CodingKeys: String, CodingKey { + case connectorArn = "ConnectorArn" + } + } + + public struct DeleteChallengeRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the challenge password to delete. 
+ public let challengeArn: String + + public init(challengeArn: String) { + self.challengeArn = challengeArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.challengeArn, key: "ChallengeArn") + } + + public func validate(name: String) throws { + try self.validate(self.challengeArn, name: "challengeArn", parent: name, max: 200) + try self.validate(self.challengeArn, name: "challengeArn", parent: name, min: 5) + try self.validate(self.challengeArn, name: "challengeArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/challenge\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteConnectorRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the connector to delete. + public let connectorArn: String + + public init(connectorArn: String) { + self.connectorArn = connectorArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.connectorArn, key: "ConnectorArn") + } + + public func validate(name: String) throws { + try self.validate(self.connectorArn, name: "connectorArn", parent: name, max: 200) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, min: 5) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetChallengeMetadataRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the challenge. + public let challengeArn: String + + public init(challengeArn: String) { + self.challengeArn = challengeArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.challengeArn, key: "ChallengeArn") + } + + public func validate(name: String) throws { + try self.validate(self.challengeArn, name: "challengeArn", parent: name, max: 200) + try self.validate(self.challengeArn, name: "challengeArn", parent: name, min: 5) + try self.validate(self.challengeArn, name: "challengeArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/challenge\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetChallengeMetadataResponse: AWSDecodableShape { + /// The metadata for the challenge. + public let challengeMetadata: ChallengeMetadata? + + public init(challengeMetadata: ChallengeMetadata? = nil) { + self.challengeMetadata = challengeMetadata + } + + private enum CodingKeys: String, CodingKey { + case challengeMetadata = "ChallengeMetadata" + } + } + + public struct GetChallengePasswordRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the challenge. 
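
Editor's note: the Delete* and GetChallengeMetadata request shapes above carry the ARN in the request path (via encodePath) rather than in the body, which is why their CodingKeys are empty. A sketch of inspecting and then deleting a challenge; the getChallengeMetadata(_:) and deleteChallenge(_:) client methods are assumed, as is the caller-supplied ARN.

import SotoPcaConnectorScep

// Look up a challenge's metadata, then delete it. Both ARNs travel in the URL path.
func retireChallenge(scep: PcaConnectorScep, challengeArn: String) async throws {
    let existing = try await scep.getChallengeMetadata(
        PcaConnectorScep.GetChallengeMetadataRequest(challengeArn: challengeArn)
    )
    print("Retiring challenge:", String(describing: existing.challengeMetadata))
    _ = try await scep.deleteChallenge(
        PcaConnectorScep.DeleteChallengeRequest(challengeArn: challengeArn)
    )
}
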
+ public let challengeArn: String + + public init(challengeArn: String) { + self.challengeArn = challengeArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.challengeArn, key: "ChallengeArn") + } + + public func validate(name: String) throws { + try self.validate(self.challengeArn, name: "challengeArn", parent: name, max: 200) + try self.validate(self.challengeArn, name: "challengeArn", parent: name, min: 5) + try self.validate(self.challengeArn, name: "challengeArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/challenge\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetChallengePasswordResponse: AWSDecodableShape { + /// The SCEP challenge password. + public let password: String? + + public init(password: String? = nil) { + self.password = password + } + + private enum CodingKeys: String, CodingKey { + case password = "Password" + } + } + + public struct GetConnectorRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the connector. + public let connectorArn: String + + public init(connectorArn: String) { + self.connectorArn = connectorArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.connectorArn, key: "ConnectorArn") + } + + public func validate(name: String) throws { + try self.validate(self.connectorArn, name: "connectorArn", parent: name, max: 200) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, min: 5) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetConnectorResponse: AWSDecodableShape { + /// The properties of the connector. + public let connector: Connector? + + public init(connector: Connector? = nil) { + self.connector = connector + } + + private enum CodingKeys: String, CodingKey { + case connector = "Connector" + } + } + + public struct IntuneConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The directory (tenant) ID from your Microsoft Entra ID app registration. + public let azureApplicationId: String + /// The primary domain from your Microsoft Entra ID app registration. 
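
Editor's note: together, GetConnector and GetChallengePassword above return the two things a SCEP client needs to enroll: the connector's endpoint and the current challenge password. A sketch combining them; the getConnector(_:) and getChallengePassword(_:) client methods are assumed to exist alongside these shapes.

import SotoPcaConnectorScep

// Fetch the enrollment endpoint and challenge password for a SCEP client profile.
func enrollmentDetails(scep: PcaConnectorScep, connectorArn: String, challengeArn: String) async throws -> (endpoint: String?, password: String?) {
    let connector = try await scep.getConnector(
        PcaConnectorScep.GetConnectorRequest(connectorArn: connectorArn)
    ).connector
    let password = try await scep.getChallengePassword(
        PcaConnectorScep.GetChallengePasswordRequest(challengeArn: challengeArn)
    ).password
    return (connector?.endpoint, password)
}
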
+ public let domain: String + + public init(azureApplicationId: String, domain: String) { + self.azureApplicationId = azureApplicationId + self.domain = domain + } + + public func validate(name: String) throws { + try self.validate(self.azureApplicationId, name: "azureApplicationId", parent: name, max: 100) + try self.validate(self.azureApplicationId, name: "azureApplicationId", parent: name, min: 15) + try self.validate(self.azureApplicationId, name: "azureApplicationId", parent: name, pattern: "^[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}$") + try self.validate(self.domain, name: "domain", parent: name, max: 256) + try self.validate(self.domain, name: "domain", parent: name, min: 1) + try self.validate(self.domain, name: "domain", parent: name, pattern: "^[a-zA-Z0-9._-]+$") + } + + private enum CodingKeys: String, CodingKey { + case azureApplicationId = "AzureApplicationId" + case domain = "Domain" + } + } + + public struct ListChallengeMetadataRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the connector. + public let connectorArn: String + /// The maximum number of objects that you want Connector for SCEP to return for this request. If more objects are available, in the response, Connector for SCEP provides a NextToken value that you can use in a subsequent call to get the next batch of objects. + public let maxResults: Int? + /// When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request. + public let nextToken: String? + + public init(connectorArn: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.connectorArn = connectorArn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.connectorArn, key: "ConnectorArn") + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + } + + public func validate(name: String) throws { + try self.validate(self.connectorArn, name: "connectorArn", parent: name, max: 200) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, min: 5) + try self.validate(self.connectorArn, name: "connectorArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1000) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9_-]{4})*(?:[A-Za-z0-9_-]{2}==|[A-Za-z0-9_-]{3}=)?$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListChallengeMetadataResponse: AWSDecodableShape { + /// The challenge metadata for the challenges belonging to your Amazon Web Services account. + public let challenges: [ChallengeMetadataSummary]? 
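
Editor's note: where the general-purpose flow sketched earlier manages challenges through Connector for SCEP, an Intune-backed connector hands challenge management to Microsoft Intune. A sketch using the IntuneConfiguration shape above and the MobileDeviceManagement wrapper defined a little further down in this file; both field values are placeholders, not real tenant data.

import SotoPcaConnectorScep

// Build the MDM block for an Intune-backed connector. Pass the result as
// mobileDeviceManagement in CreateConnectorRequest instead of leaving it nil.
func intuneMDM() -> PcaConnectorScep.MobileDeviceManagement {
    PcaConnectorScep.MobileDeviceManagement(
        intune: PcaConnectorScep.IntuneConfiguration(
            azureApplicationId: "0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d", // placeholder Entra ID app registration ID
            domain: "example.onmicrosoft.com"                           // placeholder primary domain
        )
    )
}
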
+ /// When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request. + public let nextToken: String? + + public init(challenges: [ChallengeMetadataSummary]? = nil, nextToken: String? = nil) { + self.challenges = challenges + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case challenges = "Challenges" + case nextToken = "NextToken" + } + } + + public struct ListConnectorsRequest: AWSEncodableShape { + /// The maximum number of objects that you want Connector for SCEP to return for this request. If more objects are available, in the response, Connector for SCEP provides a NextToken value that you can use in a subsequent call to get the next batch of objects. + public let maxResults: Int? + /// When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "MaxResults") + request.encodeQuery(self.nextToken, key: "NextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1000) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^(?:[A-Za-z0-9_-]{4})*(?:[A-Za-z0-9_-]{2}==|[A-Za-z0-9_-]{3}=)?$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListConnectorsResponse: AWSDecodableShape { + /// The connectors belonging to your Amazon Web Services account. + public let connectors: [ConnectorSummary]? + /// When you request a list of objects with a MaxResults setting, if the number of objects that are still available for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request. + public let nextToken: String? + + public init(connectors: [ConnectorSummary]? = nil, nextToken: String? = nil) { + self.connectors = connectors + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case connectors = "Connectors" + case nextToken = "NextToken" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The key-value pairs to associate with the resource. + public let tags: [String: String]? + + public init(tags: [String: String]? = nil) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct OpenIdConfiguration: AWSDecodableShape { + /// The audience value to copy into your Microsoft Entra app registration's OIDC. + public let audience: String? + /// The issuer value to copy into your Microsoft Entra app registration's OIDC. + public let issuer: String? + /// The subject value to copy into your Microsoft Entra app registration's OIDC. + public let subject: String? + + public init(audience: String? = nil, issuer: String? = nil, subject: String? = nil) { + self.audience = audience + self.issuer = issuer + self.subject = subject + } + + private enum CodingKeys: String, CodingKey { + case audience = "Audience" + case issuer = "Issuer" + case subject = "Subject" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// The key-value pairs to associate with the resource. + public let tags: [String: String] + + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + try container.encode(self.tags, forKey: .tags) + } + + private enum CodingKeys: String, CodingKey { + case tags = "Tags" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// Specifies a list of tag keys that you want to remove from the specified resources. + public let tagKeys: [String] + + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "ResourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + private enum CodingKeys: CodingKey {} + } + + public struct MobileDeviceManagement: AWSEncodableShape & AWSDecodableShape { + /// Configuration settings for use with Microsoft Intune. For information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune. + public let intune: IntuneConfiguration? + + public init(intune: IntuneConfiguration? 
= nil) { + self.intune = intune + } + + public func validate(name: String) throws { + try self.intune?.validate(name: "\(name).intune") + } + + private enum CodingKeys: String, CodingKey { + case intune = "Intune" + } + } +} + +// MARK: - Errors + +/// Error enum for PcaConnectorScep +public struct PcaConnectorScepErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case badRequestException = "BadRequestException" + case conflictException = "ConflictException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case serviceQuotaExceededException = "ServiceQuotaExceededException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize PcaConnectorScep + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You can receive this error if you attempt to perform an operation and you don't have the required permissions. This can be caused by insufficient permissions in policies attached to your Amazon Web Services Identity and Access Management (IAM) principal. It can also happen because of restrictions in place from an Amazon Web Services Organizations service control policy (SCP) that affects your Amazon Web Services account. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The request is malformed or contains an error such as an invalid parameter value or a missing required parameter. + public static var badRequestException: Self { .init(.badRequestException) } + /// This request can't be completed for one of the following reasons because the requested resource was being concurrently modified by another request. + public static var conflictException: Self { .init(.conflictException) } + /// The request processing has failed because of an unknown error, exception or failure with an internal server. + public static var internalServerException: Self { .init(.internalServerException) } + /// The operation tried to access a nonexistent resource. The resource might be incorrectly specified, or it might have a status other than ACTIVE. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The request would cause a service quota to be exceeded. + public static var serviceQuotaExceededException: Self { .init(.serviceQuotaExceededException) } + /// The limit on the number of requests per second was exceeded. + public static var throttlingException: Self { .init(.throttlingException) } + /// An input validation error occurred. For example, invalid characters in a name tag, or an invalid pagination token. + public static var validationException: Self { .init(.validationException) } +} + +extension PcaConnectorScepErrorType: Equatable { + public static func == (lhs: PcaConnectorScepErrorType, rhs: PcaConnectorScepErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension PcaConnectorScepErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? 
"")" + } +} diff --git a/Sources/Soto/Services/Pinpoint/Pinpoint_shapes.swift b/Sources/Soto/Services/Pinpoint/Pinpoint_shapes.swift index 37824c0df7..04751cd9a0 100644 --- a/Sources/Soto/Services/Pinpoint/Pinpoint_shapes.swift +++ b/Sources/Soto/Services/Pinpoint/Pinpoint_shapes.swift @@ -1519,14 +1519,17 @@ extension Pinpoint { public let body: String? /// The verified email address to send the email from. The default address is the FromAddress specified for the email channel for the application. public let fromAddress: String? + /// The list of MessageHeaders for the email. You can have up to 15 MessageHeaders for each email. + public let headers: [MessageHeader]? /// The body of the email, in HTML format, for recipients whose email clients render HTML content. public let htmlBody: String? /// The subject line, or title, of the email. public let title: String? - public init(body: String? = nil, fromAddress: String? = nil, htmlBody: String? = nil, title: String? = nil) { + public init(body: String? = nil, fromAddress: String? = nil, headers: [MessageHeader]? = nil, htmlBody: String? = nil, title: String? = nil) { self.body = body self.fromAddress = fromAddress + self.headers = headers self.htmlBody = htmlBody self.title = title } @@ -1534,6 +1537,7 @@ extension Pinpoint { private enum CodingKeys: String, CodingKey { case body = "Body" case fromAddress = "FromAddress" + case headers = "Headers" case htmlBody = "HtmlBody" case title = "Title" } @@ -3617,6 +3621,8 @@ extension Pinpoint { public struct EmailTemplateRequest: AWSEncodableShape { /// A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values. public let defaultSubstitutions: String? + /// The list of MessageHeaders for the email. You can have up to 15 Headers. + public let headers: [MessageHeader]? /// The message body, in HTML format, to use in email messages that are based on the message template. We recommend using HTML format for email clients that render HTML content. You can include links, formatted text, and more in an HTML message. public let htmlPart: String? /// The unique identifier for the recommender model to use for the message template. Amazon Pinpoint uses this value to determine how to retrieve and process data from a recommender model when it sends messages that use the template, if the template contains message variables for recommendation data. @@ -3630,8 +3636,9 @@ extension Pinpoint { /// The message body, in plain text format, to use in email messages that are based on the message template. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices. public let textPart: String? - public init(defaultSubstitutions: String? = nil, htmlPart: String? = nil, recommenderId: String? = nil, subject: String? = nil, tags: [String: String]? = nil, templateDescription: String? = nil, textPart: String? = nil) { + public init(defaultSubstitutions: String? = nil, headers: [MessageHeader]? = nil, htmlPart: String? = nil, recommenderId: String? = nil, subject: String? = nil, tags: [String: String]? = nil, templateDescription: String? = nil, textPart: String? 
= nil) { self.defaultSubstitutions = defaultSubstitutions + self.headers = headers self.htmlPart = htmlPart self.recommenderId = recommenderId self.subject = subject @@ -3642,6 +3649,7 @@ extension Pinpoint { private enum CodingKeys: String, CodingKey { case defaultSubstitutions = "DefaultSubstitutions" + case headers = "Headers" case htmlPart = "HtmlPart" case recommenderId = "RecommenderId" case subject = "Subject" @@ -3658,6 +3666,8 @@ extension Pinpoint { public let creationDate: String? /// The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. public let defaultSubstitutions: String? + /// The list of MessageHeaders for the email. You can have up to 15 Headers. + public let headers: [MessageHeader]? /// The message body, in HTML format, that's used in email messages that are based on the message template. public let htmlPart: String? /// The date, in ISO 8601 format, when the message template was last modified. @@ -3679,10 +3689,11 @@ extension Pinpoint { /// The unique identifier, as an integer, for the active version of the message template, or the version of the template that you specified by using the version parameter in your request. public let version: String? - public init(arn: String? = nil, creationDate: String? = nil, defaultSubstitutions: String? = nil, htmlPart: String? = nil, lastModifiedDate: String? = nil, recommenderId: String? = nil, subject: String? = nil, tags: [String: String]? = nil, templateDescription: String? = nil, templateName: String? = nil, templateType: TemplateType? = nil, textPart: String? = nil, version: String? = nil) { + public init(arn: String? = nil, creationDate: String? = nil, defaultSubstitutions: String? = nil, headers: [MessageHeader]? = nil, htmlPart: String? = nil, lastModifiedDate: String? = nil, recommenderId: String? = nil, subject: String? = nil, tags: [String: String]? = nil, templateDescription: String? = nil, templateName: String? = nil, templateType: TemplateType? = nil, textPart: String? = nil, version: String? = nil) { self.arn = arn self.creationDate = creationDate self.defaultSubstitutions = defaultSubstitutions + self.headers = headers self.htmlPart = htmlPart self.lastModifiedDate = lastModifiedDate self.recommenderId = recommenderId @@ -3699,6 +3710,7 @@ extension Pinpoint { case arn = "Arn" case creationDate = "CreationDate" case defaultSubstitutions = "DefaultSubstitutions" + case headers = "Headers" case htmlPart = "HtmlPart" case lastModifiedDate = "LastModifiedDate" case recommenderId = "RecommenderId" @@ -7772,6 +7784,23 @@ extension Pinpoint { } } + public struct MessageHeader: AWSEncodableShape & AWSDecodableShape { + /// The name of the message header. The header name can contain up to 126 characters. + public let name: String? + /// The value of the message header. The header value can contain up to 870 characters, including the length of any rendered attributes. For example if you add the {CreationDate} attribute, it renders as YYYY-MM-DDTHH:MM:SS.SSSZ and is 24 characters in length. + public let value: String? + + public init(name: String? = nil, value: String? 
= nil) { + self.name = name + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + case value = "Value" + } + } + public struct MessageRequest: AWSEncodableShape { /// A map of key-value pairs, where each key is an address and each value is an AddressConfiguration object. An address can be a push notification token, a phone number, or an email address. You can use an AddressConfiguration object to tailor the message for an address by specifying settings such as content overrides and message variables. public let addresses: [String: AddressConfiguration]? @@ -9384,6 +9413,8 @@ extension Pinpoint { } public struct SimpleEmail: AWSEncodableShape { + /// The list of MessageHeaders for the email. You can have up to 15 Headers. + public let headers: [MessageHeader]? /// The body of the email message, in HTML format. We recommend using HTML format for email clients that render HTML content. You can include links, formatted text, and more in an HTML message. public let htmlPart: SimpleEmailPart? /// The subject line, or title, of the email. @@ -9391,13 +9422,15 @@ extension Pinpoint { /// The body of the email message, in plain text format. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices. public let textPart: SimpleEmailPart? - public init(htmlPart: SimpleEmailPart? = nil, subject: SimpleEmailPart? = nil, textPart: SimpleEmailPart? = nil) { + public init(headers: [MessageHeader]? = nil, htmlPart: SimpleEmailPart? = nil, subject: SimpleEmailPart? = nil, textPart: SimpleEmailPart? = nil) { + self.headers = headers self.htmlPart = htmlPart self.subject = subject self.textPart = textPart } private enum CodingKeys: String, CodingKey { + case headers = "Headers" case htmlPart = "HtmlPart" case subject = "Subject" case textPart = "TextPart" diff --git a/Sources/Soto/Services/Pipes/Pipes_api.swift b/Sources/Soto/Services/Pipes/Pipes_api.swift index 422695a028..fc65a846cd 100644 --- a/Sources/Soto/Services/Pipes/Pipes_api.swift +++ b/Sources/Soto/Services/Pipes/Pipes_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS Pipes service. /// -/// Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data. +/// Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data. public struct Pipes: AWSService { // MARK: Member variables @@ -191,7 +191,7 @@ public struct Pipes: AWSService { ) } - /// Update an existing pipe. When you call UpdatePipe, EventBridge only the updates fields you have specified in the request; the rest remain unchanged. 
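
Editor's note on the Pinpoint hunk above: the new MessageHeader shape lets templates and messages carry up to 15 custom email headers. A sketch building an EmailTemplateRequest with headers, using only initializers shown in that hunk; the header names, values, and template content are placeholders, and the createEmailTemplate call that would consume this value is outside this patch excerpt.

import SotoPinpoint

// An email template carrying two custom headers alongside HTML and text bodies.
func emailTemplateWithHeaders() -> Pinpoint.EmailTemplateRequest {
    Pinpoint.EmailTemplateRequest(
        headers: [
            Pinpoint.MessageHeader(name: "List-Unsubscribe", value: "<mailto:unsubscribe@example.com>"),
            Pinpoint.MessageHeader(name: "X-Campaign", value: "spring-launch")
        ],
        htmlPart: "<h1>Hello {{FirstName}}</h1>",
        subject: "Welcome",
        textPart: "Hello {{FirstName}}"
    )
}
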
The exception to this is if you modify any Amazon Web Services-service specific fields in the SourceParameters, EnrichmentParameters, or TargetParameters objects. For example, DynamoDBStreamParameters or EventBridgeEventBusParameters. EventBridge updates the fields in these objects atomically as one and overrides existing values. This is by design, and means that if you don't specify an optional field in one of these Parameters objects, EventBridge sets that field to its system-default value during the update. For more information about pipes, see Amazon EventBridge Pipes in the Amazon EventBridge User Guide. + /// Update an existing pipe. When you call UpdatePipe, EventBridge only the updates fields you have specified in the request; the rest remain unchanged. The exception to this is if you modify any Amazon Web Services-service specific fields in the SourceParameters, EnrichmentParameters, or TargetParameters objects. For example, DynamoDBStreamParameters or EventBridgeEventBusParameters. EventBridge updates the fields in these objects atomically as one and overrides existing values. This is by design, and means that if you don't specify an optional field in one of these Parameters objects, EventBridge sets that field to its system-default value during the update. For more information about pipes, see Amazon EventBridge Pipes in the Amazon EventBridge User Guide. @Sendable public func updatePipe(_ input: UpdatePipeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdatePipeResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/Pipes/Pipes_shapes.swift b/Sources/Soto/Services/Pipes/Pipes_shapes.swift index 41883d4f31..ccc52fd6ac 100644 --- a/Sources/Soto/Services/Pipes/Pipes_shapes.swift +++ b/Sources/Soto/Services/Pipes/Pipes_shapes.swift @@ -45,6 +45,11 @@ extension Pipes { public var description: String { return self.rawValue } } + public enum DimensionValueType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case varchar = "VARCHAR" + public var description: String { return self.rawValue } + } + public enum DynamoDBStreamStartPosition: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case latest = "LATEST" case trimHorizon = "TRIM_HORIZON" @@ -62,6 +67,14 @@ extension Pipes { public var description: String { return self.rawValue } } + public enum EpochTimeUnit: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case microseconds = "MICROSECONDS" + case milliseconds = "MILLISECONDS" + case nanoseconds = "NANOSECONDS" + case seconds = "SECONDS" + public var description: String { return self.rawValue } + } + public enum IncludeExecutionDataOption: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case all = "ALL" public var description: String { return self.rawValue } @@ -95,6 +108,15 @@ extension Pipes { public var description: String { return self.rawValue } } + public enum MeasureValueType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bigint = "BIGINT" + case boolean = "BOOLEAN" + case double = "DOUBLE" + case timestamp = "TIMESTAMP" + case varchar = "VARCHAR" + public var description: String { return self.rawValue } + } + public enum OnPartialBatchItemFailureStreams: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case automaticBisect = "AUTOMATIC_BISECT" public var description: String { return self.rawValue } @@ -169,6 +191,12 @@ extension Pipes { public 
var description: String { return self.rawValue } } + public enum TimeFieldType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case epoch = "EPOCH" + case timestampFormat = "TIMESTAMP_FORMAT" + public var description: String { return self.rawValue } + } + public enum MSKAccessCredentials: AWSEncodableShape & AWSDecodableShape, Sendable { /// The ARN of the Secrets Manager secret. case clientCertificateTlsAuth(String) @@ -304,9 +332,9 @@ extension Pipes { // MARK: Shapes public struct AwsVpcConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE. + /// Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE. public let assignPublicIp: AssignPublicIp? - /// Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. + /// Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. public let securityGroups: [String]? /// Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets. public let subnets: [String] @@ -360,11 +388,11 @@ extension Pipes { public struct BatchContainerOverrides: AWSEncodableShape & AWSDecodableShape { /// The command to send to the container that overrides the default command from the Docker image or the task definition. public let command: [String]? - /// The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. Environment variables cannot start with "Batch". This naming convention is reserved for variables that Batch sets. + /// The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. Environment variables cannot start with "Batch". This naming convention is reserved for variables that Batch sets. public let environment: [BatchEnvironmentVariable]? /// The instance type to use for a multi-node parallel job. This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided. public let instanceType: String? - /// The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU. + /// The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU. public let resourceRequirements: [BatchResourceRequirement]? public init(command: [String]? = nil, environment: [BatchEnvironmentVariable]? = nil, instanceType: String? = nil, resourceRequirements: [BatchResourceRequirement]? 
= nil) { @@ -419,7 +447,7 @@ extension Pipes { public struct BatchResourceRequirement: AWSEncodableShape & AWSDecodableShape { /// The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU. public let type: BatchResourceRequirementType - /// The quantity of the specified resource to reserve for the container. The values vary based on the type specified. type="GPU" The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on. GPUs aren't available for jobs that are running on Fargate resources. type="MEMORY" The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory management in the Batch User Guide. For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value. value = 512 VCPU = 0.25 value = 1024 VCPU = 0.25 or 0.5 value = 2048 VCPU = 0.25, 0.5, or 1 value = 3072 VCPU = 0.5, or 1 value = 4096 VCPU = 0.5, 1, or 2 value = 5120, 6144, or 7168 VCPU = 1 or 2 value = 8192 VCPU = 1, 2, 4, or 8 value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360 VCPU = 2 or 4 value = 16384 VCPU = 2, 4, or 8 value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720 VCPU = 4 value = 20480, 24576, or 28672 VCPU = 4 or 8 value = 36864, 45056, 53248, or 61440 VCPU = 8 value = 32768, 40960, 49152, or 57344 VCPU = 8 or 16 value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880 VCPU = 16 type="VCPU" The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once. The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference. For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. 
The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16 value = 0.25 MEMORY = 512, 1024, or 2048 value = 0.5 MEMORY = 1024, 2048, 3072, or 4096 value = 1 MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192 value = 2 MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384 value = 4 MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720 value = 8 MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440 value = 16 MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880 + /// The quantity of the specified resource to reserve for the container. The values vary based on the type specified. type="GPU" The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on. GPUs aren't available for jobs that are running on Fargate resources. type="MEMORY" The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory management in the Batch User Guide. For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value. value = 512 VCPU = 0.25 value = 1024 VCPU = 0.25 or 0.5 value = 2048 VCPU = 0.25, 0.5, or 1 value = 3072 VCPU = 0.5, or 1 value = 4096 VCPU = 0.5, 1, or 2 value = 5120, 6144, or 7168 VCPU = 1 or 2 value = 8192 VCPU = 1, 2, 4, or 8 value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360 VCPU = 2 or 4 value = 16384 VCPU = 2, 4, or 8 value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720 VCPU = 4 value = 20480, 24576, or 28672 VCPU = 4 or 8 value = 36864, 45056, 53248, or 61440 VCPU = 8 value = 32768, 40960, 49152, or 57344 VCPU = 8 or 16 value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880 VCPU = 16 type="VCPU" The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once. The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference. 
For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16 value = 0.25 MEMORY = 512, 1024, or 2048 value = 0.5 MEMORY = 1024, 2048, 3072, or 4096 value = 1 MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192 value = 2 MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384 value = 4 MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720 value = 8 MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440 value = 16 MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880 public let value: String public init(type: BatchResourceRequirementType, value: String) { @@ -434,7 +462,7 @@ extension Pipes { } public struct BatchRetryStrategy: AWSEncodableShape & AWSDecodableShape { - /// The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value. + /// The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value. public let attempts: Int? public init(attempts: Int? = nil) { @@ -824,18 +852,46 @@ extension Pipes { } } + public struct DimensionMapping: AWSEncodableShape & AWSDecodableShape { + /// The metadata attributes of the time series. For example, the name and Availability Zone of an Amazon EC2 instance or the name of the manufacturer of a wind turbine are dimensions. + public let dimensionName: String + /// Dynamic path to the dimension value in the source event. + public let dimensionValue: String + /// The data type of the dimension for the time-series data. + public let dimensionValueType: DimensionValueType + + public init(dimensionName: String, dimensionValue: String, dimensionValueType: DimensionValueType) { + self.dimensionName = dimensionName + self.dimensionValue = dimensionValue + self.dimensionValueType = dimensionValueType + } + + public func validate(name: String) throws { + try self.validate(self.dimensionName, name: "dimensionName", parent: name, max: 256) + try self.validate(self.dimensionName, name: "dimensionName", parent: name, min: 1) + try self.validate(self.dimensionValue, name: "dimensionValue", parent: name, max: 2048) + try self.validate(self.dimensionValue, name: "dimensionValue", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dimensionName = "DimensionName" + case dimensionValue = "DimensionValue" + case dimensionValueType = "DimensionValueType" + } + } + public struct EcsContainerOverride: AWSEncodableShape & AWSDecodableShape { /// The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name. public let command: [String]? /// The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name. public let cpu: Int? - /// The environment variables to send to the container. 
You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name. + /// The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name. public let environment: [EcsEnvironmentVariable]? /// A list of files containing the environment variables to pass to a container, instead of the value from the container definition. public let environmentFiles: [EcsEnvironmentFile]? - /// The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name. + /// The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name. public let memory: Int? - /// The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name. + /// The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name. public let memoryReservation: Int? /// The name of the container that receives the override. This parameter is required if any override is specified. public let name: String? @@ -1036,7 +1092,7 @@ extension Pipes { } public struct FirehoseLogDestination: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records. + /// The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records. public let deliveryStreamArn: String? public init(deliveryStreamArn: String? = nil) { @@ -1049,7 +1105,7 @@ extension Pipes { } public struct FirehoseLogDestinationParameters: AWSEncodableShape { - /// Specifies the Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records. + /// Specifies the Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records. public let deliveryStreamArn: String public init(deliveryStreamArn: String) { @@ -1074,7 +1130,7 @@ extension Pipes { public let desiredState: RequestedPipeState? /// The maximum number of pipes to include in the response. public let limit: Int? - /// A value that will return a subset of the pipes associated with this account. For example, "NamePrefix": "ABC" will return all endpoints with "ABC" in the name. + /// A value that will return a subset of the pipes associated with this account. For example, "NamePrefix": "ABC" will return all endpoints with "ABC" in the name. public let namePrefix: String? /// If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. 
Using an expired pagination token will return an HTTP 400 InvalidToken error. public let nextToken: String? @@ -1175,6 +1231,61 @@ extension Pipes { } } + public struct MultiMeasureAttributeMapping: AWSEncodableShape & AWSDecodableShape { + /// Dynamic path to the measurement attribute in the source event. + public let measureValue: String + /// Data type of the measurement attribute in the source event. + public let measureValueType: MeasureValueType + /// Target measure name to be used. + public let multiMeasureAttributeName: String + + public init(measureValue: String, measureValueType: MeasureValueType, multiMeasureAttributeName: String) { + self.measureValue = measureValue + self.measureValueType = measureValueType + self.multiMeasureAttributeName = multiMeasureAttributeName + } + + public func validate(name: String) throws { + try self.validate(self.measureValue, name: "measureValue", parent: name, max: 2048) + try self.validate(self.measureValue, name: "measureValue", parent: name, min: 1) + try self.validate(self.multiMeasureAttributeName, name: "multiMeasureAttributeName", parent: name, max: 256) + try self.validate(self.multiMeasureAttributeName, name: "multiMeasureAttributeName", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case measureValue = "MeasureValue" + case measureValueType = "MeasureValueType" + case multiMeasureAttributeName = "MultiMeasureAttributeName" + } + } + + public struct MultiMeasureMapping: AWSEncodableShape & AWSDecodableShape { + /// Mappings that represent multiple source event fields mapped to measures in the same Timestream for LiveAnalytics record. + public let multiMeasureAttributeMappings: [MultiMeasureAttributeMapping] + /// The name of the multiple measurements per record (multi-measure). + public let multiMeasureName: String + + public init(multiMeasureAttributeMappings: [MultiMeasureAttributeMapping], multiMeasureName: String) { + self.multiMeasureAttributeMappings = multiMeasureAttributeMappings + self.multiMeasureName = multiMeasureName + } + + public func validate(name: String) throws { + try self.multiMeasureAttributeMappings.forEach { + try $0.validate(name: "\(name).multiMeasureAttributeMappings[]") + } + try self.validate(self.multiMeasureAttributeMappings, name: "multiMeasureAttributeMappings", parent: name, max: 256) + try self.validate(self.multiMeasureAttributeMappings, name: "multiMeasureAttributeMappings", parent: name, min: 1) + try self.validate(self.multiMeasureName, name: "multiMeasureName", parent: name, max: 256) + try self.validate(self.multiMeasureName, name: "multiMeasureName", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case multiMeasureAttributeMappings = "MultiMeasureAttributeMappings" + case multiMeasureName = "MultiMeasureName" + } + } + public struct NetworkConfiguration: AWSEncodableShape & AWSDecodableShape { /// Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode. public let awsvpcConfiguration: AwsVpcConfiguration? @@ -1246,7 +1357,7 @@ extension Pipes { public let headerParameters: [String: String]? /// The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*"). public let pathParameterValues: [String]? 
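
Editor's note: the new Timestream for LiveAnalytics target types above (DimensionMapping, MultiMeasureAttributeMapping, MultiMeasureMapping) map fields of the source event into a time-series record. A sketch of plausible mapping values; the JSON paths, names, and measure choices are placeholders, and the PipeTargetTimestreamParameters shape that would carry them appears later in this file, so the sketch stops at the mapping values.

import SotoPipes

// Map one event field to a dimension and two numeric fields to measures
// within a single multi-measure Timestream record.
func timestreamMappings() -> (dimensions: [Pipes.DimensionMapping], measures: [Pipes.MultiMeasureMapping]) {
    let instanceDimension = Pipes.DimensionMapping(
        dimensionName: "InstanceId",
        dimensionValue: "$.detail.instance-id",        // placeholder dynamic path into the source event
        dimensionValueType: .varchar
    )
    let instanceMetrics = Pipes.MultiMeasureMapping(
        multiMeasureAttributeMappings: [
            Pipes.MultiMeasureAttributeMapping(
                measureValue: "$.detail.cpuUtilization",  // placeholder path
                measureValueType: .double,
                multiMeasureAttributeName: "cpu_utilization"
            ),
            Pipes.MultiMeasureAttributeMapping(
                measureValue: "$.detail.memoryUsedMiB",   // placeholder path
                measureValueType: .bigint,
                multiMeasureAttributeName: "memory_used_mib"
            )
        ],
        multiMeasureName: "instance_metrics"
    )
    return ([instanceDimension], [instanceMetrics])
}
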
- /// The query string keys/values that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. + /// The query string keys/values that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. public let queryStringParameters: [String: String]? public init(headerParameters: [String: String]? = nil, pathParameterValues: [String]? = nil, queryStringParameters: [String: String]? = nil) { @@ -1305,9 +1416,9 @@ extension Pipes { public struct PipeLogConfiguration: AWSDecodableShape { /// The Amazon CloudWatch Logs logging configuration settings for the pipe. public let cloudwatchLogsLogDestination: CloudwatchLogsLogDestination? - /// The Amazon Kinesis Data Firehose logging configuration settings for the pipe. + /// The Amazon Data Firehose logging configuration settings for the pipe. public let firehoseLogDestination: FirehoseLogDestination? - /// Whether the execution data (specifically, the payload, awsRequest, and awsResponse fields) is included in the log messages for this pipe. This applies to all log destinations for the pipe. For more information, see Including execution data in logs in the Amazon EventBridge User Guide. + /// Whether the execution data (specifically, the payload, awsRequest, and awsResponse fields) is included in the log messages for this pipe. This applies to all log destinations for the pipe. For more information, see Including execution data in logs in the Amazon EventBridge User Guide. public let includeExecutionData: [IncludeExecutionDataOption]? /// The level of logging detail to include. This applies to all log destinations for the pipe. public let level: LogLevel? @@ -1334,9 +1445,9 @@ extension Pipes { public struct PipeLogConfigurationParameters: AWSEncodableShape { /// The Amazon CloudWatch Logs logging configuration settings for the pipe. public let cloudwatchLogsLogDestination: CloudwatchLogsLogDestinationParameters? - /// The Amazon Kinesis Data Firehose logging configuration settings for the pipe. + /// The Amazon Data Firehose logging configuration settings for the pipe. public let firehoseLogDestination: FirehoseLogDestinationParameters? - /// Specify ON to include the execution data (specifically, the payload and awsRequest fields) in the log messages for this pipe. This applies to all log destinations for the pipe. For more information, see Including execution data in logs in the Amazon EventBridge User Guide. The default is OFF. + /// Specify ALL to include the execution data (specifically, the payload, awsRequest, and awsResponse fields) in the log messages for this pipe. This applies to all log destinations for the pipe. For more information, see Including execution data in logs in the Amazon EventBridge User Guide. By default, execution data is not included. public let includeExecutionData: [IncludeExecutionDataOption]? /// The level of logging detail to include. This applies to all log destinations for the pipe. For more information, see Specifying EventBridge Pipes log level in the Amazon EventBridge User Guide. public let level: LogLevel @@ -1581,7 +1692,7 @@ extension Pipes { public let managedStreamingKafkaParameters: PipeSourceManagedStreamingKafkaParameters? /// The parameters for using a Rabbit MQ broker as a source. public let rabbitMQBrokerParameters: PipeSourceRabbitMQBrokerParameters? - /// The parameters for using a self-managed Apache Kafka stream as a source. + /// The parameters for using a self-managed Apache Kafka stream as a source. 
A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as Confluent Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide. public let selfManagedKafkaParameters: PipeSourceSelfManagedKafkaParameters? /// The parameters for using a Amazon SQS stream as a source. public let sqsQueueParameters: PipeSourceSqsQueueParameters? @@ -1761,13 +1872,13 @@ extension Pipes { public let arrayProperties: BatchArrayProperties? /// The overrides that are sent to a container. public let containerOverrides: BatchContainerOverrides? - /// A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each dependency to complete before it can begin. + /// A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each dependency to complete before it can begin. public let dependsOn: [BatchJobDependency]? - /// The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision then the latest active revision is used. + /// The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision then the latest active revision is used. public let jobDefinition: String - /// The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). + /// The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). public let jobName: String - /// Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition. + /// Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition. public let parameters: [String: String]? /// The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition. public let retryStrategy: BatchRetryStrategy? @@ -1972,7 +2083,7 @@ extension Pipes { public let headerParameters: [String: String]? 
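The Batch target documentation above describes jobDefinition, jobName, and the parameters overrides; a hedged sketch follows. The memberwise initializer and its defaults are assumed from Soto's usual generated shapes rather than shown in this hunk.

    import SotoPipes

    // Hypothetical Batch job target for a pipe. jobDefinition may be a name,
    // name:revision, or an ARN; with a bare name the latest active revision is used.
    let batchTarget = Pipes.PipeTargetBatchJobParameters(
        jobDefinition: "my-job-definition",
        jobName: "pipe-invoked-job",
        parameters: ["inputKey": "events/latest.json"]   // overrides job definition placeholders
    )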
/// The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*"). public let pathParameterValues: [String]? - /// The query string keys/values that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. + /// The query string keys/values that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. public let queryStringParameters: [String: String]? public init(headerParameters: [String: String]? = nil, pathParameterValues: [String]? = nil, queryStringParameters: [String: String]? = nil) { @@ -2007,7 +2118,7 @@ extension Pipes { } public struct PipeTargetKinesisStreamParameters: AWSEncodableShape & AWSDecodableShape { - /// Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. + /// Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. public let partitionKey: String public init(partitionKey: String) { @@ -2061,8 +2172,10 @@ extension Pipes { public let sqsQueueParameters: PipeTargetSqsQueueParameters? /// The parameters for using a Step Functions state machine as a target. public let stepFunctionStateMachineParameters: PipeTargetStateMachineParameters? + /// The parameters for using a Timestream for LiveAnalytics table as a target. + public let timestreamParameters: PipeTargetTimestreamParameters? - public init(batchJobParameters: PipeTargetBatchJobParameters? = nil, cloudWatchLogsParameters: PipeTargetCloudWatchLogsParameters? = nil, ecsTaskParameters: PipeTargetEcsTaskParameters? = nil, eventBridgeEventBusParameters: PipeTargetEventBridgeEventBusParameters? = nil, httpParameters: PipeTargetHttpParameters? = nil, inputTemplate: String? = nil, kinesisStreamParameters: PipeTargetKinesisStreamParameters? = nil, lambdaFunctionParameters: PipeTargetLambdaFunctionParameters? = nil, redshiftDataParameters: PipeTargetRedshiftDataParameters? = nil, sageMakerPipelineParameters: PipeTargetSageMakerPipelineParameters? = nil, sqsQueueParameters: PipeTargetSqsQueueParameters? = nil, stepFunctionStateMachineParameters: PipeTargetStateMachineParameters? = nil) { + public init(batchJobParameters: PipeTargetBatchJobParameters? = nil, cloudWatchLogsParameters: PipeTargetCloudWatchLogsParameters? = nil, ecsTaskParameters: PipeTargetEcsTaskParameters? = nil, eventBridgeEventBusParameters: PipeTargetEventBridgeEventBusParameters? = nil, httpParameters: PipeTargetHttpParameters? = nil, inputTemplate: String? 
= nil, kinesisStreamParameters: PipeTargetKinesisStreamParameters? = nil, lambdaFunctionParameters: PipeTargetLambdaFunctionParameters? = nil, redshiftDataParameters: PipeTargetRedshiftDataParameters? = nil, sageMakerPipelineParameters: PipeTargetSageMakerPipelineParameters? = nil, sqsQueueParameters: PipeTargetSqsQueueParameters? = nil, stepFunctionStateMachineParameters: PipeTargetStateMachineParameters? = nil, timestreamParameters: PipeTargetTimestreamParameters? = nil) { self.batchJobParameters = batchJobParameters self.cloudWatchLogsParameters = cloudWatchLogsParameters self.ecsTaskParameters = ecsTaskParameters @@ -2075,6 +2188,7 @@ extension Pipes { self.sageMakerPipelineParameters = sageMakerPipelineParameters self.sqsQueueParameters = sqsQueueParameters self.stepFunctionStateMachineParameters = stepFunctionStateMachineParameters + self.timestreamParameters = timestreamParameters } public func validate(name: String) throws { @@ -2088,6 +2202,7 @@ extension Pipes { try self.redshiftDataParameters?.validate(name: "\(name).redshiftDataParameters") try self.sageMakerPipelineParameters?.validate(name: "\(name).sageMakerPipelineParameters") try self.sqsQueueParameters?.validate(name: "\(name).sqsQueueParameters") + try self.timestreamParameters?.validate(name: "\(name).timestreamParameters") } private enum CodingKeys: String, CodingKey { @@ -2103,6 +2218,7 @@ extension Pipes { case sageMakerPipelineParameters = "SageMakerPipelineParameters" case sqsQueueParameters = "SqsQueueParameters" case stepFunctionStateMachineParameters = "StepFunctionStateMachineParameters" + case timestreamParameters = "TimestreamParameters" } } @@ -2212,6 +2328,69 @@ extension Pipes { } } + public struct PipeTargetTimestreamParameters: AWSEncodableShape & AWSDecodableShape { + /// Map source data to dimensions in the target Timestream for LiveAnalytics table. For more information, see Amazon Timestream for LiveAnalytics concepts + public let dimensionMappings: [DimensionMapping] + /// The granularity of the time units used. Default is MILLISECONDS. Required if TimeFieldType is specified as EPOCH. + public let epochTimeUnit: EpochTimeUnit? + /// Maps multiple measures from the source event to the same record in the specified Timestream for LiveAnalytics table. + public let multiMeasureMappings: [MultiMeasureMapping]? + /// Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table. + public let singleMeasureMappings: [SingleMeasureMapping]? + /// The type of time value used. The default is EPOCH. + public let timeFieldType: TimeFieldType? + /// How to format the timestamps. For example, YYYY-MM-DDThh:mm:ss.sssTZD. Required if TimeFieldType is specified as TIMESTAMP_FORMAT. + public let timestampFormat: String? + /// Dynamic path to the source data field that represents the time value for your data. + public let timeValue: String + /// 64 bit version value or source data field that represents the version value for your data. Write requests with a higher version number will update the existing measure values of the record and version. In cases where the measure value is the same, the version will still be updated. Default value is 1. Timestream for LiveAnalytics does not support updating partial measure values in a record. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. Default value is 1. 
Version must be 1 or greater, or you will receive a ValidationException error. + public let versionValue: String + + public init(dimensionMappings: [DimensionMapping], epochTimeUnit: EpochTimeUnit? = nil, multiMeasureMappings: [MultiMeasureMapping]? = nil, singleMeasureMappings: [SingleMeasureMapping]? = nil, timeFieldType: TimeFieldType? = nil, timestampFormat: String? = nil, timeValue: String, versionValue: String) { + self.dimensionMappings = dimensionMappings + self.epochTimeUnit = epochTimeUnit + self.multiMeasureMappings = multiMeasureMappings + self.singleMeasureMappings = singleMeasureMappings + self.timeFieldType = timeFieldType + self.timestampFormat = timestampFormat + self.timeValue = timeValue + self.versionValue = versionValue + } + + public func validate(name: String) throws { + try self.dimensionMappings.forEach { + try $0.validate(name: "\(name).dimensionMappings[]") + } + try self.validate(self.dimensionMappings, name: "dimensionMappings", parent: name, max: 128) + try self.validate(self.dimensionMappings, name: "dimensionMappings", parent: name, min: 1) + try self.multiMeasureMappings?.forEach { + try $0.validate(name: "\(name).multiMeasureMappings[]") + } + try self.validate(self.multiMeasureMappings, name: "multiMeasureMappings", parent: name, max: 1024) + try self.singleMeasureMappings?.forEach { + try $0.validate(name: "\(name).singleMeasureMappings[]") + } + try self.validate(self.singleMeasureMappings, name: "singleMeasureMappings", parent: name, max: 8192) + try self.validate(self.timestampFormat, name: "timestampFormat", parent: name, max: 256) + try self.validate(self.timestampFormat, name: "timestampFormat", parent: name, min: 1) + try self.validate(self.timeValue, name: "timeValue", parent: name, max: 256) + try self.validate(self.timeValue, name: "timeValue", parent: name, min: 1) + try self.validate(self.versionValue, name: "versionValue", parent: name, max: 256) + try self.validate(self.versionValue, name: "versionValue", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dimensionMappings = "DimensionMappings" + case epochTimeUnit = "EpochTimeUnit" + case multiMeasureMappings = "MultiMeasureMappings" + case singleMeasureMappings = "SingleMeasureMappings" + case timeFieldType = "TimeFieldType" + case timestampFormat = "TimestampFormat" + case timeValue = "TimeValue" + case versionValue = "VersionValue" + } + } + public struct PlacementConstraint: AWSEncodableShape & AWSDecodableShape { /// A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide. public let expression: String? @@ -2286,7 +2465,7 @@ extension Pipes { public let bucketOwner: String /// How EventBridge should format the log records. json: JSON plain: Plain text w3c: W3C extended logging file format public let outputFormat: S3OutputFormat? - /// Specifies any prefix text with which to begin Amazon S3 log object names. You can use prefixes to organize the data that you store in Amazon S3 buckets. A prefix is a string of characters at the beginning of the object key name. A prefix can be any length, subject to the maximum length of the object key name (1,024 bytes). For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide. + /// Specifies any prefix text with which to begin Amazon S3 log object names. 
You can use prefixes to organize the data that you store in Amazon S3 buckets. A prefix is a string of characters at the beginning of the object key name. A prefix can be any length, subject to the maximum length of the object key name (1,024 bytes). For more information, see Organizing objects using prefixes in the Amazon Simple Storage Service User Guide. public let prefix: String? public init(bucketName: String, bucketOwner: String, outputFormat: S3OutputFormat? = nil, prefix: String? = nil) { @@ -2329,7 +2508,7 @@ extension Pipes { } public struct SelfManagedKafkaAccessConfigurationVpc: AWSEncodableShape & AWSDecodableShape { - /// Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. + /// Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. public let securityGroup: [String]? /// Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets. public let subnets: [String]? @@ -2360,6 +2539,34 @@ extension Pipes { } } + public struct SingleMeasureMapping: AWSEncodableShape & AWSDecodableShape { + /// Target measure name for the measurement attribute in the Timestream table. + public let measureName: String + /// Dynamic path of the source field to map to the measure in the record. + public let measureValue: String + /// Data type of the source field. + public let measureValueType: MeasureValueType + + public init(measureName: String, measureValue: String, measureValueType: MeasureValueType) { + self.measureName = measureName + self.measureValue = measureValue + self.measureValueType = measureValueType + } + + public func validate(name: String) throws { + try self.validate(self.measureName, name: "measureName", parent: name, max: 1024) + try self.validate(self.measureName, name: "measureName", parent: name, min: 1) + try self.validate(self.measureValue, name: "measureValue", parent: name, max: 2048) + try self.validate(self.measureValue, name: "measureValue", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case measureName = "MeasureName" + case measureValue = "MeasureValue" + case measureValueType = "MeasureValueType" + } + } + public struct StartPipeRequest: AWSEncodableShape { /// The name of the pipe. public let name: String @@ -2866,7 +3073,7 @@ extension Pipes { public let managedStreamingKafkaParameters: UpdatePipeSourceManagedStreamingKafkaParameters? /// The parameters for using a Rabbit MQ broker as a source. public let rabbitMQBrokerParameters: UpdatePipeSourceRabbitMQBrokerParameters? - /// The parameters for using a self-managed Apache Kafka stream as a source. + /// The parameters for using a self-managed Apache Kafka stream as a source. A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services. This includes both clusters you manage yourself, as well as those hosted by a third-party provider, such as Confluent Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide. public let selfManagedKafkaParameters: UpdatePipeSourceSelfManagedKafkaParameters? 
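Putting the new Timestream for LiveAnalytics target together (PipeTargetTimestreamParameters, SingleMeasureMapping, and the timestreamParameters member on PipeTargetParameters), a rough sketch follows. The DimensionMapping argument labels and the enum cases (.varchar, .double, .milliseconds, .epoch) are assumptions, since that shape and those enums are defined outside this excerpt.

    import SotoPipes

    // Sketch of a pipe target writing events into a Timestream for LiveAnalytics table.
    let timestreamTarget = Pipes.PipeTargetTimestreamParameters(
        dimensionMappings: [
            .init(dimensionName: "region",                 // assumed labels for DimensionMapping
                  dimensionValue: "$.detail.region",
                  dimensionValueType: .varchar)
        ],
        epochTimeUnit: .milliseconds,                      // default granularity per the docs above
        singleMeasureMappings: [
            .init(measureName: "temperature",
                  measureValue: "$.detail.temperature",
                  measureValueType: .double)
        ],
        timeFieldType: .epoch,
        timeValue: "$.detail.timestamp",
        versionValue: "1"                                  // must be 1 or greater
    )

    // The new member added to PipeTargetParameters in this hunk:
    let target = Pipes.PipeTargetParameters(timestreamParameters: timestreamTarget)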
/// The parameters for using a Amazon SQS stream as a source. public let sqsQueueParameters: UpdatePipeSourceSqsQueueParameters? diff --git a/Sources/Soto/Services/Polly/Polly_shapes.swift b/Sources/Soto/Services/Polly/Polly_shapes.swift index 6f485ee689..8795161c2c 100644 --- a/Sources/Soto/Services/Polly/Polly_shapes.swift +++ b/Sources/Soto/Services/Polly/Polly_shapes.swift @@ -27,6 +27,7 @@ extension Polly { // MARK: Enums public enum Engine: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case generative = "generative" case longForm = "long-form" case neural = "neural" case standard = "standard" @@ -240,7 +241,7 @@ extension Polly { } public struct DescribeVoicesInput: AWSEncodableShape { - /// Specifies the engine (standard, neural or long-form) used by Amazon Polly when processing input text for speech synthesis. + /// Specifies the engine (standard, neural, long-form or generative) used by Amazon Polly when processing input text for speech synthesis. public let engine: Engine? /// Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (es-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no. public let includeAdditionalLanguageCodes: Bool? @@ -546,7 +547,7 @@ extension Polly { } public struct StartSpeechSynthesisTaskInput: AWSEncodableShape { - /// Specifies the engine (standard, neural or long-form) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error. + /// Specifies the engine (standard, neural, long-form or generative) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error. public let engine: Engine? /// Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi. public let languageCode: LanguageCode? @@ -558,7 +559,7 @@ extension Polly { public let outputS3BucketName: String /// The Amazon S3 key prefix for the output speech file. public let outputS3KeyPrefix: String? - /// The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". Valid values for pcm are "8000" and "16000" The default value is "16000". + /// The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000". Valid values for pcm are "8000" and "16000" The default value is "16000". public let sampleRate: String? 
/// ARN for the SNS topic optionally used for providing status notification for a speech synthesis task. public let snsTopicArn: String? @@ -593,7 +594,7 @@ extension Polly { try self.validate(self.lexiconNames, name: "lexiconNames", parent: name, max: 5) try self.validate(self.outputS3BucketName, name: "outputS3BucketName", parent: name, pattern: "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$") try self.validate(self.outputS3KeyPrefix, name: "outputS3KeyPrefix", parent: name, pattern: "^[0-9a-zA-Z\\/\\!\\-_\\.\\*\\'\\(\\):;\\$@=+\\,\\?&]{0,800}$") - try self.validate(self.snsTopicArn, name: "snsTopicArn", parent: name, pattern: "^arn:aws(-(cn|iso(-b)?|us-gov))?:sns:[a-z0-9_-]{1,50}:\\d{12}:[a-zA-Z0-9_-]{1,256}$") + try self.validate(self.snsTopicArn, name: "snsTopicArn", parent: name, pattern: "^arn:aws(-(cn|iso(-b)?|us-gov))?:sns:[a-z0-9_-]{1,50}:\\d{12}:[a-zA-Z0-9_-]{1,251}([a-zA-Z0-9_-]{0,5}|\\.fifo)$") try self.validate(self.speechMarkTypes, name: "speechMarkTypes", parent: name, max: 4) } @@ -629,7 +630,7 @@ extension Polly { public struct SynthesisTask: AWSDecodableShape { /// Timestamp for the time the synthesis task was started. public let creationTime: Date? - /// Specifies the engine (standard, neural or long-form) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error. + /// Specifies the engine (standard, neural, long-form or generative) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error. public let engine: Engine? /// Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi. public let languageCode: LanguageCode? @@ -641,7 +642,7 @@ extension Polly { public let outputUri: String? /// Number of billable characters synthesized. public let requestCharacters: Int? - /// The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". Valid values for pcm are "8000" and "16000" The default value is "16000". + /// The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000". Valid values for pcm are "8000" and "16000" The default value is "16000". public let sampleRate: String? /// ARN for the SNS topic optionally used for providing status notification for a speech synthesis task. public let snsTopicArn: String? @@ -696,7 +697,7 @@ extension Polly { } public struct SynthesizeSpeechInput: AWSEncodableShape { - /// Specifies the engine (standard, neural or long-form) for Amazon Polly to use when processing input text for speech synthesis. 
For information on Amazon Polly voices and which voices are available for each engine, see Available Voices. NTTS-only voices When using NTTS-only voices such as Kevin (en-US), this parameter is required and must be set to neural. If the engine is not specified, or is set to standard, this will result in an error. long-form-only voices When using long-form-only voices such as Danielle (en-US), this parameter is required and must be set to long-form. If the engine is not specified, or is set to standard or neural, this will result in an error. Type: String Valid Values: standard | neural | long-form Required: Yes Standard voices For standard voices, this is not required; the engine parameter defaults to standard. If the engine is not specified, or is set to standard and an NTTS-only voice is selected, this will result in an error. + /// Specifies the engine (standard, neural, long-form, or generative) for Amazon Polly to use when processing input text for speech synthesis. Provide an engine that is supported by the voice you select. If you don't provide an engine, the standard engine is selected by default. If a chosen voice isn't supported by the standard engine, this will result in an error. For information on Amazon Polly voices and which voices are available for each engine, see Available Voices. Type: String Valid Values: standard | neural | long-form | generative Required: Yes public let engine: Engine? /// Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN). If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi. public let languageCode: LanguageCode? @@ -704,7 +705,7 @@ extension Polly { public let lexiconNames: [String]? /// The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json. When pcm is used, the content returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format. public let outputFormat: OutputFormat - /// The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". Valid values for pcm are "8000" and "16000" The default value is "16000". + /// The audio frequency specified in Hz. The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000". The default value for long-form voices is "24000". The default value for generative voices is "24000". Valid values for pcm are "8000" and "16000" The default value is "16000". public let sampleRate: String? /// The type of speech marks returned for the input text. public let speechMarkTypes: [SpeechMarkType]? @@ -787,7 +788,7 @@ extension Polly { public let languageName: String? /// Name of the voice (for example, Salli, Kendra, etc.). This provides a human readable voice name that you might display in your application. public let name: String? 
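To show how the new generative engine value fits the request shapes above, here is a hedged sketch. The describeVoices and synthesizeSpeech calls, the response's voices member, the text and voiceId parameters, and the SotoPolly module name are assumptions not shown in this excerpt; the engine and outputFormat members are taken from it.

    import SotoPolly

    func speakGeneratively(client: AWSClient) async throws {
        let polly = Polly(client: client)

        // List only the voices that support the new generative engine.
        let voices = try await polly.describeVoices(.init(engine: .generative))
        let supported = voices.voices?.filter { $0.supportedEngines?.contains(.generative) == true } ?? []
        print(supported.compactMap { $0.name })

        // Synthesize with the generative engine; mp3 output defaults to 24000 Hz.
        _ = try await polly.synthesizeSpeech(.init(
            engine: .generative,
            outputFormat: .mp3,
            text: "Hello from the generative engine",   // assumed parameter, not in this hunk
            voiceId: .ruth                              // assumed voice enum case
        ))
    }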
- /// Specifies which engines (standard, neural or long-form) are supported by a given voice. + /// Specifies which engines (standard, neural, long-form or generative) are supported by a given voice. public let supportedEngines: [Engine]? public init(additionalLanguageCodes: [LanguageCode]? = nil, gender: Gender? = nil, id: VoiceId? = nil, languageCode: LanguageCode? = nil, languageName: String? = nil, name: String? = nil, supportedEngines: [Engine]? = nil) { diff --git a/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift b/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift index 2e6b7b7768..0a37f5eb8a 100644 --- a/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift +++ b/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift @@ -268,6 +268,12 @@ extension QBusiness { public var description: String { return self.rawValue } } + public enum QAppsControlMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum ReadAccessType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case allow = "ALLOW" case deny = "DENY" @@ -1224,7 +1230,7 @@ extension QBusiness { public let andAllFilters: [AttributeFilter]? /// Returns true when a document contains all the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue. public let containsAll: DocumentAttribute? - /// Returns true when a document contains any of the specified document attributes or metadata fields. Supported for the following document attribute value types: dateValue, longValue, stringListValue and stringValue. + /// Returns true when a document contains any of the specified document attributes or metadata fields. Supported for the following document attribute value types: stringListValue. public let containsAny: DocumentAttribute? /// Performs an equals operation on two document attributes or metadata fields. Supported for the following document attribute value types: dateValue, longValue, stringListValue and stringValue. public let equalsTo: DocumentAttribute? @@ -1659,7 +1665,7 @@ extension QBusiness { public let clientToken: String? /// The identifier of the Amazon Q Business conversation. public let conversationId: String? - /// The identifier of the previous end user text input message in a conversation. + /// The identifier of the previous system message in a conversation. public let parentMessageId: String? /// The groups that a user associated with the chat input belongs to. public let userGroups: [String]? @@ -1884,18 +1890,21 @@ extension QBusiness { public let encryptionConfiguration: EncryptionConfiguration? /// The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. public let identityCenterInstanceArn: String? + /// An option to allow end users to create and use Amazon Q Apps in the web experience. + public let qAppsConfiguration: QAppsConfiguration? /// The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. public let roleArn: String? /// A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @. 
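The QAppsControlMode enum added above pairs with the QAppsConfiguration shape and the new qAppsConfiguration members defined later in this hunk. A minimal sketch, assuming the SotoQBusiness module name, the createApplication operation, and an illustrative role ARN:

    import SotoQBusiness

    func createApplicationWithQApps(client: AWSClient) async throws {
        let qbusiness = QBusiness(client: client)
        // Allow end users to create and use Amazon Q Apps in the web experience.
        let request = QBusiness.CreateApplicationRequest(
            displayName: "docs-assistant",
            qAppsConfiguration: .init(qAppsControlMode: .enabled),
            roleArn: "arn:aws:iam::123456789012:role/qbusiness-app-role"   // illustrative
        )
        _ = try await qbusiness.createApplication(request)   // operation not shown in this excerpt
    }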
public let tags: [Tag]? - public init(attachmentsConfiguration: AttachmentsConfiguration? = nil, clientToken: String? = CreateApplicationRequest.idempotencyToken(), description: String? = nil, displayName: String, encryptionConfiguration: EncryptionConfiguration? = nil, identityCenterInstanceArn: String? = nil, roleArn: String? = nil, tags: [Tag]? = nil) { + public init(attachmentsConfiguration: AttachmentsConfiguration? = nil, clientToken: String? = CreateApplicationRequest.idempotencyToken(), description: String? = nil, displayName: String, encryptionConfiguration: EncryptionConfiguration? = nil, identityCenterInstanceArn: String? = nil, qAppsConfiguration: QAppsConfiguration? = nil, roleArn: String? = nil, tags: [Tag]? = nil) { self.attachmentsConfiguration = attachmentsConfiguration self.clientToken = clientToken self.description = description self.displayName = displayName self.encryptionConfiguration = encryptionConfiguration self.identityCenterInstanceArn = identityCenterInstanceArn + self.qAppsConfiguration = qAppsConfiguration self.roleArn = roleArn self.tags = tags } @@ -1927,6 +1936,7 @@ extension QBusiness { case displayName = "displayName" case encryptionConfiguration = "encryptionConfiguration" case identityCenterInstanceArn = "identityCenterInstanceArn" + case qAppsConfiguration = "qAppsConfiguration" case roleArn = "roleArn" case tags = "tags" } @@ -2071,7 +2081,7 @@ extension QBusiness { public let displayName: String /// A list of key-value pairs that identify or categorize the index. You can also use tags to help control access to the index. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @. public let tags: [Tag]? - /// The index type that's suitable for your needs. For more information on what's included in each type of index or index tier, see Amazon Q Business tiers. + /// The index type that's suitable for your needs. For more information on what's included in each type of index, see Amazon Q Business tiers. public let type: IndexType? public init(applicationId: String, capacityConfiguration: IndexCapacityConfiguration? = nil, clientToken: String? = CreateIndexRequest.idempotencyToken(), description: String? = nil, displayName: String, tags: [Tag]? = nil, type: IndexType? = nil) { @@ -3388,6 +3398,8 @@ extension QBusiness { public let error: ErrorDetail? /// The Amazon Resource Name (ARN) of the AWS IAM Identity Center instance attached to your Amazon Q Business application. public let identityCenterApplicationArn: String? + /// Settings for whether end users can create and use Amazon Q Apps in the web experience. + public let qAppsConfiguration: QAppsConfiguration? /// The Amazon Resource Name (ARN) of the IAM with permissions to access your CloudWatch logs and metrics. public let roleArn: String? /// The status of the Amazon Q Business application. @@ -3395,7 +3407,7 @@ extension QBusiness { /// The Unix timestamp when the Amazon Q Business application was last updated. public let updatedAt: Date? - public init(applicationArn: String? = nil, applicationId: String? = nil, attachmentsConfiguration: AppliedAttachmentsConfiguration? = nil, createdAt: Date? = nil, description: String? = nil, displayName: String? = nil, encryptionConfiguration: EncryptionConfiguration? = nil, error: ErrorDetail? = nil, identityCenterApplicationArn: String? = nil, roleArn: String? = nil, status: ApplicationStatus? = nil, updatedAt: Date? = nil) { + public init(applicationArn: String? = nil, applicationId: String? 
= nil, attachmentsConfiguration: AppliedAttachmentsConfiguration? = nil, createdAt: Date? = nil, description: String? = nil, displayName: String? = nil, encryptionConfiguration: EncryptionConfiguration? = nil, error: ErrorDetail? = nil, identityCenterApplicationArn: String? = nil, qAppsConfiguration: QAppsConfiguration? = nil, roleArn: String? = nil, status: ApplicationStatus? = nil, updatedAt: Date? = nil) { self.applicationArn = applicationArn self.applicationId = applicationId self.attachmentsConfiguration = attachmentsConfiguration @@ -3405,6 +3417,7 @@ extension QBusiness { self.encryptionConfiguration = encryptionConfiguration self.error = error self.identityCenterApplicationArn = identityCenterApplicationArn + self.qAppsConfiguration = qAppsConfiguration self.roleArn = roleArn self.status = status self.updatedAt = updatedAt @@ -3420,6 +3433,7 @@ extension QBusiness { case encryptionConfiguration = "encryptionConfiguration" case error = "error" case identityCenterApplicationArn = "identityCenterApplicationArn" + case qAppsConfiguration = "qAppsConfiguration" case roleArn = "roleArn" case status = "status" case updatedAt = "updatedAt" @@ -5413,6 +5427,19 @@ extension QBusiness { public init() {} } + public struct QAppsConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Status information about whether end users can create and use Amazon Q Apps in the web experience. + public let qAppsControlMode: QAppsControlMode + + public init(qAppsControlMode: QAppsControlMode) { + self.qAppsControlMode = qAppsControlMode + } + + private enum CodingKeys: String, CodingKey { + case qAppsControlMode = "qAppsControlMode" + } + } + public struct Retriever: AWSDecodableShape { /// The identifier of the Amazon Q Business application using the retriever. public let applicationId: String? @@ -5939,15 +5966,18 @@ extension QBusiness { public let displayName: String? /// The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. public let identityCenterInstanceArn: String? + /// An option to allow end users to create and use Amazon Q Apps in the web experience. + public let qAppsConfiguration: QAppsConfiguration? /// An Amazon Web Services Identity and Access Management (IAM) role that gives Amazon Q Business permission to access Amazon CloudWatch logs and metrics. public let roleArn: String? - public init(applicationId: String, attachmentsConfiguration: AttachmentsConfiguration? = nil, description: String? = nil, displayName: String? = nil, identityCenterInstanceArn: String? = nil, roleArn: String? = nil) { + public init(applicationId: String, attachmentsConfiguration: AttachmentsConfiguration? = nil, description: String? = nil, displayName: String? = nil, identityCenterInstanceArn: String? = nil, qAppsConfiguration: QAppsConfiguration? = nil, roleArn: String? 
= nil) { self.applicationId = applicationId self.attachmentsConfiguration = attachmentsConfiguration self.description = description self.displayName = displayName self.identityCenterInstanceArn = identityCenterInstanceArn + self.qAppsConfiguration = qAppsConfiguration self.roleArn = roleArn } @@ -5959,6 +5989,7 @@ extension QBusiness { try container.encodeIfPresent(self.description, forKey: .description) try container.encodeIfPresent(self.displayName, forKey: .displayName) try container.encodeIfPresent(self.identityCenterInstanceArn, forKey: .identityCenterInstanceArn) + try container.encodeIfPresent(self.qAppsConfiguration, forKey: .qAppsConfiguration) try container.encodeIfPresent(self.roleArn, forKey: .roleArn) } @@ -5983,6 +6014,7 @@ extension QBusiness { case description = "description" case displayName = "displayName" case identityCenterInstanceArn = "identityCenterInstanceArn" + case qAppsConfiguration = "qAppsConfiguration" case roleArn = "roleArn" } } diff --git a/Sources/Soto/Services/QuickSight/QuickSight_api.swift b/Sources/Soto/Services/QuickSight/QuickSight_api.swift index 4a53860cb1..4f911740a8 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_api.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_api.swift @@ -1075,6 +1075,19 @@ public struct QuickSight: AWSService { ) } + /// Describes all customer managed key registrations in a Amazon QuickSight account. + @Sendable + public func describeKeyRegistration(_ input: DescribeKeyRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeKeyRegistrationResponse { + return try await self.client.execute( + operation: "DescribeKeyRegistration", + path: "/accounts/{AwsAccountId}/key-registration", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes the current namespace. @Sendable public func describeNamespace(_ input: DescribeNamespaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeNamespaceResponse { @@ -2163,6 +2176,19 @@ public struct QuickSight: AWSService { ) } + /// Updates a customer managed key in a Amazon QuickSight account. + @Sendable + public func updateKeyRegistration(_ input: UpdateKeyRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateKeyRegistrationResponse { + return try await self.client.execute( + operation: "UpdateKeyRegistration", + path: "/accounts/{AwsAccountId}/key-registration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Use the UpdatePublicSharingSettings operation to turn on or turn off the public sharing settings of an Amazon QuickSight dashboard. To use this operation, turn on session capacity pricing for your Amazon QuickSight account. Before you can turn on public sharing on your account, make sure to give public sharing permissions to an administrative user in the Identity and Access Management (IAM) console. For more information on using IAM with Amazon QuickSight, see Using Amazon QuickSight with IAM in the Amazon QuickSight User Guide. 
@Sendable public func updatePublicSharingSettings(_ input: UpdatePublicSharingSettingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdatePublicSharingSettingsResponse { diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift index 1a0a94f73d..6f3fe073ab 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift @@ -14966,6 +14966,58 @@ extension QuickSight { } } + public struct DescribeKeyRegistrationRequest: AWSEncodableShape { + /// The ID of the Amazon Web Services account that contains the customer managed key registration that you want to describe. + public let awsAccountId: String + /// Determines whether the request returns the default key only. + public let defaultKeyOnly: Bool? + + public init(awsAccountId: String, defaultKeyOnly: Bool? = nil) { + self.awsAccountId = awsAccountId + self.defaultKeyOnly = defaultKeyOnly + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + request.encodeQuery(self.defaultKeyOnly, key: "default-key-only") + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DescribeKeyRegistrationResponse: AWSDecodableShape { + /// The ID of the Amazon Web Services account that contains the customer managed key registration specified in the request. + public let awsAccountId: String? + /// A list of RegisteredCustomerManagedKey objects in a Amazon QuickSight account. + public let keyRegistration: [RegisteredCustomerManagedKey]? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + + public init(awsAccountId: String? = nil, keyRegistration: [RegisteredCustomerManagedKey]? = nil, requestId: String? = nil, status: Int? = nil) { + self.awsAccountId = awsAccountId + self.keyRegistration = keyRegistration + self.requestId = requestId + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "AwsAccountId" + case keyRegistration = "KeyRegistration" + case requestId = "RequestId" + case status = "Status" + } + } + public struct DescribeNamespaceRequest: AWSEncodableShape { /// The ID for the Amazon Web Services account that contains the Amazon QuickSight namespace that you want to describe. public let awsAccountId: String @@ -16465,6 +16517,31 @@ extension QuickSight { } } + public struct FailedKeyRegistrationEntry: AWSDecodableShape { + /// The ARN of the KMS key that failed to update. + public let keyArn: String? + /// A message that provides information about why a FailedKeyRegistrationEntry error occurred. + public let message: String + /// A boolean that indicates whether a FailedKeyRegistrationEntry resulted from user error. If the value of this property is True, the error was caused by user error. If the value of this property is False, the error occurred on the backend. 
If your job continues fail and with a False SenderFault value, contact Amazon Web Services Support. + public let senderFault: Bool + /// The HTTP status of a FailedKeyRegistrationEntry error. + public let statusCode: Int + + public init(keyArn: String? = nil, message: String, senderFault: Bool, statusCode: Int) { + self.keyArn = keyArn + self.message = message + self.senderFault = senderFault + self.statusCode = statusCode + } + + private enum CodingKeys: String, CodingKey { + case keyArn = "KeyArn" + case message = "Message" + case senderFault = "SenderFault" + case statusCode = "StatusCode" + } + } + public struct FieldBasedTooltip: AWSEncodableShape & AWSDecodableShape { /// The visibility of Show aggregations. public let aggregationVisibility: Visibility? @@ -26474,11 +26551,11 @@ extension QuickSight { /// A list of groups whose permissions will be granted to Amazon QuickSight to access the cluster. These permissions are combined with the permissions granted to Amazon QuickSight by the DatabaseUser. If you choose to include this parameter, the RoleArn must grant access to redshift:JoinGroup. public let databaseGroups: [String]? /// The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser to True to create a new user with PUBLIC permissions. - public let databaseUser: String + public let databaseUser: String? /// Use the RoleArn structure to allow Amazon QuickSight to call redshift:GetClusterCredentials on your cluster. The calling principal must have iam:PassRole access to pass the role to Amazon QuickSight. The role's trust policy must allow the Amazon QuickSight service principal to assume the role. public let roleArn: String - public init(autoCreateDatabaseUser: Bool? = nil, databaseGroups: [String]? = nil, databaseUser: String, roleArn: String) { + public init(autoCreateDatabaseUser: Bool? = nil, databaseGroups: [String]? = nil, databaseUser: String? = nil, roleArn: String) { self.autoCreateDatabaseUser = autoCreateDatabaseUser self.databaseGroups = databaseGroups self.databaseUser = databaseUser @@ -26863,7 +26940,7 @@ extension QuickSight { /// The Amazon QuickSight role for the user. The user role can be one of the /// following: READER: A user who has read-only access to dashboards. AUTHOR: A user who can create data sources, datasets, analyses, and /// dashboards. ADMIN: A user who is an author, who can also manage Amazon QuickSight - /// settings. RESTRICTED_READER: This role isn't currently available for + /// settings. READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards. AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards. ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing. RESTRICTED_READER: This role isn't currently available for /// use. RESTRICTED_AUTHOR: This role isn't currently available for /// use. 
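One consequence of the RedshiftIAMParameters change above is that databaseUser is now optional, so a role-only configuration can be expressed. A small sketch, assuming the SotoQuickSight module name and an illustrative role ARN:

    import SotoQuickSight

    // QuickSight derives credentials purely from the role via
    // redshift:GetClusterCredentials; no databaseUser is supplied.
    let iamParameters = QuickSight.RedshiftIAMParameters(
        autoCreateDatabaseUser: true,
        roleArn: "arn:aws:iam::123456789012:role/quicksight-redshift-access"
    )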
public let userRole: UserRole @@ -26972,6 +27049,23 @@ extension QuickSight { } } + public struct RegisteredCustomerManagedKey: AWSEncodableShape & AWSDecodableShape { + /// Indicates whether a RegisteredCustomerManagedKey is set as the default key for encryption and decryption use. + public let defaultKey: Bool? + /// The ARN of the KMS key that is registered to a Amazon QuickSight account for encryption and decryption use. + public let keyArn: String? + + public init(defaultKey: Bool? = nil, keyArn: String? = nil) { + self.defaultKey = defaultKey + self.keyArn = keyArn + } + + private enum CodingKeys: String, CodingKey { + case defaultKey = "DefaultKey" + case keyArn = "KeyArn" + } + } + public struct RegisteredUserConsoleFeatureConfigurations: AWSEncodableShape { /// The state persistence configurations of an embedded Amazon QuickSight console. public let statePersistence: StatePersistenceConfigurations? @@ -30360,6 +30454,23 @@ extension QuickSight { } } + public struct SuccessfulKeyRegistrationEntry: AWSDecodableShape { + /// The ARN of the KMS key that is associated with the SuccessfulKeyRegistrationEntry entry. + public let keyArn: String + /// The HTTP status of a SuccessfulKeyRegistrationEntry entry. + public let statusCode: Int + + public init(keyArn: String, statusCode: Int) { + self.keyArn = keyArn + self.statusCode = statusCode + } + + private enum CodingKeys: String, CodingKey { + case keyArn = "KeyArn" + case statusCode = "StatusCode" + } + } + public struct TableAggregatedFieldWells: AWSEncodableShape & AWSDecodableShape { /// The group by field well for a pivot table. Values are grouped by group by fields. public let groupBy: [DimensionField]? @@ -35068,6 +35179,56 @@ extension QuickSight { } } + public struct UpdateKeyRegistrationRequest: AWSEncodableShape { + /// The ID of the Amazon Web Services account that contains the customer managed key registration that you want to update. + public let awsAccountId: String + /// A list of RegisteredCustomerManagedKey objects to be updated to the Amazon QuickSight account. + public let keyRegistration: [RegisteredCustomerManagedKey] + + public init(awsAccountId: String, keyRegistration: [RegisteredCustomerManagedKey]) { + self.awsAccountId = awsAccountId + self.keyRegistration = keyRegistration + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + try container.encode(self.keyRegistration, forKey: .keyRegistration) + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case keyRegistration = "KeyRegistration" + } + } + + public struct UpdateKeyRegistrationResponse: AWSDecodableShape { + /// A list of all customer managed key registrations that failed to update. + public let failedKeyRegistration: [FailedKeyRegistrationEntry]? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// A list of all customer managed key registrations that were successfully updated. + public let successfulKeyRegistration: [SuccessfulKeyRegistrationEntry]? 
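The new key-registration surface spans the describeKeyRegistration and updateKeyRegistration operations in QuickSight_api.swift and the shapes above. A minimal end-to-end sketch, assuming the SotoQuickSight module name and placeholder account and key identifiers:

    import SotoQuickSight

    func rotateDefaultKey(client: AWSClient) async throws {
        let quicksight = QuickSight(client: client)

        // Register a customer managed KMS key and mark it as the account default.
        let update = try await quicksight.updateKeyRegistration(.init(
            awsAccountId: "123456789012",
            keyRegistration: [
                .init(defaultKey: true,
                      keyArn: "arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555")
            ]
        ))
        for failure in update.failedKeyRegistration ?? [] {
            print("failed: \(failure.keyArn ?? "unknown key") \(failure.message)")
        }

        // Read back only the default key registration.
        let current = try await quicksight.describeKeyRegistration(.init(
            awsAccountId: "123456789012",
            defaultKeyOnly: true
        ))
        print(current.keyRegistration ?? [])
    }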
+ + public init(failedKeyRegistration: [FailedKeyRegistrationEntry]? = nil, requestId: String? = nil, successfulKeyRegistration: [SuccessfulKeyRegistrationEntry]? = nil) { + self.failedKeyRegistration = failedKeyRegistration + self.requestId = requestId + self.successfulKeyRegistration = successfulKeyRegistration + } + + private enum CodingKeys: String, CodingKey { + case failedKeyRegistration = "FailedKeyRegistration" + case requestId = "RequestId" + case successfulKeyRegistration = "SuccessfulKeyRegistration" + } + } + public struct UpdatePublicSharingSettingsRequest: AWSEncodableShape { /// The Amazon Web Services account ID associated with your Amazon QuickSight subscription. public let awsAccountId: String @@ -36097,7 +36258,7 @@ extension QuickSight { /// The Amazon QuickSight role of the user. The role can be one of the /// following default security cohorts: READER: A user who has read-only access to dashboards. AUTHOR: A user who can create data sources, datasets, analyses, and /// dashboards. ADMIN: A user who is an author, who can also manage Amazon QuickSight - /// settings. READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q Business, can build stories with Amazon Q, and can generate executive summaries from dashboards. AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards. ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing. The name of the Amazon QuickSight role is invisible to the user except for the console + /// settings. READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards. AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards. ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing. The name of the Amazon QuickSight role is invisible to the user except for the console /// screens dealing with permissions. public let role: UserRole /// A flag that you use to indicate that you want to remove all custom permissions from this user. Using this parameter resets the user to the state it was in before a custom permissions profile was applied. This parameter defaults to NULL and it doesn't accept any other value. @@ -36363,7 +36524,7 @@ extension QuickSight { public let identityType: IdentityType? /// The principal ID of the user. public let principalId: String? - /// The Amazon QuickSight role for the user. The user role can be one of the following:. READER: A user who has read-only access to dashboards. AUTHOR: A user who can create data sources, datasets, analyses, and dashboards. ADMIN: A user who is an author, who can also manage Amazon Amazon QuickSight settings. READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q Business, can build stories with Amazon Q, and can generate executive summaries from dashboards. 
AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards. ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing. RESTRICTED_READER: This role isn't currently available for use. RESTRICTED_AUTHOR: This role isn't currently available for use. + /// The Amazon QuickSight role for the user. The user role can be one of the following:. READER: A user who has read-only access to dashboards. AUTHOR: A user who can create data sources, datasets, analyses, and dashboards. ADMIN: A user who is an author, who can also manage Amazon Amazon QuickSight settings. READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards. AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards. ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing. RESTRICTED_READER: This role isn't currently available for use. RESTRICTED_AUTHOR: This role isn't currently available for use. public let role: UserRole? /// The user's user name. This value is required if you are registering a user that will be managed in Amazon QuickSight. In the output, the value for UserName is N/A when the value for IdentityType is IAM and the corresponding IAM user is deleted. public let userName: String? 
diff --git a/Sources/Soto/Services/RAM/RAM_api.swift b/Sources/Soto/Services/RAM/RAM_api.swift index 4221816bac..56ebdb7616 100644 --- a/Sources/Soto/Services/RAM/RAM_api.swift +++ b/Sources/Soto/Services/RAM/RAM_api.swift @@ -86,9 +86,6 @@ public struct RAM: AWSService { "us-east-2": "ram-fips.us-east-2.amazonaws.com", "us-gov-east-1": "ram.us-gov-east-1.amazonaws.com", "us-gov-west-1": "ram.us-gov-west-1.amazonaws.com", - "us-iso-east-1": "ram-fips.us-iso-east-1.c2s.ic.gov", - "us-iso-west-1": "ram-fips.us-iso-west-1.c2s.ic.gov", - "us-isob-east-1": "ram-fips.us-isob-east-1.sc2s.sgov.gov", "us-west-1": "ram-fips.us-west-1.amazonaws.com", "us-west-2": "ram-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/RDS/RDS_api.swift b/Sources/Soto/Services/RDS/RDS_api.swift index 5c7968a092..383699c0d0 100644 --- a/Sources/Soto/Services/RDS/RDS_api.swift +++ b/Sources/Soto/Services/RDS/RDS_api.swift @@ -81,9 +81,9 @@ public struct RDS: AWSService { "us-east-2": "rds-fips.us-east-2.amazonaws.com", "us-gov-east-1": "rds.us-gov-east-1.amazonaws.com", "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com", - "us-iso-east-1": "rds-fips.us-iso-east-1.c2s.ic.gov", - "us-iso-west-1": "rds-fips.us-iso-west-1.c2s.ic.gov", - "us-isob-east-1": "rds-fips.us-isob-east-1.sc2s.sgov.gov", + "us-iso-east-1": "rds.us-iso-east-1.c2s.ic.gov", + "us-iso-west-1": "rds.us-iso-west-1.c2s.ic.gov", + "us-isob-east-1": "rds.us-isob-east-1.sc2s.sgov.gov", "us-west-1": "rds-fips.us-west-1.amazonaws.com", "us-west-2": "rds-fips.us-west-2.amazonaws.com" ]) @@ -130,7 +130,7 @@ public struct RDS: AWSService { ) } - /// Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS. For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources. + /// Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS. For an overview on tagging your relational database resources, see Tagging Amazon RDS Resources or Tagging Amazon Aurora and Amazon RDS Resources. @Sendable public func addTagsToResource(_ input: AddTagsToResourceMessage, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index b7de510522..dc92e76326 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -1266,7 +1266,7 @@ extension RDS { public let allocatedStorage: Int? /// Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically. Valid for Cluster Type: Multi-AZ DB clusters only public let autoMinorVersionUpgrade: Bool? - /// A list of Availability Zones (AZs) where DB instances in the DB cluster can be created. For information on Amazon Web Services Regions and Availability Zones, see Choosing the Regions and Availability Zones in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only + /// A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster. For information on AZs, see Availability Zones in the Amazon Aurora User Guide. 
Valid for Cluster Type: Aurora DB clusters only Constraints: Can't specify more than three AZs. @OptionalCustomCoding> public var availabilityZones: [String]? /// The target backtrack window, in seconds. To disable backtracking, set this value to 0. Valid for Cluster Type: Aurora MySQL DB clusters only Default: 0 Constraints: If specified, this value must be set to a number from 0 to 259,200 (72 hours). @@ -1279,7 +1279,7 @@ extension RDS { public let characterSetName: String? /// Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? - /// The name for your database of up to 64 alphanumeric characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters + /// The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let databaseName: String? /// The identifier for this DB cluster. This parameter is stored as a lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ DB clusters) letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: my-cluster1 public let dbClusterIdentifier: String? @@ -1312,8 +1312,10 @@ extension RDS { public let enableLocalWriteForwarding: Bool? /// Specifies whether to turn on Performance Insights for the DB cluster. For more information, see Using Amazon Performance Insights in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters only public let enablePerformanceInsights: Bool? - /// The database engine to use for this DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql | aurora-postgresql | mysql | postgres + /// The database engine to use for this DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: aurora-mysql aurora-postgresql mysql postgres neptune - For information about using Amazon Neptune, see the Amazon Neptune User Guide . public let engine: String? + /// The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. 
For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The DB engine mode of the DB cluster, either provisioned or serverless. The serverless engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the provisioned engine mode. For information about limitations and requirements for Serverless DB clusters, see the following sections in the Amazon Aurora User Guide: Limitations of Aurora Serverless v1 Requirements for Aurora Serverless v2 Valid for Cluster Type: Aurora DB clusters only public let engineMode: String? /// The version number of the database engine to use. To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (MySQL 8.0-compatible), use the following command: aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" You can supply either 5.7 or 8.0 to use the default engine version for Aurora MySQL version 2 or version 3, respectively. To list all of the available engine versions for Aurora PostgreSQL, use the following command: aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" To list all of the available engine versions for RDS for MySQL, use the following command: aws rds describe-db-engine-versions --engine mysql --query "DBEngineVersions[].EngineVersion" To list all of the available engine versions for RDS for PostgreSQL, use the following command: aws rds describe-db-engine-versions --engine postgres --query "DBEngineVersions[].EngineVersion" For information about a specific engine, see the following topics: Aurora MySQL - see Database engine updates for Amazon Aurora MySQL in the Amazon Aurora User Guide. Aurora PostgreSQL - see Amazon Aurora PostgreSQL releases and engine versions in the Amazon Aurora User Guide. RDS for MySQL - see Amazon RDS for MySQL in the Amazon RDS User Guide. RDS for PostgreSQL - see Amazon RDS for PostgreSQL in the Amazon RDS User Guide. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters @@ -1372,7 +1374,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableGlobalWriteForwarding: Bool? = nil, enableHttpEndpoint: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enableLimitlessDatabase: Bool? = nil, enableLocalWriteForwarding: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineMode: String? = nil, engineVersion: String? 
= nil, globalClusterIdentifier: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, replicationSourceIdentifier: String? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableGlobalWriteForwarding: Bool? = nil, enableHttpEndpoint: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enableLimitlessDatabase: Bool? = nil, enableLocalWriteForwarding: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, replicationSourceIdentifier: String? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZones = availabilityZones @@ -1398,6 +1400,7 @@ extension RDS { self.enableLocalWriteForwarding = enableLocalWriteForwarding self.enablePerformanceInsights = enablePerformanceInsights self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineMode = engineMode self.engineVersion = engineVersion self.globalClusterIdentifier = globalClusterIdentifier @@ -1454,6 +1457,7 @@ extension RDS { case enableLocalWriteForwarding = "EnableLocalWriteForwarding" case enablePerformanceInsights = "EnablePerformanceInsights" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineMode = "EngineMode" case engineVersion = "EngineVersion" case globalClusterIdentifier = "GlobalClusterIdentifier" @@ -1603,7 +1607,7 @@ extension RDS { public let dbInstanceClass: String? /// The identifier for this DB instance. This parameter is stored as a lowercase string. Constraints: Must contain from 1 to 63 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: mydbinstance public let dbInstanceIdentifier: String? - /// The meaning of this parameter differs according to the database engine you use. Amazon Aurora MySQL The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster. Constraints: Must contain 1 to 64 alphanumeric characters. Can't be a word reserved by the database engine. Amazon Aurora PostgreSQL The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster. Constraints: It must contain 1 to 63 alphanumeric characters. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9). Can't be a word reserved by the database engine. Amazon RDS Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs. Default: ORCL Constraints: Must contain 1 to 8 alphanumeric characters. Must contain a letter. Can't be a word reserved by the database engine. Amazon RDS Custom for SQL Server Not applicable. Must be null. RDS for Db2 The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for MariaDB The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for MySQL The name of the database to create when the DB instance is created. 
If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for Oracle The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName. Default: ORCL Constraints: Can't be longer than 8 characters. RDS for PostgreSQL The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance. Constraints: Must contain 1 to 63 letters, numbers, or underscores. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for SQL Server Not applicable. Must be null. + /// The meaning of this parameter differs according to the database engine you use. Amazon Aurora MySQL The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster. Constraints: Must contain 1 to 64 alphanumeric characters. Can't be a word reserved by the database engine. Amazon Aurora PostgreSQL The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Constraints: It must contain 1 to 63 alphanumeric characters. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9). Can't be a word reserved by the database engine. Amazon RDS Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs. Default: ORCL Constraints: Must contain 1 to 8 alphanumeric characters. Must contain a letter. Can't be a word reserved by the database engine. Amazon RDS Custom for SQL Server Not applicable. Must be null. RDS for Db2 The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for MariaDB The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for MySQL The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. 
RDS for Oracle The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName. Default: ORCL Constraints: Can't be longer than 8 characters. RDS for PostgreSQL The name of the database to create when the DB instance is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Constraints: Must contain 1 to 63 letters, numbers, or underscores. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine. RDS for SQL Server Not applicable. Must be null. public let dbName: String? /// The name of the DB parameter group to associate with this DB instance. If you don't specify a value, then Amazon RDS uses the default DB parameter group for the specified DB engine and version. This setting doesn't apply to RDS Custom DB instances. Constraints: Must be 1 to 255 letters, numbers, or hyphens. The first character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. public let dbParameterGroupName: String? @@ -1642,13 +1646,15 @@ extension RDS { public let enablePerformanceInsights: Bool? /// The database engine to use for this DB instance. Not every database engine is available in every Amazon Web Services Region. Valid Values: aurora-mysql (for Aurora MySQL DB instances) aurora-postgresql (for Aurora PostgreSQL DB instances) custom-oracle-ee (for RDS Custom for Oracle DB instances) custom-oracle-ee-cdb (for RDS Custom for Oracle DB instances) custom-oracle-se2 (for RDS Custom for Oracle DB instances) custom-oracle-se2-cdb (for RDS Custom for Oracle DB instances) custom-sqlserver-ee (for RDS Custom for SQL Server DB instances) custom-sqlserver-se (for RDS Custom for SQL Server DB instances) custom-sqlserver-web (for RDS Custom for SQL Server DB instances) db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web public let engine: String? + /// The life cycle type for this DB instance. By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB instance will fail if the DB major version is past its end of standard support date. This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The version number of the database engine to use. This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster. For a list of valid engine versions, use the DescribeDBEngineVersions operation. 
The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region. Amazon RDS Custom for Oracle A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide. Amazon RDS Custom for SQL Server See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide. RDS for Db2 For information, see Db2 on Amazon RDS versions in the Amazon RDS User Guide. RDS for MariaDB For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide. RDS for Microsoft SQL Server For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide. RDS for MySQL For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide. RDS for Oracle For information, see Oracle Database Engine release notes in the Amazon RDS User Guide. RDS for PostgreSQL For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide. public let engineVersion: String? /// The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. For information about valid IOPS values, see Amazon RDS DB instance storage in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster. Constraints: For RDS for Db2, MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the storage amount for the DB instance. For RDS for SQL Server - Must be a multiple between 1 and 50 of the storage amount for the DB instance. public let iops: Int? /// The Amazon Web Services KMS key identifier for an encrypted DB instance. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. This setting doesn't apply to Amazon Aurora DB instances. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster. If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. For Amazon RDS Custom, a KMS key is required for DB instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key. public let kmsKeyId: String? - /// The license model information for this DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license + /// The license model information for this DB instance. License models for RDS for Db2 require additional configuration. 
The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license public let licenseModel: String? /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. public let manageMasterUserPassword: Bool? @@ -1710,7 +1716,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, multiTenant: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, timezone: String? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? 
= nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, multiTenant: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, timezone: String? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -1741,6 +1747,7 @@ extension RDS { self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication self.enablePerformanceInsights = enablePerformanceInsights self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineVersion = engineVersion self.iops = iops self.kmsKeyId = kmsKeyId @@ -1806,6 +1813,7 @@ extension RDS { case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" case enablePerformanceInsights = "EnablePerformanceInsights" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineVersion = "EngineVersion" case iops = "Iops" case kmsKeyId = "KmsKeyId" @@ -2386,7 +2394,7 @@ extension RDS { /// The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens. Constraints: If SourceIds are supplied, SourceType must also be provided. If the source type is a DB instance, a DBInstanceIdentifier value must be supplied. If the source type is a DB cluster, a DBClusterIdentifier value must be supplied. If the source type is a DB parameter group, a DBParameterGroupName value must be supplied. If the source type is a DB security group, a DBSecurityGroupName value must be supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier value must be supplied. 
If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier value must be supplied. If the source type is an RDS Proxy, a DBProxyName value must be supplied. @OptionalCustomCoding> public var sourceIds: [String]? - /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment public let sourceType: String? /// The name of the subscription. Constraints: The name must be less than 255 characters. public let subscriptionName: String? @@ -2433,6 +2441,8 @@ extension RDS { public let deletionProtection: Bool? /// The database engine to use for this global database cluster. Valid Values: aurora-mysql | aurora-postgresql Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the engine of the source DB cluster. public let engine: String? + /// The life cycle type for this global database cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your global cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the global cluster will fail if the DB major version is past its end of standard support date. This setting only applies to Aurora PostgreSQL-based global databases. You can use this setting to enroll your global cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your global cluster past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon Aurora User Guide. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The engine version to use for this global database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster. public let engineVersion: String? /// The cluster identifier for this global database cluster. This parameter is stored as a lowercase string. @@ -2442,10 +2452,11 @@ extension RDS { /// Specifies whether to enable storage encryption for the new global database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the setting from the source DB cluster. public let storageEncrypted: Bool? - public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, sourceDBClusterIdentifier: String? = nil, storageEncrypted: Bool? 
= nil) { + public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, sourceDBClusterIdentifier: String? = nil, storageEncrypted: Bool? = nil) { self.databaseName = databaseName self.deletionProtection = deletionProtection self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineVersion = engineVersion self.globalClusterIdentifier = globalClusterIdentifier self.sourceDBClusterIdentifier = sourceDBClusterIdentifier @@ -2456,6 +2467,7 @@ extension RDS { case databaseName = "DatabaseName" case deletionProtection = "DeletionProtection" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineVersion = "EngineVersion" case globalClusterIdentifier = "GlobalClusterIdentifier" case sourceDBClusterIdentifier = "SourceDBClusterIdentifier" @@ -2740,6 +2752,8 @@ extension RDS { public let endpoint: String? /// The database engine used for this DB cluster. public let engine: String? + /// The life cycle type for the DB cluster. For more information, see CreateDBCluster. + public let engineLifecycleSupport: String? /// The DB engine mode of the DB cluster, either provisioned or serverless. For more information, see CreateDBCluster. public let engineMode: String? /// The version of the database engine. @@ -2824,7 +2838,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroups: [VpcSecurityGroupMembership]? - public init(activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBClusterRole]? = nil, automaticRestartTime: Date? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, awsBackupRecoveryPointArn: String? = nil, backtrackConsumedChangeRecords: Int64? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, capacity: Int? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, copyTagsToSnapshot: Bool? = nil, crossAccountClone: Bool? = nil, customEndpoints: [String]? = nil, databaseName: String? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterOptionGroupMemberships: [DBClusterOptionGroupStatus]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, earliestBacktrackTime: Date? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalWriteForwardingRequested: Bool? = nil, globalWriteForwardingStatus: WriteForwardingStatus? = nil, hostedZoneId: String? = nil, httpEndpointEnabled: Bool? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, ioOptimizedNextAllowedModificationTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, limitlessDatabase: LimitlessDatabase? = nil, localWriteForwardingStatus: LocalWriteForwardingStatus? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? 
= nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ClusterPendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, scalingConfigurationInfo: ScalingConfigurationInfo? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfigurationInfo? = nil, status: String? = nil, statusInfos: [DBClusterStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBClusterRole]? = nil, automaticRestartTime: Date? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, awsBackupRecoveryPointArn: String? = nil, backtrackConsumedChangeRecords: Int64? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, capacity: Int? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, copyTagsToSnapshot: Bool? = nil, crossAccountClone: Bool? = nil, customEndpoints: [String]? = nil, databaseName: String? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterOptionGroupMemberships: [DBClusterOptionGroupStatus]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, earliestBacktrackTime: Date? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalWriteForwardingRequested: Bool? = nil, globalWriteForwardingStatus: WriteForwardingStatus? = nil, hostedZoneId: String? = nil, httpEndpointEnabled: Bool? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, ioOptimizedNextAllowedModificationTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, limitlessDatabase: LimitlessDatabase? = nil, localWriteForwardingStatus: LocalWriteForwardingStatus? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ClusterPendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? 
= nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, scalingConfigurationInfo: ScalingConfigurationInfo? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfigurationInfo? = nil, status: String? = nil, statusInfos: [DBClusterStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { self.activityStreamKinesisStreamName = activityStreamKinesisStreamName self.activityStreamKmsKeyId = activityStreamKmsKeyId self.activityStreamMode = activityStreamMode @@ -2863,6 +2877,7 @@ extension RDS { self.enabledCloudwatchLogsExports = enabledCloudwatchLogsExports self.endpoint = endpoint self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineMode = engineMode self.engineVersion = engineVersion self.globalWriteForwardingRequested = globalWriteForwardingRequested @@ -2945,6 +2960,7 @@ extension RDS { case enabledCloudwatchLogsExports = "EnabledCloudwatchLogsExports" case endpoint = "Endpoint" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineMode = "EngineMode" case engineVersion = "EngineVersion" case globalWriteForwardingRequested = "GlobalWriteForwardingRequested" @@ -3915,6 +3931,8 @@ extension RDS { public let endpoint: Endpoint? /// The database engine used for this DB instance. public let engine: String? + /// The life cycle type for the DB instance. For more information, see CreateDBInstance. + public let engineLifecycleSupport: String? /// The version of the database engine. public let engineVersion: String? /// The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance. @@ -3931,7 +3949,7 @@ extension RDS { public let kmsKeyId: String? /// The latest time to which a database in this DB instance can be restored with point-in-time restore. public let latestRestorableTime: Date? - /// The license model information for this DB instance. This setting doesn't apply to RDS Custom DB instances. + /// The license model information for this DB instance. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. public let licenseModel: String? /// The listener connection endpoint for SQL Server Always On. public let listenerEndpoint: Endpoint? @@ -4012,7 +4030,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroups: [VpcSecurityGroupMembership]? - public init(activityStreamEngineNativeAuditFieldsIncluded: Bool? = nil, activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamPolicyStatus: ActivityStreamPolicyStatus? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBInstanceRole]? = nil, automaticRestartTime: Date? = nil, automationMode: AutomationMode? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, customerOwnedIpEnabled: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? 
= nil, dbInstanceArn: String? = nil, dbInstanceAutomatedBackupsReplications: [DBInstanceAutomatedBackupsReplication]? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbInstancePort: Int? = nil, dbInstanceStatus: String? = nil, dbiResourceId: String? = nil, dbName: String? = nil, dbParameterGroups: [DBParameterGroupStatus]? = nil, dbSecurityGroups: [DBSecurityGroupMembership]? = nil, dbSubnetGroup: DBSubnetGroup? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: Endpoint? = nil, engine: String? = nil, engineVersion: String? = nil, enhancedMonitoringResourceArn: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, isStorageConfigUpgradeAvailable: Bool? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, licenseModel: String? = nil, listenerEndpoint: Endpoint? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, multiTenant: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupMemberships: [OptionGroupMembership]? = nil, pendingModifiedValues: PendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, readReplicaDBClusterIdentifiers: [String]? = nil, readReplicaDBInstanceIdentifiers: [String]? = nil, readReplicaSourceDBClusterIdentifier: String? = nil, readReplicaSourceDBInstanceIdentifier: String? = nil, replicaMode: ReplicaMode? = nil, resumeFullAutomationModeTime: Date? = nil, secondaryAvailabilityZone: String? = nil, statusInfos: [DBInstanceStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(activityStreamEngineNativeAuditFieldsIncluded: Bool? = nil, activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamPolicyStatus: ActivityStreamPolicyStatus? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBInstanceRole]? = nil, automaticRestartTime: Date? = nil, automationMode: AutomationMode? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, awsBackupRecoveryPointArn: String? = nil, backupRetentionPeriod: Int? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, customerOwnedIpEnabled: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterIdentifier: String? = nil, dbInstanceArn: String? = nil, dbInstanceAutomatedBackupsReplications: [DBInstanceAutomatedBackupsReplication]? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbInstancePort: Int? = nil, dbInstanceStatus: String? 
= nil, dbiResourceId: String? = nil, dbName: String? = nil, dbParameterGroups: [DBParameterGroupStatus]? = nil, dbSecurityGroups: [DBSecurityGroupMembership]? = nil, dbSubnetGroup: DBSubnetGroup? = nil, dbSystemId: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: Endpoint? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, enhancedMonitoringResourceArn: String? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, instanceCreateTime: Date? = nil, iops: Int? = nil, isStorageConfigUpgradeAvailable: Bool? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, licenseModel: String? = nil, listenerEndpoint: Endpoint? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, multiTenant: Bool? = nil, ncharCharacterSetName: String? = nil, networkType: String? = nil, optionGroupMemberships: [OptionGroupMembership]? = nil, pendingModifiedValues: PendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, promotionTier: Int? = nil, publiclyAccessible: Bool? = nil, readReplicaDBClusterIdentifiers: [String]? = nil, readReplicaDBInstanceIdentifiers: [String]? = nil, readReplicaSourceDBClusterIdentifier: String? = nil, readReplicaSourceDBInstanceIdentifier: String? = nil, replicaMode: ReplicaMode? = nil, resumeFullAutomationModeTime: Date? = nil, secondaryAvailabilityZone: String? = nil, statusInfos: [DBInstanceStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, tdeCredentialArn: String? = nil, timezone: String? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { self.activityStreamEngineNativeAuditFieldsIncluded = activityStreamEngineNativeAuditFieldsIncluded self.activityStreamKinesisStreamName = activityStreamKinesisStreamName self.activityStreamKmsKeyId = activityStreamKmsKeyId @@ -4053,6 +4071,7 @@ extension RDS { self.enabledCloudwatchLogsExports = enabledCloudwatchLogsExports self.endpoint = endpoint self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineVersion = engineVersion self.enhancedMonitoringResourceArn = enhancedMonitoringResourceArn self.iamDatabaseAuthenticationEnabled = iamDatabaseAuthenticationEnabled @@ -4141,6 +4160,7 @@ extension RDS { case enabledCloudwatchLogsExports = "EnabledCloudwatchLogsExports" case endpoint = "Endpoint" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineVersion = "EngineVersion" case enhancedMonitoringResourceArn = "EnhancedMonitoringResourceArn" case iamDatabaseAuthenticationEnabled = "IAMDatabaseAuthenticationEnabled" @@ -8221,6 +8241,8 @@ extension RDS { public let deletionProtection: Bool? /// The Aurora database engine used by the global database cluster. public let engine: String? + /// The life cycle type for the global cluster. For more information, see CreateGlobalCluster. + public let engineLifecycleSupport: String? /// Indicates the database engine version. public let engineVersion: String? 
/// A data object containing all properties for the current state of an in-process or pending switchover or failover process for this global cluster (Aurora global database). This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster. @@ -8239,10 +8261,11 @@ extension RDS { /// The storage encryption setting for the global database cluster. public let storageEncrypted: Bool? - public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, failoverState: FailoverState? = nil, globalClusterArn: String? = nil, globalClusterIdentifier: String? = nil, globalClusterMembers: [GlobalClusterMember]? = nil, globalClusterResourceId: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil) { + public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, failoverState: FailoverState? = nil, globalClusterArn: String? = nil, globalClusterIdentifier: String? = nil, globalClusterMembers: [GlobalClusterMember]? = nil, globalClusterResourceId: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil) { self.databaseName = databaseName self.deletionProtection = deletionProtection self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineVersion = engineVersion self.failoverState = failoverState self.globalClusterArn = globalClusterArn @@ -8257,6 +8280,7 @@ extension RDS { case databaseName = "DatabaseName" case deletionProtection = "DeletionProtection" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineVersion = "EngineVersion" case failoverState = "FailoverState" case globalClusterArn = "GlobalClusterArn" @@ -9600,7 +9624,7 @@ extension RDS { public var eventCategories: [String]? /// The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it. public let snsTopicArn: String? - /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy + /// The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned. Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment public let sourceType: String? /// The name of the RDS event notification subscription. public let subscriptionName: String? @@ -10456,7 +10480,7 @@ extension RDS { } public struct PendingMaintenanceAction: AWSDecodableShape { - /// The type of pending maintenance action that is available for the resource. Valid actions are system-update, db-upgrade, hardware-maintenance, and ca-certificate-rotation. + /// The type of pending maintenance action that is available for the resource. For more information about maintenance actions, see Maintaining a DB instance. 
Valid Values: system-update | db-upgrade | hardware-maintenance | ca-certificate-rotation public let action: String? /// The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. public let autoAppliedAfterDate: Date? @@ -11445,6 +11469,8 @@ extension RDS { public let enableIAMDatabaseAuthentication: Bool? /// The name of the database engine to be used for this DB cluster. Valid Values: aurora-mysql (for Aurora MySQL) public let engine: String? + /// The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The version number of the database engine to use. To list all of the available engine versions for aurora-mysql (Aurora MySQL), use the following command: aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" Aurora MySQL Examples: 5.7.mysql_aurora.2.12.0, 8.0.mysql_aurora.3.04.0 public let engineVersion: String? /// The Amazon Web Services KMS key identifier for an encrypted DB cluster. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. If the StorageEncrypted parameter is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS will use your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. @@ -11488,7 +11514,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? 
= nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, s3BucketName: String? = nil, s3IngestionRoleArn: String? = nil, s3Prefix: String? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, sourceEngine: String? = nil, sourceEngineVersion: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, s3BucketName: String? = nil, s3IngestionRoleArn: String? = nil, s3Prefix: String? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, sourceEngine: String? = nil, sourceEngineVersion: String? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { self.availabilityZones = availabilityZones self.backtrackWindow = backtrackWindow self.backupRetentionPeriod = backupRetentionPeriod @@ -11504,6 +11530,7 @@ extension RDS { self.enableCloudwatchLogsExports = enableCloudwatchLogsExports self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineVersion = engineVersion self.kmsKeyId = kmsKeyId self.manageMasterUserPassword = manageMasterUserPassword @@ -11543,6 +11570,7 @@ extension RDS { case enableCloudwatchLogsExports = "EnableCloudwatchLogsExports" case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineVersion = "EngineVersion" case kmsKeyId = "KmsKeyId" case manageMasterUserPassword = "ManageMasterUserPassword" @@ -11614,6 +11642,8 @@ extension RDS { public let enableIAMDatabaseAuthentication: Bool? /// The database engine to use for the new DB cluster. Default: The same as source Constraint: Must be compatible with the engine of the source Valid for: Aurora DB clusters and Multi-AZ DB clusters public let engine: String? + /// The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. 
You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The DB engine mode of the DB cluster, either provisioned or serverless. For more information, see CreateDBCluster. Valid for: Aurora DB clusters only public let engineMode: String? /// The version of the database engine to use for the new DB cluster. If you don't specify an engine version, the default version for the database engine in the Amazon Web Services Region is used. To list all of the available engine versions for Aurora MySQL, use the following command: aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" To list all of the available engine versions for Aurora PostgreSQL, use the following command: aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" To list all of the available engine versions for RDS for MySQL, use the following command: aws rds describe-db-engine-versions --engine mysql --query "DBEngineVersions[].EngineVersion" To list all of the available engine versions for RDS for PostgreSQL, use the following command: aws rds describe-db-engine-versions --engine postgres --query "DBEngineVersions[].EngineVersion" Aurora MySQL See Database engine updates for Amazon Aurora MySQL in the Amazon Aurora User Guide. Aurora PostgreSQL See Amazon Aurora PostgreSQL releases and engine versions in the Amazon Aurora User Guide. MySQL See Amazon RDS for MySQL in the Amazon RDS User Guide. PostgreSQL See Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide. Valid for: Aurora DB clusters and Multi-AZ DB clusters @@ -11646,7 +11676,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, snapshotIdentifier: String? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? 
= nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, snapshotIdentifier: String? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { self.availabilityZones = availabilityZones self.backtrackWindow = backtrackWindow self.copyTagsToSnapshot = copyTagsToSnapshot @@ -11661,6 +11691,7 @@ extension RDS { self.enableCloudwatchLogsExports = enableCloudwatchLogsExports self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineMode = engineMode self.engineVersion = engineVersion self.iops = iops @@ -11693,6 +11724,7 @@ extension RDS { case enableCloudwatchLogsExports = "EnableCloudwatchLogsExports" case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineMode = "EngineMode" case engineVersion = "EngineVersion" case iops = "Iops" @@ -11750,6 +11782,8 @@ extension RDS { public var enableCloudwatchLogsExports: [String]? /// Specifies whether to enable mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts. By default, mapping isn't enabled. For more information, see IAM Database Authentication in the Amazon Aurora User Guide. Valid for: Aurora DB clusters only public let enableIAMDatabaseAuthentication: Bool? + /// The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The engine mode of the new cluster. Specify provisioned or serverless, depending on the type of the cluster you are creating. 
You can create an Aurora Serverless v1 clone from a provisioned cluster, or a provisioned clone from an Aurora Serverless v1 cluster. To create a clone that is an Aurora Serverless v1 cluster, the original cluster must be an Aurora Serverless v1 cluster or an encrypted provisioned cluster. Valid for: Aurora DB clusters only public let engineMode: String? /// The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be a multiple between .5 and 50 of the storage amount for the DB instance. Valid for: Multi-AZ DB clusters only @@ -11787,7 +11821,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(backtrackWindow: Int64? = nil, copyTagsToSnapshot: Bool? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engineMode: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, restoreToTime: Date? = nil, restoreType: String? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, sourceDBClusterIdentifier: String? = nil, sourceDbClusterResourceId: String? = nil, storageType: String? = nil, tags: [Tag]? = nil, useLatestRestorableTime: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(backtrackWindow: Int64? = nil, copyTagsToSnapshot: Bool? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, restoreToTime: Date? = nil, restoreType: String? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, sourceDBClusterIdentifier: String? = nil, sourceDbClusterResourceId: String? = nil, storageType: String? = nil, tags: [Tag]? = nil, useLatestRestorableTime: Bool? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.backtrackWindow = backtrackWindow self.copyTagsToSnapshot = copyTagsToSnapshot self.dbClusterIdentifier = dbClusterIdentifier @@ -11799,6 +11833,7 @@ extension RDS { self.domainIAMRoleName = domainIAMRoleName self.enableCloudwatchLogsExports = enableCloudwatchLogsExports self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication + self.engineLifecycleSupport = engineLifecycleSupport self.engineMode = engineMode self.iops = iops self.kmsKeyId = kmsKeyId @@ -11831,6 +11866,7 @@ extension RDS { case domainIAMRoleName = "DomainIAMRoleName" case enableCloudwatchLogsExports = "EnableCloudwatchLogsExports" case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineMode = "EngineMode" case iops = "Iops" case kmsKeyId = "KmsKeyId" @@ -11923,9 +11959,11 @@ extension RDS { public let enableIAMDatabaseAuthentication: Bool? /// The database engine to use for the new instance. This setting doesn't apply to RDS Custom. Default: The same as source Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. Valid Values: db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web public let engine: String? + /// The life cycle type for this DB instance. By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter isn't specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts. The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be an integer greater than 1000. public let iops: Int? - /// License model information for the restored DB instance. This setting doesn't apply to RDS Custom. Default: Same as source. Valid Values: license-included | bring-your-own-license | general-public-license + /// License model information for the restored DB instance. License models for RDS for Db2 require additional configuration. 
The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. public let licenseModel: String? /// Specifies whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. public let multiAZ: Bool? @@ -11956,7 +11994,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterSnapshotIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSnapshotIdentifier: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, iops: Int? = nil, licenseModel: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbClusterSnapshotIdentifier: String? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSnapshotIdentifier: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, iops: Int? = nil, licenseModel: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? 
= nil, publiclyAccessible: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -11983,6 +12021,7 @@ extension RDS { self.enableCustomerOwnedIp = enableCustomerOwnedIp self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.iops = iops self.licenseModel = licenseModel self.multiAZ = multiAZ @@ -12027,6 +12066,7 @@ extension RDS { case enableCustomerOwnedIp = "EnableCustomerOwnedIp" case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case iops = "Iops" case licenseModel = "LicenseModel" case multiAZ = "MultiAZ" @@ -12101,6 +12141,8 @@ extension RDS { public let enablePerformanceInsights: Bool? /// The name of the database engine to be used for this instance. Valid Values: mysql public let engine: String? + /// The life cycle type for this DB instance. By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The version number of the database engine to use. Choose the latest minor version of your database engine. For information about engine versions, see CreateDBInstance, or call DescribeDBEngineVersions. public let engineVersion: String? /// The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid IOPS values, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. @@ -12146,7 +12188,7 @@ extension RDS { public let publiclyAccessible: Bool? /// The name of your Amazon S3 bucket that contains your database backup file. public let s3BucketName: String? - /// An Amazon Web Services Identity and Access Management (IAM) role to allow Amazon RDS to access your Amazon S3 bucket. + /// An Amazon Web Services Identity and Access Management (IAM) role with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon S3 bucket. For information about this role, see Creating an IAM role manually in the Amazon RDS User Guide. public let s3IngestionRoleArn: String? /// The prefix of your Amazon S3 bucket. public let s3Prefix: String? 
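// Editor's note: a hedged usage sketch (not part of this patch) showing how the new
// engineLifecycleSupport parameter might be supplied when restoring an RDS for MySQL
// instance from Amazon S3 with Soto. The named arguments follow the initializer shown
// in this hunk (what appears to be RestoreDBInstanceFromS3Message); the
// restoreDBInstanceFromS3 wrapper and the `rds` service object are assumed, and the
// bucket, role ARN, versions, and identifiers below are placeholders.
import SotoRDS

func restoreFromS3(rds: RDS) async throws {
    let request = RDS.RestoreDBInstanceFromS3Message(
        allocatedStorage: 100,
        dbInstanceClass: "db.m6g.large",
        dbInstanceIdentifier: "restored-from-s3",
        engine: "mysql",
        // Opt out of paid Extended Support; RDS may then auto-upgrade the instance once
        // the major version passes its end of standard support date (see the doc comment above).
        engineLifecycleSupport: "open-source-rds-extended-support-disabled",
        manageMasterUserPassword: true,
        masterUsername: "admin",
        s3BucketName: "my-backup-bucket",
        s3IngestionRoleArn: "arn:aws:iam::123456789012:role/rds-s3-ingestion",
        sourceEngine: "mysql",
        sourceEngineVersion: "8.0.36"
    )
    let restored = try await rds.restoreDBInstanceFromS3(request)
    print(restored.dbInstance?.engineLifecycleSupport ?? "not reported")
}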
@@ -12169,7 +12211,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, copyTagsToSnapshot: Bool? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, s3BucketName: String? = nil, s3IngestionRoleArn: String? = nil, s3Prefix: String? = nil, sourceEngine: String? = nil, sourceEngineVersion: String? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, copyTagsToSnapshot: Bool? = nil, dbInstanceClass: String? = nil, dbInstanceIdentifier: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSecurityGroups: [String]? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, enableCloudwatchLogsExports: [String]? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, licenseModel: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, maxAllocatedStorage: Int? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, s3BucketName: String? = nil, s3IngestionRoleArn: String? = nil, s3Prefix: String? = nil, sourceEngine: String? = nil, sourceEngineVersion: String? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, useDefaultProcessorFeatures: Bool? = nil, vpcSecurityGroupIds: [String]? 
= nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -12188,6 +12230,7 @@ extension RDS { self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication self.enablePerformanceInsights = enablePerformanceInsights self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.engineVersion = engineVersion self.iops = iops self.kmsKeyId = kmsKeyId @@ -12241,6 +12284,7 @@ extension RDS { case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" case enablePerformanceInsights = "EnablePerformanceInsights" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case engineVersion = "EngineVersion" case iops = "Iops" case kmsKeyId = "KmsKeyId" @@ -12341,9 +12385,11 @@ extension RDS { public let enableIAMDatabaseAuthentication: Bool? /// The database engine to use for the new instance. This setting doesn't apply to RDS Custom. Valid Values: db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web Default: The same as source Constraints: Must be compatible with the engine of the source. public let engine: String? + /// The life cycle type for this DB instance. By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support + public let engineLifecycleSupport: String? /// The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. This setting doesn't apply to SQL Server. Constraints: Must be an integer greater than 1000. public let iops: Int? - /// The license model information for the restored DB instance. This setting doesn't apply to RDS Custom. Valid Values: license-included | bring-your-own-license | general-public-license Default: Same as the source. + /// The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. 
Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. public let licenseModel: String? /// The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. public let maxAllocatedStorage: Int? @@ -12388,7 +12434,7 @@ extension RDS { @OptionalCustomCoding> public var vpcSecurityGroupIds: [String]? - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbInstanceClass: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, iops: Int? = nil, licenseModel: String? = nil, maxAllocatedStorage: Int? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, restoreTime: Date? = nil, sourceDBInstanceAutomatedBackupsArn: String? = nil, sourceDBInstanceIdentifier: String? = nil, sourceDbiResourceId: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, targetDBInstanceIdentifier: String? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, useLatestRestorableTime: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZone: String? = nil, backupTarget: String? = nil, caCertificateIdentifier: String? = nil, copyTagsToSnapshot: Bool? = nil, customIamInstanceProfile: String? = nil, dbInstanceClass: String? = nil, dbName: String? = nil, dbParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dedicatedLogVolume: Bool? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainAuthSecretArn: String? = nil, domainDnsIps: [String]? = nil, domainFqdn: String? = nil, domainIAMRoleName: String? = nil, domainOu: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableCustomerOwnedIp: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, iops: Int? = nil, licenseModel: String? = nil, maxAllocatedStorage: Int? = nil, multiAZ: Bool? = nil, networkType: String? = nil, optionGroupName: String? = nil, port: Int? = nil, processorFeatures: [ProcessorFeature]? = nil, publiclyAccessible: Bool? = nil, restoreTime: Date? = nil, sourceDBInstanceAutomatedBackupsArn: String? = nil, sourceDBInstanceIdentifier: String? 
= nil, sourceDbiResourceId: String? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tags: [Tag]? = nil, targetDBInstanceIdentifier: String? = nil, tdeCredentialArn: String? = nil, tdeCredentialPassword: String? = nil, useDefaultProcessorFeatures: Bool? = nil, useLatestRestorableTime: Bool? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZone = availabilityZone @@ -12412,6 +12458,7 @@ extension RDS { self.enableCustomerOwnedIp = enableCustomerOwnedIp self.enableIAMDatabaseAuthentication = enableIAMDatabaseAuthentication self.engine = engine + self.engineLifecycleSupport = engineLifecycleSupport self.iops = iops self.licenseModel = licenseModel self.maxAllocatedStorage = maxAllocatedStorage @@ -12460,6 +12507,7 @@ extension RDS { case enableCustomerOwnedIp = "EnableCustomerOwnedIp" case enableIAMDatabaseAuthentication = "EnableIAMDatabaseAuthentication" case engine = "Engine" + case engineLifecycleSupport = "EngineLifecycleSupport" case iops = "Iops" case licenseModel = "LicenseModel" case maxAllocatedStorage = "MaxAllocatedStorage" @@ -13329,7 +13377,7 @@ extension RDS { } public struct UpgradeTarget: AWSDecodableShape { - /// Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true. + /// Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true. This parameter is dynamic, and is set by RDS. public let autoUpgrade: Bool? /// The version of the database engine that a DB instance can be upgraded to. public let description: String? diff --git a/Sources/Soto/Services/Redshift/Redshift_api.swift b/Sources/Soto/Services/Redshift/Redshift_api.swift index 04163a517a..acd722df2d 100644 --- a/Sources/Soto/Services/Redshift/Redshift_api.swift +++ b/Sources/Soto/Services/Redshift/Redshift_api.swift @@ -1778,7 +1778,7 @@ public struct Redshift: AWSService { ) } - /// Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc1.large (if your cluster is in a VPC) dc1.8xlarge (if your cluster is in a VPC) dc2.large dc2.8xlarge ds2.xlarge ds2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster. + /// Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster. 
@Sendable public func resizeCluster(_ input: ResizeClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ResizeClusterResult { return try await self.client.execute( diff --git a/Sources/Soto/Services/Redshift/Redshift_shapes.swift b/Sources/Soto/Services/Redshift/Redshift_shapes.swift index 8ec83ea084..c960c9bec9 100644 --- a/Sources/Soto/Services/Redshift/Redshift_shapes.swift +++ b/Sources/Soto/Services/Redshift/Redshift_shapes.swift @@ -1689,11 +1689,11 @@ extension Redshift { public let masterUserPassword: String? /// If true, Amazon Redshift will deploy the cluster in two Availability Zones (AZ). public let multiAZ: Bool? - /// The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge + /// The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge public let nodeType: String? /// The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster. Default: 1 Constraints: Value must be at least 1 and no more than 100. public let numberOfNodes: Int? - /// The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535. + /// The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535. public let port: Int? /// The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. public let preferredMaintenanceWindow: String? @@ -6170,14 +6170,14 @@ extension Redshift { /// The new node type of the cluster. 
If you specify a new node type, you must also specify the number of nodes parameter. /// For more information about resizing clusters, go to /// Resizing Clusters in Amazon Redshift - /// in the Amazon Redshift Cluster Management Guide. Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge + /// in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge public let nodeType: String? /// The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter. /// For more information about resizing clusters, go to /// Resizing Clusters in Amazon Redshift /// in the Amazon Redshift Cluster Management Guide. Valid Values: Integer greater than 0. public let numberOfNodes: Int? - /// The option to change the port of an Amazon Redshift cluster. Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535. + /// The option to change the port of an Amazon Redshift cluster. Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535. public let port: Int? /// The weekly time range (in UTC) during which system maintenance can occur, if necessary. If system maintenance is necessary during the window, it may result in an outage. This maintenance window change is made immediately. If the new maintenance window indicates the current time, there must be at least 120 minutes between the current time and end of the window in order to ensure that pending changes are applied. Default: Uses existing setting. Format: ddd:hh24:mi-ddd:hh24:mi, for example wed:07:30-wed:08:00. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Must be at least 30 minutes. public let preferredMaintenanceWindow: String? @@ -6805,7 +6805,7 @@ extension Redshift { public let estimatedDiskUtilizationPercent: Double? /// The category of the node configuration recommendation. public let mode: Mode? - /// The node type, such as, "ds2.8xlarge". + /// The node type, such as, "ra3.4xlarge". public let nodeType: String? /// The number of nodes. public let numberOfNodes: Int? @@ -7527,7 +7527,7 @@ extension Redshift { public let sourceReservedNodeCount: Int? /// The identifier of the source reserved node. public let sourceReservedNodeId: String? - /// The source reserved-node type, for example ds2.xlarge. + /// The source reserved-node type, for example ra3.4xlarge. public let sourceReservedNodeType: String? /// The status of the reserved-node exchange request. Statuses include in-progress and requested. public let status: ReservedNodeExchangeStatusType? @@ -7903,13 +7903,13 @@ extension Redshift { public let masterPasswordSecretKmsKeyId: String? /// If true, the snapshot will be restored to a cluster deployed in two Availability Zones. public let multiAZ: Bool? - /// The node type that the restored cluster will be provisioned with. Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. 
In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide. + /// The node type that the restored cluster will be provisioned with. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc2.large node type into another dc2 type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide. public let nodeType: String? /// The number of nodes specified when provisioning the restored cluster. public let numberOfNodes: Int? /// The Amazon Web Services account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. public let ownerAccount: String? - /// The port number on which the cluster accepts connections. Default: The same port as the original cluster. Valid values: For clusters with ds2 or dc2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215. + /// The port number on which the cluster accepts connections. Default: The same port as the original cluster. Valid values: For clusters with DC2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215. public let port: Int? /// The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: The value selected for the cluster from which the snapshot was taken. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. public let preferredMaintenanceWindow: String? @@ -8059,15 +8059,15 @@ extension Redshift { } public struct RestoreStatus: AWSDecodableShape { - /// The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types. + /// The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 node types. public let currentRestoreRateInMegaBytesPerSecond: Double? - /// The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types. + /// The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 node types. public let elapsedTimeInSeconds: Int64? - /// The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types. 
+ /// The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 node types. public let estimatedTimeToCompletionInSeconds: Int64? - /// The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types. + /// The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 node types. public let progressInMegaBytes: Int64? - /// The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types. + /// The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 node types. public let snapshotSizeInMegaBytes: Int64? /// The status of the restore action. Returns starting, restoring, completed, or failed. public let status: String? @@ -8385,7 +8385,7 @@ extension Redshift { public let startTime: Date? /// The state of the scheduled action. For example, DISABLED. public let state: ScheduledActionState? - /// A JSON format string of the Amazon Redshift API operation with input parameters. "{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}". + /// A JSON format string of the Amazon Redshift API operation with input parameters. "{\"ResizeCluster\":{\"NodeType\":\"ra3.4xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}". public let targetAction: ScheduledActionType? public init(endTime: Date? = nil, iamRole: String? = nil, nextInvocations: [Date]? = nil, schedule: String? = nil, scheduledActionDescription: String? = nil, scheduledActionName: String? = nil, startTime: Date? = nil, state: ScheduledActionState? = nil, targetAction: ScheduledActionType? = nil) { diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift index f1f5761529..9e851aeb3e 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift @@ -419,6 +419,19 @@ public struct Resiliencehub: AWSService { ) } + /// Indicates the list of resource drifts that were detected while running an assessment. + @Sendable + public func listAppAssessmentResourceDrifts(_ input: ListAppAssessmentResourceDriftsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAppAssessmentResourceDriftsResponse { + return try await self.client.execute( + operation: "ListAppAssessmentResourceDrifts", + path: "/list-app-assessment-resource-drifts", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the assessments for an Resilience Hub application. You can use request parameters to refine the results for the response object. @Sendable public func listAppAssessments(_ input: ListAppAssessmentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAppAssessmentsResponse { @@ -835,6 +848,25 @@ extension Resiliencehub { ) } + /// Indicates the list of resource drifts that were detected while running an assessment. + /// Return PaginatorSequence for operation. 
+ /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listAppAssessmentResourceDriftsPaginator( + _ input: ListAppAssessmentResourceDriftsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAppAssessmentResourceDrifts, + inputKey: \ListAppAssessmentResourceDriftsRequest.nextToken, + outputKey: \ListAppAssessmentResourceDriftsResponse.nextToken, + logger: logger + ) + } + /// Lists the assessments for an Resilience Hub application. You can use request parameters to refine the results for the response object. /// Return PaginatorSequence for operation. /// @@ -1141,6 +1173,16 @@ extension Resiliencehub.ListAppAssessmentComplianceDriftsRequest: AWSPaginateTok } } +extension Resiliencehub.ListAppAssessmentResourceDriftsRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> Resiliencehub.ListAppAssessmentResourceDriftsRequest { + return .init( + assessmentArn: self.assessmentArn, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension Resiliencehub.ListAppAssessmentsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Resiliencehub.ListAppAssessmentsRequest { return .init( diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift index 90812ac9bb..782fc778ed 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift @@ -108,7 +108,9 @@ extension Resiliencehub { } public enum DifferenceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case added = "Added" case notEqual = "NotEqual" + case removed = "Removed" public var description: String { return self.rawValue } } @@ -128,6 +130,7 @@ extension Resiliencehub { } public enum DriftType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case appComponentResiliencyComplianceStatus = "AppComponentResiliencyComplianceStatus" case applicationCompliance = "ApplicationCompliance" public var description: String { return self.rawValue } } @@ -1102,7 +1105,7 @@ extension Resiliencehub { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[^\\x00-\\x1f\\x22]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^[^\\x00-\\x1f\\x22]+$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[^\\x00-\\x1f\\x22]*$") } @@ -1361,7 +1364,7 @@ extension Resiliencehub { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[^\\x00-\\x1f\\x22]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^[^\\x00-\\x1f\\x22]+$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[^\\x00-\\x1f\\x22]*$") } @@ -1434,7 +1437,7 @@ extension Resiliencehub { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name:
"tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[^\\x00-\\x1f\\x22]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^[^\\x00-\\x1f\\x22]+$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[^\\x00-\\x1f\\x22]*$") } @@ -2371,7 +2374,7 @@ extension Resiliencehub { public let eventType: EventType /// Unique name to identify an event subscription. public let name: String - /// Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic. The format for this ARN is: arn:partition:sns:region:account:topic-name. For more information about ARNs, + /// Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic. The format for this ARN is: arn:partition:sns:region:account:topic-name. For more information about ARNs, /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. public let snsTopicArn: String? @@ -2590,6 +2593,53 @@ extension Resiliencehub { } } + public struct ListAppAssessmentResourceDriftsRequest: AWSEncodableShape { + /// Amazon Resource Name (ARN) of the assessment. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let assessmentArn: String + /// Indicates the maximum number of drift results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. + public let maxResults: Int? + /// Null, or the token from a previous call to get the next set of results. + public let nextToken: String? + + public init(assessmentArn: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.assessmentArn = assessmentArn + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.assessmentArn, name: "assessmentArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S{1,2000}$") + } + + private enum CodingKeys: String, CodingKey { + case assessmentArn = "assessmentArn" + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListAppAssessmentResourceDriftsResponse: AWSDecodableShape { + /// Null, or the token from a previous call to get the next set of results. + public let nextToken: String? + /// Indicates all the resource drifts detected for an assessed entity. + public let resourceDrifts: [ResourceDrift] + + public init(nextToken: String? = nil, resourceDrifts: [ResourceDrift]) { + self.nextToken = nextToken + self.resourceDrifts = resourceDrifts + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case resourceDrifts = "resourceDrifts" + } + } + public struct ListAppAssessmentsRequest: AWSEncodableShape { /// Amazon Resource Name (ARN) of the Resilience Hub application. 
The format for this ARN is: /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, @@ -3121,7 +3171,7 @@ extension Resiliencehub { /// Amazon Resource Name (ARN) of the assessment. The format for this ARN is: /// arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. - public let assessmentArn: String + public let assessmentArn: String? /// Maximum number of results to include in the response. If more results exist than the specified /// MaxResults value, a token is included in the response so that the remaining results can be retrieved. public let maxResults: Int? @@ -3137,7 +3187,7 @@ extension Resiliencehub { /// Status of the action. public let status: [RecommendationTemplateStatus]? - public init(assessmentArn: String, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, recommendationTemplateArn: String? = nil, reverseOrder: Bool? = nil, status: [RecommendationTemplateStatus]? = nil) { + public init(assessmentArn: String? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, recommendationTemplateArn: String? = nil, reverseOrder: Bool? = nil, status: [RecommendationTemplateStatus]? = nil) { self.assessmentArn = assessmentArn self.maxResults = maxResults self.name = name @@ -3564,7 +3614,7 @@ extension Resiliencehub { public let physicalResourceId: PhysicalResourceId /// The name of the resource. public let resourceName: String? - /// The type of resource. + /// Type of resource. public let resourceType: String /// Type of input source. public let sourceType: ResourceSourceType? @@ -4056,6 +4106,37 @@ extension Resiliencehub { } } + public struct ResourceDrift: AWSDecodableShape { + /// Amazon Resource Name (ARN) of the application whose resources have drifted. The format for this ARN is: + /// arn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let appArn: String? + /// Version of the application whose resources have drifted. + public let appVersion: String? + /// Indicates if the resource was added or removed. + public let diffType: DifferenceType? + /// Reference identifier of the resource drift. + public let referenceId: String? + /// Identifier of the drifted resource. + public let resourceIdentifier: ResourceIdentifier? + + public init(appArn: String? = nil, appVersion: String? = nil, diffType: DifferenceType? = nil, referenceId: String? = nil, resourceIdentifier: ResourceIdentifier? = nil) { + self.appArn = appArn + self.appVersion = appVersion + self.diffType = diffType + self.referenceId = referenceId + self.resourceIdentifier = resourceIdentifier + } + + private enum CodingKeys: String, CodingKey { + case appArn = "appArn" + case appVersion = "appVersion" + case diffType = "diffType" + case referenceId = "referenceId" + case resourceIdentifier = "resourceIdentifier" + } + } + public struct ResourceError: AWSDecodableShape { /// Identifier of the logical resource. public let logicalResourceId: String? @@ -4094,22 +4175,39 @@ extension Resiliencehub { } } + public struct ResourceIdentifier: AWSDecodableShape { + /// Logical identifier of the drifted resource. + public let logicalResourceId: LogicalResourceId? + /// Type of the drifted resource. + public let resourceType: String? 
+ + public init(logicalResourceId: LogicalResourceId? = nil, resourceType: String? = nil) { + self.logicalResourceId = logicalResourceId + self.resourceType = resourceType + } + + private enum CodingKeys: String, CodingKey { + case logicalResourceId = "logicalResourceId" + case resourceType = "resourceType" + } + } + public struct ResourceMapping: AWSEncodableShape & AWSDecodableShape { - /// The name of the application this resource is mapped to. + /// Name of the application this resource is mapped to when the mappingType is AppRegistryApp. public let appRegistryAppName: String? - /// Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to. This parameter accepts values in "eks-cluster/namespace" format. + /// Name of the Amazon Elastic Kubernetes Service cluster and namespace that this resource is mapped to when the mappingType is EKS. This parameter accepts values in "eks-cluster/namespace" format. public let eksSourceName: String? - /// The name of the CloudFormation stack this resource is mapped to. + /// Name of the CloudFormation stack this resource is mapped to when the mappingType is CfnStack. public let logicalStackName: String? - /// Specifies the type of resource mapping. AppRegistryApp The resource is mapped to another application. The name of the application is contained in the appRegistryAppName property. CfnStack The resource is mapped to a CloudFormation stack. The name of the CloudFormation stack is contained in the logicalStackName property. Resource The resource is mapped to another resource. The name of the resource is contained in the resourceName property. ResourceGroup The resource is mapped to Resource Groups. The name of the resource group is contained in the resourceGroupName property. + /// Specifies the type of resource mapping. public let mappingType: ResourceMappingType /// Identifier of the physical resource. public let physicalResourceId: PhysicalResourceId - /// Name of the resource group that the resource is mapped to. + /// Name of the Resource Groups that this resource is mapped to when the mappingType is ResourceGroup. public let resourceGroupName: String? - /// Name of the resource that the resource is mapped to. + /// Name of the resource that this resource is mapped to when the mappingType is Resource. public let resourceName: String? - /// The short name of the Terraform source. + /// Name of the Terraform source that this resource is mapped to when the mappingType is Terraform. public let terraformSourceName: String? public init(appRegistryAppName: String? = nil, eksSourceName: String? = nil, logicalStackName: String? = nil, mappingType: ResourceMappingType, physicalResourceId: PhysicalResourceId, resourceGroupName: String? = nil, resourceName: String? = nil, terraformSourceName: String? = nil) { @@ -4166,13 +4264,13 @@ extension Resiliencehub { } public struct ScoringComponentResiliencyScore: AWSDecodableShape { - /// Number of recommendations that were excluded from the assessment. For example, if the Excluded count for Resilience Hub recommended Amazon CloudWatch alarms is 7, it indicates that 7 Amazon CloudWatch alarms are excluded from the assessment. + /// Number of recommendations that were excluded from the assessment. For example, if the excludedCount for Alarms coverage scoring component is 7, it indicates that 7 Amazon CloudWatch alarms are excluded from the assessment. public let excludedCount: Int64? 
- /// Number of issues that must be resolved to obtain the maximum possible score for the scoring component. For SOPs, alarms, and FIS experiments, these are the number of recommendations that must be implemented. For compliance, it is the number of Application Components that has breached the resiliency policy. For example, if the Outstanding count for Resilience Hub recommended Amazon CloudWatch alarms is 5, it indicates that 5 Amazon CloudWatch alarms must be fixed to achieve the maximum possible score. + /// Number of recommendations that must be implemented to obtain the maximum possible score for the scoring component. For SOPs, alarms, and tests, these are the number of recommendations that must be implemented. For compliance, these are the number of Application Components that have breached the resiliency policy. For example, if the outstandingCount for Alarms coverage scoring component is 5, it indicates that 5 Amazon CloudWatch alarms need to be implemented to achieve the maximum possible score. public let outstandingCount: Int64? - /// Maximum possible score that can be obtained for the scoring component. If the Possible score is 20 points, it indicates the maximum possible score you can achieve for your application when you run a new assessment after implementing all the Resilience Hub recommendations. + /// Maximum possible score that can be obtained for the scoring component. For example, if the possibleScore is 20 points, it indicates the maximum possible score you can achieve for the scoring component when you run a new assessment after implementing all the Resilience Hub recommendations. public let possibleScore: Double? - /// Resiliency score of your application. + /// Resiliency score points given for the scoring component. The score is always less than or equal to the possibleScore. public let score: Double? public init(excludedCount: Int64? = nil, outstandingCount: Int64? = nil, possibleScore: Double? = nil, score: Double? 
= nil) { @@ -4269,7 +4367,7 @@ extension Resiliencehub { try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[^\\x00-\\x1f\\x22]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^[^\\x00-\\x1f\\x22]+$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[^\\x00-\\x1f\\x22]*$") } @@ -4322,7 +4420,7 @@ extension Resiliencehub { try self.tags.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[^\\x00-\\x1f\\x22]+$") + try validate($0.key, name: "tags.key", parent: name, pattern: "^[^\\x00-\\x1f\\x22]+$") try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, pattern: "^[^\\x00-\\x1f\\x22]*$") } @@ -4462,7 +4560,7 @@ extension Resiliencehub { try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) - try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[^\\x00-\\x1f\\x22]+$") + try validate($0, name: "tagKeys[]", parent: name, pattern: "^[^\\x00-\\x1f\\x22]+$") } try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) diff --git a/Sources/Soto/Services/Route53Profiles/Route53Profiles_api.swift b/Sources/Soto/Services/Route53Profiles/Route53Profiles_api.swift index 876e872681..bcc908bc0a 100644 --- a/Sources/Soto/Services/Route53Profiles/Route53Profiles_api.swift +++ b/Sources/Soto/Services/Route53Profiles/Route53Profiles_api.swift @@ -73,7 +73,7 @@ public struct Route53Profiles: AWSService { // MARK: API Calls - /// Associates a Route 53 Profiles profile with a VPC. A VPC can have only one Profile associated with it, but a Profile can be associated with up to 5000 VPCs. + /// Associates a Route 53 Profiles profile with a VPC. A VPC can have only one Profile associated with it, but a Profile can be associated with 1000 of VPCs (and you can request a higher quota). For more information, see https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities. @Sendable public func associateProfile(_ input: AssociateProfileRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateProfileResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/Route53Profiles/Route53Profiles_shapes.swift b/Sources/Soto/Services/Route53Profiles/Route53Profiles_shapes.swift index 30f4e0693d..81d0e4826b 100644 --- a/Sources/Soto/Services/Route53Profiles/Route53Profiles_shapes.swift +++ b/Sources/Soto/Services/Route53Profiles/Route53Profiles_shapes.swift @@ -103,8 +103,8 @@ extension Route53Profiles { public let profileId: String /// Amazon resource number, ARN, of the DNS resource. public let resourceArn: String - /// If you are adding a DNS Firewall rule group, include also a priority in this format: - /// Key=FirewallRuleGroupPriority,Value=100 + /// If you are adding a DNS Firewall rule group, include also a priority. 
The priority indicates the processing order for the rule groups, starting with the priority assigned the lowest value. + /// The allowed values for priority are between 100 and 9900. public let resourceProperties: String? public init(name: String, profileId: String, resourceArn: String, resourceProperties: String? = nil) { @@ -871,7 +871,7 @@ extension Route53Profiles { public let name: String? /// ID of the resource association. public let profileResourceAssociationId: String - /// If you are adding a DNS Firewall rule group, include also a priority in this format: Key=FirewallRuleGroupPriority,Value=100. + /// If you are adding a DNS Firewall rule group, include also a priority. The priority indicates the processing order for the rule groups, starting with the priority assigned the lowest value. The allowed values for priority are between 100 and 9900. public let resourceProperties: String? public init(name: String? = nil, profileResourceAssociationId: String, resourceProperties: String? = nil) { diff --git a/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift b/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift index b718b3601c..c20e2274dd 100644 --- a/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift +++ b/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift @@ -554,10 +554,10 @@ extension Route53Resolver { public let creatorRequestId: String /// The ID of the domain list that you want to use in the rule. public let firewallDomainListId: String - /// How you want the the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. + /// How you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME. /// Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be - /// added to the allow domain list. Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to - /// the domain alloww list. + /// added to the domain list. Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to + /// the domain list. public let firewallDomainRedirectionAction: FirewallDomainRedirectionAction? /// The unique identifier of the firewall rule group where you want to create the rule. public let firewallRuleGroupId: String @@ -1509,10 +1509,10 @@ extension Route53Resolver { public let creatorRequestId: String? /// The ID of the domain list that's used in the rule. public let firewallDomainListId: String? - /// How you want the the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. + /// How you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME. /// Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be - /// added to the allow domain list. Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the domain in the redirection list to - /// the domain alloww list. + /// added to the domain list. Trust_Redirection_Domain inspects only the first domain in the redirection chain. 
You don't need to add the subsequent domains in the redirection list to + /// the domain list. public let firewallDomainRedirectionAction: FirewallDomainRedirectionAction? /// The unique identifier of the firewall rule group of the rule. public let firewallRuleGroupId: String? @@ -4044,10 +4044,10 @@ extension Route53Resolver { public let blockResponse: BlockResponse? /// The ID of the domain list to use in the rule. public let firewallDomainListId: String - /// How you want the the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. + /// How you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME. /// Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be - /// added to the allow domain list. Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the domain in the redirection list to - /// the domain alloww list. + /// added to the domain list. Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to + /// the domain list. public let firewallDomainRedirectionAction: FirewallDomainRedirectionAction? /// The unique identifier of the firewall rule group for the rule. public let firewallRuleGroupId: String diff --git a/Sources/Soto/Services/S3/S3_api.swift b/Sources/Soto/Services/S3/S3_api.swift index f0ae008fb1..6b8c2d1bf6 100644 --- a/Sources/Soto/Services/S3/S3_api.swift +++ b/Sources/Soto/Services/S3/S3_api.swift @@ -233,7 +233,7 @@ public struct S3: AWSService { public func completeMultipartUpload(_ input: CompleteMultipartUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CompleteMultipartUploadOutput { return try await self.client.execute( operation: "CompleteMultipartUpload", - path: "/{Bucket}/{Key+}?x-id=CompleteMultipartUpload", + path: "/{Bucket}/{Key+}", httpMethod: .POST, serviceConfig: self.config, input: input, @@ -274,7 +274,7 @@ public struct S3: AWSService { public func createMultipartUpload(_ input: CreateMultipartUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMultipartUploadOutput { return try await self.client.execute( operation: "CreateMultipartUpload", - path: "/{Bucket}/{Key+}?uploads&x-id=CreateMultipartUpload", + path: "/{Bucket}/{Key+}?uploads", httpMethod: .POST, serviceConfig: self.config, input: input, @@ -499,7 +499,7 @@ public struct S3: AWSService { public func deleteObjects(_ input: DeleteObjectsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteObjectsOutput { return try await self.client.execute( operation: "DeleteObjects", - path: "/{Bucket}?delete&x-id=DeleteObjects", + path: "/{Bucket}?delete", httpMethod: .POST, serviceConfig: self.config, input: input, @@ -1414,7 +1414,7 @@ public struct S3: AWSService { public func restoreObject(_ input: RestoreObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RestoreObjectOutput { return try await self.client.execute( operation: "RestoreObject", - path: "/{Bucket}/{Key+}?restore&x-id=RestoreObject", + path: "/{Bucket}/{Key+}?restore", httpMethod: .POST, serviceConfig: self.config, input: input, @@ -1427,7 +1427,7 @@ public struct S3: AWSService { public func selectObjectContent(_ input: SelectObjectContentRequest, logger: Logger = 
AWSClient.loggingDisabled) async throws -> SelectObjectContentOutput { return try await self.client.execute( operation: "SelectObjectContent", - path: "/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent", + path: "/{Bucket}/{Key+}?select&select-type=2", httpMethod: .POST, serviceConfig: self.config, input: input, @@ -1467,7 +1467,7 @@ public struct S3: AWSService { public func writeGetObjectResponse(_ input: WriteGetObjectResponseRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( operation: "WriteGetObjectResponse", - path: "/WriteGetObjectResponse?x-id=WriteGetObjectResponse", + path: "/WriteGetObjectResponse", httpMethod: .POST, serviceConfig: self.config, input: input, diff --git a/Sources/Soto/Services/S3Control/S3Control_api.swift b/Sources/Soto/Services/S3Control/S3Control_api.swift index 4024b32ef1..440b11c194 100644 --- a/Sources/Soto/Services/S3Control/S3Control_api.swift +++ b/Sources/Soto/Services/S3Control/S3Control_api.swift @@ -86,6 +86,7 @@ public struct S3Control: AWSService { "ap-southeast-3": "s3-control.ap-southeast-3.amazonaws.com", "ap-southeast-4": "s3-control.ap-southeast-4.amazonaws.com", "ca-central-1": "s3-control.ca-central-1.amazonaws.com", + "ca-west-1": "s3-control.ca-west-1.amazonaws.com", "cn-north-1": "s3-control.cn-north-1.amazonaws.com.cn", "cn-northwest-1": "s3-control.cn-northwest-1.amazonaws.com.cn", "eu-central-1": "s3-control.eu-central-1.amazonaws.com", @@ -127,6 +128,7 @@ public struct S3Control: AWSService { "ap-southeast-3": "s3-control.dualstack.ap-southeast-3.amazonaws.com", "ap-southeast-4": "s3-control.dualstack.ap-southeast-4.amazonaws.com", "ca-central-1": "s3-control.dualstack.ca-central-1.amazonaws.com", + "ca-west-1": "s3-control.dualstack.ca-west-1.amazonaws.com", "cn-north-1": "s3-control.dualstack.cn-north-1.amazonaws.com.cn", "cn-northwest-1": "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", "eu-central-1": "s3-control.dualstack.eu-central-1.amazonaws.com", @@ -150,6 +152,7 @@ public struct S3Control: AWSService { ]), [.dualstack, .fips]: .init(endpoints: [ "ca-central-1": "s3-control-fips.dualstack.ca-central-1.amazonaws.com", + "ca-west-1": "s3-control-fips.dualstack.ca-west-1.amazonaws.com", "us-east-1": "s3-control-fips.dualstack.us-east-1.amazonaws.com", "us-east-2": "s3-control-fips.dualstack.us-east-2.amazonaws.com", "us-gov-east-1": "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", @@ -159,6 +162,7 @@ public struct S3Control: AWSService { ]), [.fips]: .init(endpoints: [ "ca-central-1": "s3-control-fips.ca-central-1.amazonaws.com", + "ca-west-1": "s3-control-fips.ca-west-1.amazonaws.com", "us-east-1": "s3-control-fips.us-east-1.amazonaws.com", "us-east-2": "s3-control-fips.us-east-2.amazonaws.com", "us-gov-east-1": "s3-control-fips.us-gov-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/SESv2/SESv2_api.swift b/Sources/Soto/Services/SESv2/SESv2_api.swift index 343601eb53..34203d24df 100644 --- a/Sources/Soto/Services/SESv2/SESv2_api.swift +++ b/Sources/Soto/Services/SESv2/SESv2_api.swift @@ -126,7 +126,7 @@ public struct SESv2: AWSService { ) } - /// Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. A single configuration set can include more than one event destination. + /// Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target. A single configuration set can include more than one event destination. @Sendable public func createConfigurationSetEventDestination(_ input: CreateConfigurationSetEventDestinationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateConfigurationSetEventDestinationResponse { return try await self.client.execute( @@ -282,7 +282,7 @@ public struct SESv2: AWSService { ) } - /// Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. + /// Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target. @Sendable public func deleteConfigurationSetEventDestination(_ input: DeleteConfigurationSetEventDestinationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteConfigurationSetEventDestinationResponse { return try await self.client.execute( @@ -438,7 +438,7 @@ public struct SESv2: AWSService { ) } - /// Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. + /// Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target. @Sendable public func getConfigurationSetEventDestinations(_ input: GetConfigurationSetEventDestinationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetConfigurationSetEventDestinationsResponse { return try await self.client.execute( @@ -1204,7 +1204,7 @@ public struct SESv2: AWSService { ) } - /// Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. 
For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. + /// Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target. @Sendable public func updateConfigurationSetEventDestination(_ input: UpdateConfigurationSetEventDestinationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateConfigurationSetEventDestinationResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/SESv2/SESv2_shapes.swift b/Sources/Soto/Services/SESv2/SESv2_shapes.swift index e77e1bee8e..0e7cdcdba1 100644 --- a/Sources/Soto/Services/SESv2/SESv2_shapes.swift +++ b/Sources/Soto/Services/SESv2/SESv2_shapes.swift @@ -1859,11 +1859,26 @@ extension SESv2 { } } + public struct EventBridgeDestination: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported. + public let eventBusArn: String + + public init(eventBusArn: String) { + self.eventBusArn = eventBusArn + } + + private enum CodingKeys: String, CodingKey { + case eventBusArn = "EventBusArn" + } + } + public struct EventDestination: AWSDecodableShape { /// An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics. public let cloudWatchDestination: CloudWatchDestination? /// If true, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition. If false, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations. public let enabled: Bool? + /// An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. + public let eventBridgeDestination: EventBridgeDestination? /// An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift. public let kinesisFirehoseDestination: KinesisFirehoseDestination? /// The types of events that Amazon SES sends to the specified event destinations. SEND - The send request was successful and SES will attempt to deliver the message to the recipient’s mail server. (If account-level or global suppression is being used, SES will still count it as a send, but delivery is suppressed.) REJECT - SES accepted the email, but determined that it contained a virus and didn’t attempt to deliver it to the recipient’s mail server. BOUNCE - (Hard bounce) The recipient's mail server permanently rejected the email. (Soft bounces are only included when SES fails to deliver the email after retrying for a period of time.) COMPLAINT - The email was successfully delivered to the recipient’s mail server, but the recipient marked it as spam. DELIVERY - SES successfully delivered the email to the recipient's mail server. 
OPEN - The recipient received the message and opened it in their email client. CLICK - The recipient clicked one or more links in the email. RENDERING_FAILURE - The email wasn't sent because of a template rendering issue. This event type can occur when template data is missing, or when there is a mismatch between template parameters and data. (This event type only occurs when you send email using the SendTemplatedEmail or SendBulkTemplatedEmail API operations.) DELIVERY_DELAY - The email couldn't be delivered to the recipient’s mail server because a temporary issue occurred. Delivery delays can occur, for example, when the recipient's inbox is full, or when the receiving email server experiences a transient issue. SUBSCRIPTION - The email was successfully delivered, but the recipient updated their subscription preferences by clicking on an unsubscribe link as part of your subscription management. @@ -1872,12 +1887,13 @@ extension SESv2 { public let name: String /// An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide. public let pinpointDestination: PinpointDestination? - /// An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur. + /// An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur. public let snsDestination: SnsDestination? - public init(cloudWatchDestination: CloudWatchDestination? = nil, enabled: Bool? = nil, kinesisFirehoseDestination: KinesisFirehoseDestination? = nil, matchingEventTypes: [EventType], name: String, pinpointDestination: PinpointDestination? = nil, snsDestination: SnsDestination? = nil) { + public init(cloudWatchDestination: CloudWatchDestination? = nil, enabled: Bool? = nil, eventBridgeDestination: EventBridgeDestination? = nil, kinesisFirehoseDestination: KinesisFirehoseDestination? = nil, matchingEventTypes: [EventType], name: String, pinpointDestination: PinpointDestination? = nil, snsDestination: SnsDestination? = nil) { self.cloudWatchDestination = cloudWatchDestination self.enabled = enabled + self.eventBridgeDestination = eventBridgeDestination self.kinesisFirehoseDestination = kinesisFirehoseDestination self.matchingEventTypes = matchingEventTypes self.name = name @@ -1888,6 +1904,7 @@ extension SESv2 { private enum CodingKeys: String, CodingKey { case cloudWatchDestination = "CloudWatchDestination" case enabled = "Enabled" + case eventBridgeDestination = "EventBridgeDestination" case kinesisFirehoseDestination = "KinesisFirehoseDestination" case matchingEventTypes = "MatchingEventTypes" case name = "Name" @@ -1901,18 +1918,21 @@ extension SESv2 { public let cloudWatchDestination: CloudWatchDestination? /// If true, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition. If false, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations. public let enabled: Bool? + /// An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur. 
+ public let eventBridgeDestination: EventBridgeDestination? /// An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift. public let kinesisFirehoseDestination: KinesisFirehoseDestination? /// An array that specifies which events the Amazon SES API v2 should send to the destinations in this EventDestinationDefinition. public let matchingEventTypes: [EventType]? /// An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide. public let pinpointDestination: PinpointDestination? - /// An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur. + /// An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur. public let snsDestination: SnsDestination? - public init(cloudWatchDestination: CloudWatchDestination? = nil, enabled: Bool? = nil, kinesisFirehoseDestination: KinesisFirehoseDestination? = nil, matchingEventTypes: [EventType]? = nil, pinpointDestination: PinpointDestination? = nil, snsDestination: SnsDestination? = nil) { + public init(cloudWatchDestination: CloudWatchDestination? = nil, enabled: Bool? = nil, eventBridgeDestination: EventBridgeDestination? = nil, kinesisFirehoseDestination: KinesisFirehoseDestination? = nil, matchingEventTypes: [EventType]? = nil, pinpointDestination: PinpointDestination? = nil, snsDestination: SnsDestination? = nil) { self.cloudWatchDestination = cloudWatchDestination self.enabled = enabled + self.eventBridgeDestination = eventBridgeDestination self.kinesisFirehoseDestination = kinesisFirehoseDestination self.matchingEventTypes = matchingEventTypes self.pinpointDestination = pinpointDestination @@ -1922,6 +1942,7 @@ extension SESv2 { private enum CodingKeys: String, CodingKey { case cloudWatchDestination = "CloudWatchDestination" case enabled = "Enabled" + case eventBridgeDestination = "EventBridgeDestination" case kinesisFirehoseDestination = "KinesisFirehoseDestination" case matchingEventTypes = "MatchingEventTypes" case pinpointDestination = "PinpointDestination" diff --git a/Sources/Soto/Services/SNS/SNS_api.swift b/Sources/Soto/Services/SNS/SNS_api.swift index d4ece79760..20e65b490f 100644 --- a/Sources/Soto/Services/SNS/SNS_api.swift +++ b/Sources/Soto/Services/SNS/SNS_api.swift @@ -126,7 +126,7 @@ public struct SNS: AWSService { ) } - /// Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action. PlatformPrincipal and PlatformCredential are received from the notification service. For ADM, PlatformPrincipal is client id and PlatformCredential is client secret. For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key. For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key. 
For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and PlatformCredential is signing key. For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is API key. For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json . For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key. For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key. You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action. + /// Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action. PlatformPrincipal and PlatformCredential are received from the notification service. For ADM, PlatformPrincipal is client id and PlatformCredential is client secret. For APNS and APNS_SANDBOX using certificate credentials, PlatformPrincipal is SSL certificate and PlatformCredential is private key. For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is signing key ID and PlatformCredential is signing key. For Baidu, PlatformPrincipal is API key and PlatformCredential is secret key. For GCM (Firebase Cloud Messaging) using key credentials, there is no PlatformPrincipal. The PlatformCredential is API key. For GCM (Firebase Cloud Messaging) using token credentials, there is no PlatformPrincipal. The PlatformCredential is a JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in string format and special characters must be ignored. To format the file correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq @json . For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is private key. For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential is secret key. You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action. @Sendable public func createPlatformApplication(_ input: CreatePlatformApplicationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePlatformApplicationResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/SNS/SNS_shapes.swift b/Sources/Soto/Services/SNS/SNS_shapes.swift index 58beaa64da..08a4d4cf01 100644 --- a/Sources/Soto/Services/SNS/SNS_shapes.swift +++ b/Sources/Soto/Services/SNS/SNS_shapes.swift @@ -278,7 +278,7 @@ extension SNS { } public struct CreateTopicInput: AWSEncodableShape { - /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. 
By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. The following attributes apply only to FIFO topics: ArchivePolicy – Adds or updates an inline policy document to archive messages stored in the specified Amazon SNS topic. BeginningArchiveTime – The earliest starting point at which a message in the topic’s archive can be replayed from. This point in time is based on the configured message retention period set by the topic’s message archiving policy. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. + /// A map of attributes with their corresponding values. The following lists names, descriptions, and values of the special request parameters that the CreateTopic action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. DisplayName – The display name to use for a topic with SMS subscriptions. FifoTopic – Set to true to create a FIFO topic. Policy – The policy that defines who can access your topic. By default, only the topic owner can publish or subscribe to the topic. SignatureVersion – The signature version corresponds to the hashing algorithm used while creating the signature of the notifications, subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS. By default, SignatureVersion is set to 1. TracingConfig – Tracing mode of an Amazon SNS topic. By default TracingConfig is set to PassThrough, and the topic passes through the tracing header it receives from an Amazon SNS publisher to its subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data to topic owner account if the sampled flag in the tracing header is true. This is only supported on standard topics. The following attribute applies only to server-side encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SNS or a custom CMK. For more information, see Key Terms. For more examples, see KeyId in the Key Management Service API Reference. 
The following attributes apply only to FIFO topics: ArchivePolicy – Adds or updates an inline policy document to archive messages stored in the specified Amazon SNS topic. BeginningArchiveTime – The earliest starting point at which a message in the topic’s archive can be replayed from. This point in time is based on the configured message retention period set by the topic’s message archiving policy. ContentBasedDeduplication – Enables content-based deduplication for FIFO topics. By default, ContentBasedDeduplication is set to false. If you create a FIFO topic and this attribute is false, you must specify a value for the MessageDeduplicationId parameter for the Publish action. When you set ContentBasedDeduplication to true, Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). (Optional) To override the generated value, you can specify a value for the MessageDeduplicationId parameter for the Publish action. @OptionalCustomCoding> public var attributes: [String: String]? /// The body of the policy document you want to use for this topic. You can only add one policy per topic. The policy must be in JSON string format. Length Constraints: Maximum length of 30,720. @@ -541,7 +541,7 @@ extension SNS { } public struct GetSubscriptionAttributesResponse: AWSDecodableShape { - /// A map of the subscription's attributes. Attributes in this map include the following: ConfirmationWasAuthenticated – true if the subscription confirmation request was authenticated. DeliveryPolicy – The JSON serialization of the subscription's delivery policy. EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults. FilterPolicy – The filter policy JSON that is assigned to the subscription. For more information, see Amazon SNS Message Filtering in the Amazon SNS Developer Guide. FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types: MessageAttributes (default) – The filter is applied on the message attributes. MessageBody – The filter is applied on the message body. Owner – The Amazon Web Services account ID of the subscription's owner. PendingConfirmation – true if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token. RawMessageDelivery – true if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints. RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing. SubscriptionArn – The subscription's ARN. TopicArn – The topic ARN that the subscription is associated with. The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role that has the following: Permission to write to the Kinesis Data Firehose delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. 
For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide. + /// A map of the subscription's attributes. Attributes in this map include the following: ConfirmationWasAuthenticated – true if the subscription confirmation request was authenticated. DeliveryPolicy – The JSON serialization of the subscription's delivery policy. EffectiveDeliveryPolicy – The JSON serialization of the effective delivery policy that takes into account the topic delivery policy and account system defaults. FilterPolicy – The filter policy JSON that is assigned to the subscription. For more information, see Amazon SNS Message Filtering in the Amazon SNS Developer Guide. FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types: MessageAttributes (default) – The filter is applied on the message attributes. MessageBody – The filter is applied on the message body. Owner – The Amazon Web Services account ID of the subscription's owner. PendingConfirmation – true if the subscription hasn't been confirmed. To confirm a pending subscription, call the ConfirmSubscription action with a confirmation token. RawMessageDelivery – true if raw message delivery is enabled for the subscription. Raw messages are free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints. RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing. SubscriptionArn – The subscription's ARN. TopicArn – The topic ARN that the subscription is associated with. The following attribute applies only to Amazon Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role that has the following: Permission to write to the Firehose delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see Fanout to Firehose delivery streams in the Amazon SNS Developer Guide. @OptionalCustomCoding> public var attributes: [String: String]? @@ -1099,7 +1099,7 @@ extension SNS { public let messageStructure: String? /// The phone number to which you want to deliver an SMS message. Use E.164 format. If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters. public let phoneNumber: String? - /// Optional parameter to be used as the "Subject" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard JSON messages delivered to other endpoints. Constraints: Subjects must be ASCII text that begins with a letter, number, or punctuation mark; must not include line breaks or control characters; and must be less than 100 characters long. + /// Optional parameter to be used as the "Subject" line when the message is delivered to email endpoints. This field will also be included, if present, in the standard JSON messages delivered to other endpoints. Constraints: Subjects must be UTF-8 text with no line breaks or control characters, and less than 100 characters long. public let subject: String? 
/// If you don't specify a value for the TargetArn parameter, you must specify a value for the PhoneNumber or TopicArn parameters. public let targetArn: String? @@ -1254,7 +1254,7 @@ extension SNS { } public struct SetSubscriptionAttributesInput: AWSEncodableShape { - /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that this action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic. FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types: MessageAttributes (default) – The filter is applied on the message attributes. MessageBody – The filter is applied on the message body. RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata. RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing. The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role that has the following: Permission to write to the Kinesis Data Firehose delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide. + /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that this action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic. FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types: MessageAttributes (default) – The filter is applied on the message attributes. MessageBody – The filter is applied on the message body. RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata. RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing. 
The following attribute applies only to Amazon Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role that has the following: Permission to write to the Firehose delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see Fanout to Firehose delivery streams in the Amazon SNS Developer Guide. public let attributeName: String /// The new value for the attribute in JSON format. public let attributeValue: String? @@ -1296,7 +1296,7 @@ extension SNS { } public struct SubscribeInput: AWSEncodableShape { - /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the Subscribe action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic. FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types: MessageAttributes (default) – The filter is applied on the message attributes. MessageBody – The filter is applied on the message body. RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata. RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing. The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role that has the following: Permission to write to the Kinesis Data Firehose delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see Fanout to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide. The following attributes apply only to FIFO topics: ReplayPolicy – Adds or updates an inline policy document for a subscription to replay messages stored in the specified Amazon SNS topic. ReplayStatus – Retrieves the status of the subscription message replay, which can be one of the following: Completed – The replay has successfully redelivered all messages, and is now delivering newly published messages. If an ending point was specified in the ReplayPolicy then the subscription will no longer receive newly published messages. In progress – The replay is currently replaying the selected messages. Failed – The replay was unable to complete. Pending – The default state while the replay initiates. + /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the Subscribe action uses: DeliveryPolicy – The policy that defines how Amazon SNS retries failed deliveries to HTTP/S endpoints. 
FilterPolicy – The simple JSON object that lets your subscriber receive only a subset of messages, rather than receiving every message published to the topic. FilterPolicyScope – This attribute lets you choose the filtering scope by using one of the following string value types: MessageAttributes (default) – The filter is applied on the message attributes. MessageBody – The filter is applied on the message body. RawMessageDelivery – When set to true, enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process JSON formatting, which is otherwise created for Amazon SNS metadata. RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing. The following attribute applies only to Amazon Data Firehose delivery stream subscriptions: SubscriptionRoleArn – The ARN of the IAM role that has the following: Permission to write to the Firehose delivery stream Amazon SNS listed as a trusted entity Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see Fanout to Firehose delivery streams in the Amazon SNS Developer Guide. The following attributes apply only to FIFO topics: ReplayPolicy – Adds or updates an inline policy document for a subscription to replay messages stored in the specified Amazon SNS topic. ReplayStatus – Retrieves the status of the subscription message replay, which can be one of the following: Completed – The replay has successfully redelivered all messages, and is now delivering newly published messages. If an ending point was specified in the ReplayPolicy then the subscription will no longer receive newly published messages. In progress – The replay is currently replaying the selected messages. Failed – The replay was unable to complete. Pending – The default state while the replay initiates. @OptionalCustomCoding> public var attributes: [String: String]? /// The endpoint that you want to receive notifications. Endpoints vary by protocol: For the http protocol, the (public) endpoint is a URL beginning with http://. For the https protocol, the (public) endpoint is a URL beginning with https://. For the email protocol, the endpoint is an email address. For the email-json protocol, the endpoint is an email address. For the sms protocol, the endpoint is a phone number of an SMS-enabled device. For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue. For the application protocol, the endpoint is the EndpointArn of a mobile app and device. For the lambda protocol, the endpoint is the ARN of an Lambda function. For the firehose protocol, the endpoint is the ARN of an Amazon Kinesis Data Firehose delivery stream. diff --git a/Sources/Soto/Services/SQS/SQS_api.swift b/Sources/Soto/Services/SQS/SQS_api.swift index e64d4a1916..5d88148015 100644 --- a/Sources/Soto/Services/SQS/SQS_api.swift +++ b/Sources/Soto/Services/SQS/SQS_api.swift @@ -92,7 +92,9 @@ public struct SQS: AWSService { // MARK: API Calls - /// Adds a permission to a queue for a specific principal. This allows sharing access to the queue. When you create a queue, you have full control access rights for the queue. 
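The subscription attributes listed above can also be adjusted on an existing subscription; a minimal sketch using SetSubscriptionAttributes, assuming an existing subscription ARN (the filter policy value and helper name are placeholders):
import SotoSNS

// Hypothetical helper: applies a FilterPolicy so the subscriber only receives matching messages.
func applyFilterPolicy(sns: SNS, subscriptionArn: String) async throws {
    let input = SNS.SetSubscriptionAttributesInput(
        attributeName: "FilterPolicy",
        attributeValue: #"{"eventType": ["order_placed"]}"#,
        subscriptionArn: subscriptionArn
    )
    try await sns.setSubscriptionAttributes(input)
}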
Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide. AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon SQS Developer Guide. An Amazon SQS policy can have a maximum of seven actions per statement. To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. Amazon SQS AddPermission does not support adding a non-account principal. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Adds a permission to a queue for a specific principal. This allows sharing access to the queue. When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide. AddPermission generates a policy for you. You can use SetQueueAttributes to upload your policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in the Amazon SQS Developer Guide. An Amazon SQS policy can have a maximum of seven actions per statement. To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. Amazon SQS AddPermission does not support adding a non-account principal. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable public func addPermission(_ input: AddPermissionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -105,7 +107,7 @@ public struct SQS: AWSService { ) } - /// Cancels a specified message movement task. A message movement can only be cancelled when the current status is RUNNING. Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet. This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue. Currently, only standard queues are supported. Only one active message movement task is supported per queue at any given time. + /// Cancels a specified message movement task. A message movement can only be cancelled when the current status is RUNNING. Cancelling a message movement task does not revert the messages that have already been moved. It can only stop the messages that have not been moved yet. This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. 
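A hedged sketch of the AddPermission call described above, granting another account SendMessage access; the account ID, label, and helper name are placeholders:
import SotoSQS

// Hypothetical helper: shares a queue with a partner account for sending only.
func shareQueueForSending(sqs: SQS, queueUrl: String) async throws {
    let input = SQS.AddPermissionRequest(
        actions: ["SendMessage"],          // action names without the "sqs:" prefix
        awsAccountIds: ["111122223333"],   // placeholder account ID
        label: "AllowPartnerSendMessage",
        queueUrl: queueUrl
    )
    try await sqs.addPermission(input)
}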
In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue. Only one active message movement task is supported per queue at any given time. @Sendable public func cancelMessageMoveTask(_ input: CancelMessageMoveTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelMessageMoveTaskResult { return try await self.client.execute( @@ -118,7 +120,7 @@ public struct SQS: AWSService { ) } - /// Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: Sent to a queue by a producer. Received from the queue by a consumer. Deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received. + /// Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. 
For more information, see Visibility Timeout in the Amazon SQS Developer Guide. For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error. An Amazon SQS message has three basic states: Sent to a queue by a producer. Received from the queue by a consumer. Deleted from the queue. A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages. Limits that apply to in flight messages are unrelated to the unlimited number of stored messages. For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request. For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages. If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time. Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received. @Sendable public func changeMessageVisibility(_ input: ChangeMessageVisibilityRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -144,7 +146,9 @@ public struct SQS: AWSService { ) } - /// Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. 
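A minimal sketch of ChangeMessageVisibility as described above, extending the timeout for a message that is still being processed; the receipt handle would come from a prior ReceiveMessage call, and the helper name and timeout value are placeholders:
import SotoSQS

// Hypothetical helper: gives the consumer two more minutes before the message becomes visible again.
func extendVisibility(sqs: SQS, queueUrl: String, receiptHandle: String) async throws {
    let input = SQS.ChangeMessageVisibilityRequest(
        queueUrl: queueUrl,
        receiptHandle: receiptHandle,
        visibilityTimeout: 120  // seconds, within the 0-43,200 range noted above
    )
    try await sqs.changeMessageVisibility(input)
}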
If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names: If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind: If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide. If you don't provide a value for an attribute, the queue is created with the default value for the attribute. If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues. After you create a queue, you must wait at least one second after the queue is created to be able to use the queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. be aware of existing queue names: If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue. If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable public func createQueue(_ input: CreateQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateQueueResult { return try await self.client.execute( @@ -183,7 +187,9 @@ public struct SQS: AWSService { ) } - /// Deletes the queue specified by the QueueUrl, regardless of the queue's contents. Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available. When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist. When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. Cross-account permissions don't apply to this action. 
For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. The delete operation uses the HTTP GET verb. + /// Deletes the queue specified by the QueueUrl, regardless of the queue's contents. Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available. When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist. When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. The delete operation uses the HTTP GET verb. @Sendable public func deleteQueue(_ input: DeleteQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -235,7 +241,7 @@ public struct SQS: AWSService { ) } - /// Gets the most recent message movement tasks (up to 10) under a specific source queue. This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue. Currently, only standard queues are supported. Only one active message movement task is supported per queue at any given time. + /// Gets the most recent message movement tasks (up to 10) under a specific source queue. This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue. Only one active message movement task is supported per queue at any given time. @Sendable public func listMessageMoveTasks(_ input: ListMessageMoveTasksRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMessageMoveTasksResult { return try await self.client.execute( @@ -248,7 +254,10 @@ public struct SQS: AWSService { ) } - /// List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// List all cost allocation tags added to the specified Amazon SQS queue. For an overview, see Tagging + /// Your Amazon SQS Queues in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable public func listQueueTags(_ input: ListQueueTagsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListQueueTagsResult { return try await self.client.execute( @@ -261,7 +270,9 @@ public struct SQS: AWSService { ) } - /// Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. 
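A sketch of the ListQueueTags call described above; the helper name is a placeholder and the result's tags map is treated as optional:
import SotoSQS

// Hypothetical helper: prints the cost allocation tags attached to a queue.
func printQueueTags(sqs: SQS, queueUrl: String) async throws {
    let result = try await sqs.listQueueTags(SQS.ListQueueTagsRequest(queueUrl: queueUrl))
    for (key, value) in result.tags ?? [:] {
        print("\(key) = \(value)")
    }
}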
If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned. The listQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned. The listQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable public func listQueues(_ input: ListQueuesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListQueuesResult { return try await self.client.execute( @@ -300,7 +311,9 @@ public struct SQS: AWSService { ) } - /// Revokes any permissions in the queue policy that matches the specified Label parameter. Only the owner of a queue can remove permissions from it. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. + /// Revokes any permissions in the queue policy that matches the specified Label parameter. Only the owner of a queue can remove permissions from it. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. @Sendable public func removePermission(_ input: RemovePermissionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -313,7 +326,7 @@ public struct SQS: AWSService { ) } - /// Delivers a message to the specified queue. A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Any characters not included in this list will be rejected. For more information, see the W3C specification for characters. + /// Delivers a message to the specified queue. 
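The MaxResults/NextToken pagination described above is wrapped by the generated paginator; a sketch assuming the async PaginatorSequence variant, with the prefix and helper name as placeholders:
import SotoSQS

// Hypothetical helper: collects the URLs of all queues whose names start with "prod-".
func listProdQueueURLs(sqs: SQS) async throws -> [String] {
    var urls: [String] = []
    let request = SQS.ListQueuesRequest(maxResults: 100, queueNamePrefix: "prod-")
    for try await page in sqs.listQueuesPaginator(request) {
        urls.append(contentsOf: page.queueUrls ?? [])
    }
    return urls
}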
A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters. #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character. @Sendable public func sendMessage(_ input: SendMessageRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendMessageResult { return try await self.client.execute( @@ -326,7 +339,7 @@ public struct SQS: AWSService { ) } - /// You can use SendMessageBatch to send up to 10 messages to the specified queue by assigning either identical or different values to each message (or by not assigning values at all). This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent. The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200. The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144 bytes). A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Any characters not included in this list will be rejected. For more information, see the W3C specification for characters. If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue. + /// You can use SendMessageBatch to send up to 10 messages to the specified queue by assigning either identical or different values to each message (or by not assigning values at all). This is a batch version of SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent. The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200. The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144 bytes). A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters. #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character. If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue. 
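A minimal SendMessage sketch matching the description above; the JSON body and helper name are placeholders. SendMessageBatch works the same way with up to 10 entries per request:
import SotoSQS

// Hypothetical helper: sends one JSON-formatted message body to the queue.
func sendOrderEvent(sqs: SQS, queueUrl: String) async throws {
    let input = SQS.SendMessageRequest(
        messageBody: #"{"event": "order_placed", "orderId": "12345"}"#,
        queueUrl: queueUrl
    )
    let result = try await sqs.sendMessage(input)
    print("Sent message id:", result.messageId ?? "unknown")
}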
@Sendable public func sendMessageBatch(_ input: SendMessageBatchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SendMessageBatchResult { return try await self.client.execute( @@ -339,7 +352,9 @@ public struct SQS: AWSService { ) } - /// Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. + /// Sets the value of one or more queue attributes, like a policy. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy. @Sendable public func setQueueAttributes(_ input: SetQueueAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -352,7 +367,7 @@ public struct SQS: AWSService { ) } - /// Starts an asynchronous task to move messages from a specified source queue to a specified destination queue. This action is currently limited to supporting message redrive from queues that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. Non-SQS queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are currently not supported. In dead-letter queues redrive context, the StartMessageMoveTask the source queue is the DLQ, while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue. Currently, only standard queues support redrive. FIFO queues don't support redrive. Only one active message movement task is supported per queue at any given time. + /// Starts an asynchronous task to move messages from a specified source queue to a specified destination queue. This action is currently limited to supporting message redrive from queues that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. 
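A sketch of SetQueueAttributes as documented above, keeping in mind that most attribute changes take up to 60 seconds to propagate and MessageRetentionPeriod up to 15 minutes; the attribute values and helper name are placeholders:
import SotoSQS

// Hypothetical helper: shortens retention and visibility timeout on an existing queue.
func tightenQueueSettings(sqs: SQS, queueUrl: String) async throws {
    let input = SQS.SetQueueAttributesRequest(
        attributes: [
            .messageRetentionPeriod: "86400",  // 1 day, in seconds
            .visibilityTimeout: "60"
        ],
        queueUrl: queueUrl
    )
    try await sqs.setQueueAttributes(input)
}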
Non-SQS queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are currently not supported. In a dead-letter queue redrive context, for StartMessageMoveTask the source queue is the DLQ, while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue. Only one active message movement task is supported per queue at any given time. @Sendable public func startMessageMoveTask(_ input: StartMessageMoveTaskRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartMessageMoveTaskResult { return try await self.client.execute( @@ -365,7 +380,12 @@ public struct SQS: AWSService { ) } - /// Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. When you use queue tags, keep the following guidelines in mind: Adding more than 50 tags to a queue isn't recommended. Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings. Tags are case-sensitive. A new tag with a key identical to that of an existing tag overwrites the existing tag. For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging + /// Your Amazon SQS Queues in the Amazon SQS Developer Guide. When you use queue tags, keep the following guidelines in mind: Adding more than 50 tags to a queue isn't recommended. Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings. Tags are case-sensitive. A new tag with a key identical to that of an existing tag overwrites the existing tag. For a full list of tag restrictions, see + /// Quotas related to queues + /// in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable public func tagQueue(_ input: TagQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -378,7 +398,10 @@ public struct SQS: AWSService { ) } - /// Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging + /// Your Amazon SQS Queues in the Amazon SQS Developer Guide. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. @Sendable public func untagQueue(_ input: UntagQueueRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -424,7 +447,9 @@ extension SQS { ) } - /// Returns a list of your queues in the current region. The response includes a maximum of 1,000 results.
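A hedged sketch of the StartMessageMoveTask redrive described above, assuming the source ARN is a standard dead-letter queue; omitting destinationArn sends messages back to their original source queues. The ARN, rate limit, and helper name are placeholders:
import SotoSQS

// Hypothetical helper: starts an asynchronous redrive out of a DLQ at a capped rate.
func redriveDeadLetterQueue(sqs: SQS, dlqArn: String) async throws {
    let input = SQS.StartMessageMoveTaskRequest(
        maxNumberOfMessagesPerSecond: 50,  // optional throttle
        sourceArn: dlqArn
    )
    let result = try await sqs.startMessageMoveTask(input)
    print("Started move task:", result.taskHandle ?? "unknown")
}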
If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned. The listQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Returns a list of your queues in the current region. The response includes a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned. The listQueues methods supports pagination. Set parameter MaxResults in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/SQS/SQS_shapes.swift b/Sources/Soto/Services/SQS/SQS_shapes.swift index 0ca96f61d3..d50f2cf82b 100644 --- a/Sources/Soto/Services/SQS/SQS_shapes.swift +++ b/Sources/Soto/Services/SQS/SQS_shapes.swift @@ -27,6 +27,7 @@ extension SQS { // MARK: Enums public enum MessageSystemAttributeName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case all = "All" case approximateFirstReceiveTimestamp = "ApproximateFirstReceiveTimestamp" case approximateReceiveCount = "ApproximateReceiveCount" case awsTraceHeader = "AWSTraceHeader" @@ -238,11 +239,18 @@ extension SQS { } public struct CreateQueueRequest: AWSEncodableShape { - /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses: DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0. MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. 
Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the IAM User Guide. ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide. The following attributes apply only to dead-letter queues: RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows: deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. denyAll – No source queues can specify this queue as the dead-letter queue. byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue. sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. The following attributes apply only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the Amazon Web Services managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the Key Management Service API Reference. KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). 
A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work? SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). The following attributes apply only to FIFO (first-in-first-out) queues: FifoQueue – Designates a queue as FIFO. Valid values are true and false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO queue logic in the Amazon SQS Developer Guide. ContentBasedDeduplication – Enables content-based deduplication. Valid values are true and false. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following: Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The following attributes apply only to high throughput for FIFO queues: DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup. To enable high throughput for FIFO queues, do the following: Set DeduplicationScope to messageGroup. Set FifoThroughputLimit to perMessageGroupId. If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. + /// A map of attributes with their corresponding values. The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses: DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0. MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. 
Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the IAM User Guide. ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide. The following attributes apply only to dead-letter queues: RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows: deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. denyAll – No source queues can specify this queue as the dead-letter queue. byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue. sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. The following attributes apply only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. 
While the alias of the Amazon Web Services managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the Key Management Service API Reference. KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work? SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). The following attributes apply only to FIFO (first-in-first-out) queues: FifoQueue – Designates a queue as FIFO. Valid values are true and false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO queue logic in the Amazon SQS Developer Guide. ContentBasedDeduplication – Enables content-based deduplication. Valid values are true and false. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following: Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The following attributes apply only to + /// high throughput + /// for FIFO queues: DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup. To enable high throughput for FIFO queues, do the following: Set DeduplicationScope to messageGroup. Set FifoThroughputLimit to perMessageGroupId. If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. 
For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. public let attributes: [QueueAttributeName: String]? /// The name of the new queue. The following limits apply to this name: A queue name can have up to 80 characters. Valid values: alphanumeric characters, hyphens (-), and underscores (_). A FIFO queue name must end with the .fifo suffix. Queue URLs and names are case-sensitive. public let queueName: String - /// Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide. When you use queue tags, keep the following guidelines in mind: Adding more than 50 tags to a queue isn't recommended. Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings. Tags are case-sensitive. A new tag with a key identical to that of an existing tag overwrites the existing tag. For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide. + /// Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging + /// Your Amazon SQS Queues in the Amazon SQS Developer Guide. When you use queue tags, keep the following guidelines in mind: Adding more than 50 tags to a queue isn't recommended. Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings. Tags are case-sensitive. A new tag with a key identical to that of an existing tag overwrites the existing tag. For a full list of tag restrictions, see + /// Quotas related to queues + /// in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account permissions don't apply to this action. For more information, + /// see Grant + /// cross-account permissions to a role and a username in the Amazon SQS Developer Guide. public let tags: [String: String]? public init(attributes: [QueueAttributeName: String]? = nil, queueName: String, tags: [String: String]? = nil) { @@ -366,7 +374,9 @@ extension SQS { } public struct GetQueueAttributesRequest: AWSEncodableShape { - /// A list of attributes for which to retrieve information. The AttributeNames parameter is optional, but if you don't specify values for this parameter, the request returns empty results. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. The following attributes are supported: The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessages metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency. All – Returns all values. ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue. ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter. 
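A sketch built on the CreateQueueRequest initializer shown above: a standard queue with a dead-letter redrive policy and a cost allocation tag. The queue name, dead-letter queue ARN, tag, and helper name are placeholders:
import SotoSQS

// Hypothetical helper: creates a standard queue wired to an existing dead-letter queue.
func createOrdersQueue(sqs: SQS) async throws -> String? {
    let redrivePolicy = #"{"deadLetterTargetArn":"arn:aws:sqs:us-east-1:111122223333:orders-dlq","maxReceiveCount":"5"}"#
    let input = SQS.CreateQueueRequest(
        attributes: [
            .messageRetentionPeriod: "345600",  // 4 days, the default noted above
            .redrivePolicy: redrivePolicy,
            .visibilityTimeout: "30"
        ],
        queueName: "orders",
        tags: ["team": "payments"]
    )
    let result = try await sqs.createQueue(input)
    return result.queueUrl
}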
ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window. CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time). DelaySeconds – Returns the default delay on the queue in seconds. LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time). MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it. MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. Policy – Returns the policy of the queue. QueueArn – Returns the Amazon resource name (ARN) of the queue. ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive. VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide. The following attributes apply only to dead-letter queues: RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows: deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. denyAll – No source queues can specify this queue as the dead-letter queue. byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue. sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. 
The following attributes apply only to server-side-encryption: KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see How Does the Data Key Reuse Period Work?. SqsManagedSseEnabled – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). The following attributes apply only to FIFO (first-in-first-out) queues: FifoQueue – Returns information about whether the queue is FIFO. For more information, see FIFO queue logic in the Amazon SQS Developer Guide. To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix. ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. The following attributes apply only to high throughput for FIFO queues: DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup. To enable high throughput for FIFO queues, do the following: Set DeduplicationScope to messageGroup. Set FifoThroughputLimit to perMessageGroupId. If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. + /// A list of attributes for which to retrieve information. The AttributeNames parameter is optional, but if you don't specify values for this parameter, the request returns empty results. In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully. The following attributes are supported: The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessages metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency. All – Returns all values. ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue. ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter. ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window. 
CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time). DelaySeconds – Returns the default delay on the queue in seconds. LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time). MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it. MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. Policy – Returns the policy of the queue. QueueArn – Returns the Amazon resource name (ARN) of the queue. ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive. VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide. The following attributes apply only to dead-letter queues: RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows: deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. denyAll – No source queues can specify this queue as the dead-letter queue. byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue. sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. The following attributes apply only to server-side-encryption: KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. 
KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see How Does the Data Key Reuse Period Work?. SqsManagedSseEnabled – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). The following attributes apply only to FIFO (first-in-first-out) queues: FifoQueue – Returns information about whether the queue is FIFO. For more information, see FIFO queue logic in the Amazon SQS Developer Guide. To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix. ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. The following attributes apply only to + /// high throughput + /// for FIFO queues: DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup. To enable high throughput for FIFO queues, do the following: Set DeduplicationScope to messageGroup. Set FifoThroughputLimit to perMessageGroupId. If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. public let attributeNames: [QueueAttributeName]? /// The URL of the Amazon SQS queue whose attribute information is retrieved. Queue URLs and names are case-sensitive. public let queueUrl: String @@ -496,7 +506,7 @@ extension SQS { public struct ListMessageMoveTasksResultEntry: AWSDecodableShape { /// The approximate number of messages already moved to the destination queue. public let approximateNumberOfMessagesMoved: Int64? - /// The number of messages to be moved from the source queue. This number is obtained at the time of starting the message movement task. + /// The number of messages to be moved from the source queue. This number is obtained at the time of starting the message movement task and is only included after the message movement task is selected to start. public let approximateNumberOfMessagesToMove: Int64? /// The ARN of the destination queue if it has been specified in the StartMessageMoveTask request. If a DestinationArn has not been specified in the StartMessageMoveTask request, this field value will be NULL. public let destinationArn: String? @@ -611,7 +621,10 @@ extension SQS { public let md5OfBody: String? /// An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321. public let md5OfMessageAttributes: String? - /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide. 
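As a hedged illustration of the GetQueueAttributes attribute names documented above, a minimal Soto sketch; the queue URL, region, and AWSClient are assumptions, and the GetQueueAttributesRequest initializer is assumed to follow the usual generated pattern.

import SotoSQS

// Sketch: read a few attributes of an existing queue.
// Attribute case names follow the generated QueueAttributeName enum.
func printQueueSummary(client: AWSClient, queueUrl: String) async throws {
    let sqs = SQS(client: client, region: .useast1)
    let response = try await sqs.getQueueAttributes(
        SQS.GetQueueAttributesRequest(
            attributeNames: [.queueArn, .approximateNumberOfMessages, .redrivePolicy],
            queueUrl: queueUrl
        )
    )
    // Attribute values come back as strings keyed by QueueAttributeName.
    let arn = response.attributes?[.queueArn] ?? "unknown"
    let depth = response.attributes?[.approximateNumberOfMessages] ?? "0"
    print("queue \(arn) has ~\(depth) messages")
}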
+ /// Each message attribute consists of a Name, Type, + /// and Value. For more information, see + /// Amazon SQS + /// message attributes in the Amazon SQS Developer Guide. public let messageAttributes: [String: MessageAttributeValue]? /// A unique identifier for the message. A MessageId is considered unique across all Amazon Web Services accounts for an extended period of time. public let messageId: String? @@ -711,25 +724,40 @@ extension SQS { } public struct ReceiveMessageRequest: AWSEncodableShape { - /// A list of attributes that need to be returned along with each message. These attributes include: All – Returns all values. ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action. MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. SequenceNumber – Returns the value provided by Amazon SQS. + /// This parameter has been deprecated but will be supported for backward compatibility. To provide attribute names, you are encouraged to use MessageSystemAttributeNames. A list of attributes that need to be returned along with each message. These attributes include: All – Returns all values. ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action. MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. SequenceNumber – Returns the value provided by Amazon SQS. public let attributeNames: [QueueAttributeName]? /// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values: 1 to 10. Default: 1. public let maxNumberOfMessages: Int? /// The name of the message attribute, where N is the index. The name can contain alphanumeric characters and the underscore (_), hyphen (-), and period (.).
The name is case-sensitive and must be unique among all attribute names for the message. The name must not start with AWS-reserved prefixes such as AWS. or Amazon. (or any casing variants). The name must not start or end with a period (.), and it should not have periods in succession (..). The name can be up to 256 characters long. When using ReceiveMessage, you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.*. public let messageAttributeNames: [String]? + /// A list of attributes that need to be returned along with each message. These attributes include: All – Returns all values. ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds). ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted. AWSTraceHeader – Returns the X-Ray trace header string. SenderId For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R. For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds). SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action. MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence. SequenceNumber – Returns the value provided by Amazon SQS. + public let messageSystemAttributeNames: [MessageSystemAttributeName]? /// The URL of the Amazon SQS queue from which messages are received. Queue URLs and names are case-sensitive. public let queueUrl: String - /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired. You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId. It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changes). During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates. 
Also, if a consumer whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary. While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible. If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order. The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon SQS Developer Guide. + /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, it is possible to retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired. You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action. When you set FifoQueue, a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed). During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon SQS Developer Guide. If a caller of the ReceiveMessage action still processes messages when the visibility timeout expires and messages become visible, another worker consuming from the same queue can receive the same messages and therefore process duplicates.
For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon SQS Developer Guide. public let receiveRequestAttemptId: String? /// The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request. public let visibilityTimeout: Int? - /// The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call returns successfully with an empty list of messages. To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients. + /// The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call does not return a message list. To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage requests is longer than the WaitTimeSeconds parameter. For example, with the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients. public let waitTimeSeconds: Int? - public init(attributeNames: [QueueAttributeName]? = nil, maxNumberOfMessages: Int? = nil, messageAttributeNames: [String]? = nil, queueUrl: String, receiveRequestAttemptId: String? = nil, visibilityTimeout: Int? = nil, waitTimeSeconds: Int? = nil) { + public init(maxNumberOfMessages: Int? = nil, messageAttributeNames: [String]? = nil, messageSystemAttributeNames: [MessageSystemAttributeName]? = nil, queueUrl: String, receiveRequestAttemptId: String? = nil, visibilityTimeout: Int? = nil, waitTimeSeconds: Int? = nil) { + self.attributeNames = nil + self.maxNumberOfMessages = maxNumberOfMessages + self.messageAttributeNames = messageAttributeNames + self.messageSystemAttributeNames = messageSystemAttributeNames + self.queueUrl = queueUrl + self.receiveRequestAttemptId = receiveRequestAttemptId + self.visibilityTimeout = visibilityTimeout + self.waitTimeSeconds = waitTimeSeconds + } + + @available(*, deprecated, message: "Members attributeNames have been deprecated") + public init(attributeNames: [QueueAttributeName]? = nil, maxNumberOfMessages: Int? = nil, messageAttributeNames: [String]? = nil, messageSystemAttributeNames: [MessageSystemAttributeName]? = nil, queueUrl: String, receiveRequestAttemptId: String? = nil, visibilityTimeout: Int? = nil, waitTimeSeconds: Int? 
= nil) { self.attributeNames = attributeNames self.maxNumberOfMessages = maxNumberOfMessages self.messageAttributeNames = messageAttributeNames + self.messageSystemAttributeNames = messageSystemAttributeNames self.queueUrl = queueUrl self.receiveRequestAttemptId = receiveRequestAttemptId self.visibilityTimeout = visibilityTimeout @@ -740,6 +768,7 @@ extension SQS { case attributeNames = "AttributeNames" case maxNumberOfMessages = "MaxNumberOfMessages" case messageAttributeNames = "MessageAttributeNames" + case messageSystemAttributeNames = "MessageSystemAttributeNames" case queueUrl = "QueueUrl" case receiveRequestAttemptId = "ReceiveRequestAttemptId" case visibilityTimeout = "VisibilityTimeout" @@ -799,7 +828,10 @@ extension SQS { public let delaySeconds: Int? /// An identifier for a message in this batch used to communicate the result. The Ids of a batch request need to be unique within a request. This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens(-), and underscores (_). public let id: String - /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide. + /// Each message attribute consists of a Name, Type, + /// and Value. For more information, see + /// Amazon SQS + /// message attributes in the Amazon SQS Developer Guide. public let messageAttributes: [String: MessageAttributeValue]? /// The body of the message. public let messageBody: String @@ -855,7 +887,8 @@ extension SQS { public let md5OfMessageAttributes: String? /// An MD5 digest of the non-URL-encoded message body string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321. public let md5OfMessageBody: String - /// An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321. + /// An MD5 digest of the non-URL-encoded message system attribute string. You can use this + /// attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321. public let md5OfMessageSystemAttributes: String? /// An identifier for the message. public let messageId: String @@ -884,13 +917,16 @@ extension SQS { public struct SendMessageRequest: AWSEncodableShape { /// The length of time, in seconds, for which to delay a specific message. Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value become available for processing after the delay period is finished. If you don't specify a value, the default value for the queue applies. When you set FifoQueue, you can't set DelaySeconds per message. You can set this parameter only on a queue level. public let delaySeconds: Int? - /// Each message attribute consists of a Name, Type, and Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer Guide. + /// Each message attribute consists of a Name, Type, + /// and Value. For more information, see + /// Amazon SQS + /// message attributes in the Amazon SQS Developer Guide. public let messageAttributes: [String: MessageAttributeValue]? - /// The message to send. The minimum size is one character. 
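The messageSystemAttributeNames member and the non-deprecated ReceiveMessageRequest initializer added above can be exercised roughly as follows; a sketch with a placeholder queue URL and an assumed AWSClient (the SendMessage documentation continues below).

import SotoSQS

// Sketch: long-poll a queue using the new messageSystemAttributeNames parameter
// instead of the deprecated attributeNames member.
func pollOnce(client: AWSClient, queueUrl: String) async throws {
    let sqs = SQS(client: client, region: .useast1)
    let response = try await sqs.receiveMessage(
        SQS.ReceiveMessageRequest(
            maxNumberOfMessages: 10,
            messageAttributeNames: ["All"],
            messageSystemAttributeNames: [.all],
            queueUrl: queueUrl,
            waitTimeSeconds: 20  // long polling; keep the HTTP client timeout above this value
        )
    )
    for message in response.messages ?? [] {
        print(message.messageId ?? "-", message.body ?? "")
        // Delete each message after successful processing (not shown here).
    }
}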
The maximum size is 256 KiB. A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed: #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Any characters not included in this list will be rejected. For more information, see the W3C specification for characters. + /// The message to send. The minimum size is one character. The maximum size is 256 KiB. A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters. #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character. public let messageBody: String /// This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The MessageDeduplicationId is available to the consumer of the message (this can be useful for troubleshooting delivery issues). If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages. Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted. The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS Developer Guide. public let messageDeduplicationId: String? - /// This parameter applies only to FIFO (first-in-first-out) queues. The tag that specifies that a message belongs to a specific message group.
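Pulling the FIFO SendMessage parameters documented here together (the message group description continues right after this sketch), a hedged Soto example; the queue URL, body, and identifiers are placeholders.

import SotoSQS

// Sketch: send a message to a FIFO queue. The explicit MessageDeduplicationId is
// optional when ContentBasedDeduplication is enabled on the queue.
func sendOrderEvent(client: AWSClient, queueUrl: String) async throws {
    let sqs = SQS(client: client, region: .useast1)
    let response = try await sqs.sendMessage(
        SQS.SendMessageRequest(
            messageBody: #"{"orderId": "1234", "state": "CREATED"}"#,
            messageDeduplicationId: "order-1234-created",
            messageGroupId: "order-1234",  // required for FIFO queues; not allowed on standard queues
            queueUrl: queueUrl
        )
    )
    print("sent \(response.messageId ?? "-") with sequence \(response.sequenceNumber ?? "-")")
}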
Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion. You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails. ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId. The length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer Guide. MessageGroupId is required for FIFO queues. You can't use it for Standard queues. + /// This parameter applies only to FIFO (first-in-first-out) queues. The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple consumers can process the queue, but the session data of each user is processed in a FIFO fashion. You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId, the action fails. ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId, the messages are sorted by time sent. The caller can't specify a MessageGroupId. The maximum length of MessageGroupId is 128 characters. Valid values: alphanumeric characters and punctuation (!"#$%&'()*+,-./:;?@[\]^_`{|}~). For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer Guide. MessageGroupId is required for FIFO queues. You can't use it for Standard queues. public let messageGroupId: String? /// The message system attribute to send. Each message system attribute consists of a Name, Type, and Value. Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted X-Ray trace header string. The size of a message system attribute doesn't count towards the total size of a message. public let messageSystemAttributes: [MessageSystemAttributeNameForSends: MessageSystemAttributeValue]? @@ -923,7 +959,8 @@ extension SQS { public let md5OfMessageAttributes: String? /// An MD5 digest of the non-URL-encoded message body string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321. public let md5OfMessageBody: String? - /// An MD5 digest of the non-URL-encoded message system attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. + /// An MD5 digest of the non-URL-encoded message system attribute string. You can use this + /// attribute to verify that Amazon SQS received the message correctly. 
Amazon SQS URL-decodes the message before creating the MD5 digest. public let md5OfMessageSystemAttributes: String? /// An attribute containing the MessageId of the message sent to the queue. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide. public let messageId: String? @@ -948,7 +985,9 @@ extension SQS { } public struct SetQueueAttributesRequest: AWSEncodableShape { - /// A map of attributes to set. The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses: DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0. MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the Identity and Access Management User Guide. ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide. The following attributes apply only to dead-letter queues: RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows: deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. denyAll – No source queues can specify this queue as the dead-letter queue. 
byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue. sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. The following attributes apply only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the Key Management Service API Reference. KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?. SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). The following attribute applies only to FIFO (first-in-first-out) queues: ContentBasedDeduplication – Enables content-based deduplication. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following: Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The following attributes apply only to high throughput for FIFO queues: DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. 
The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup. To enable high throughput for FIFO queues, do the following: Set DeduplicationScope to messageGroup. Set FifoThroughputLimit to perMessageGroupId. If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. + /// A map of attributes to set. The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses: DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). Default: 0. MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages. Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the Identity and Access Management User Guide. ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide. The following attributes apply only to dead-letter queues: RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows: deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue. RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows: redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are: allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue. 
denyAll – No source queues can specify this queue as the dead-letter queue. byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue. sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll. The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. The following attributes apply only to server-side-encryption: KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias . For more examples, see KeyId in the Key Management Service API Reference. KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?. SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS). The following attribute applies only to FIFO (first-in-first-out) queues: ContentBasedDeduplication – Enables content-based deduplication. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following: Every message must have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message). If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error. If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered. If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered. The following attributes apply only to + /// high throughput + /// for FIFO queues: DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue. FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. 
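A hedged sketch of applying the RedrivePolicy and related attributes covered by this SetQueueAttributes documentation (the high-throughput steps conclude just below); the dead-letter queue ARN, receive count, timeout, and AWSClient are illustrative assumptions.

import SotoSQS

// Sketch: attach a dead-letter queue to an existing queue via the RedrivePolicy
// JSON string described above. The maxReceiveCount of 10 mirrors the documented default.
func attachDeadLetterQueue(client: AWSClient, queueUrl: String, dlqArn: String) async throws {
    let sqs = SQS(client: client, region: .useast1)
    let redrivePolicy = #"{"deadLetterTargetArn":"\#(dlqArn)","maxReceiveCount":"10"}"#
    try await sqs.setQueueAttributes(
        SQS.SetQueueAttributesRequest(
            attributes: [
                .redrivePolicy: redrivePolicy,
                .visibilityTimeout: "60"  // illustrative value, in seconds
            ],
            queueUrl: queueUrl
        )
    )
}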
The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup. To enable high throughput for FIFO queues, do the following: Set DeduplicationScope to messageGroup. Set FifoThroughputLimit to perMessageGroupId. If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified. For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide. public let attributes: [QueueAttributeName: String] /// The URL of the Amazon SQS queue whose attributes are set. Queue URLs and names are case-sensitive. public let queueUrl: String diff --git a/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift b/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift index 4645bd1414..bc8ef8db70 100644 --- a/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift +++ b/Sources/Soto/Services/SSOOIDC/SSOOIDC_api.swift @@ -123,7 +123,7 @@ public struct SSOOIDC: AWSService { ) } - /// Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-term credentials for the assigned AWS accounts or to access application APIs using bearer authentication. + /// Creates and returns access and refresh tokens for clients and applications that are authenticated using IAM entities. The access token can be used to fetch short-term credentials for the assigned Amazon Web Services accounts or to access application APIs using bearer authentication. @Sendable public func createTokenWithIAM(_ input: CreateTokenWithIAMRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTokenWithIAMResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift b/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift index d144d6b8e0..df4ca6fc36 100644 --- a/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift +++ b/Sources/Soto/Services/SSOOIDC/SSOOIDC_shapes.swift @@ -35,6 +35,8 @@ extension SSOOIDC { public let clientSecret: String /// Used only when calling this API for the Authorization Code grant type. The short-term code is used to identify this authorization request. This grant type is currently unsupported for the CreateToken API. public let code: String? + /// Used only when calling this API for the Authorization Code grant type. This value is generated by the client and presented to validate the original code challenge value the client passed at authorization time. + public let codeVerifier: String? /// Used only when calling this API for the Device Code grant type. This short-term code is used to identify this authorization request. This comes from the result of the StartDeviceAuthorization API. public let deviceCode: String? /// Supports the following OAuth grant types: Device Code and Refresh Token. Specify either of the following values, depending on the grant type that you want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh Token - refresh_token For information about how to obtain the device code, see the StartDeviceAuthorization topic. @@ -46,10 +48,11 @@ extension SSOOIDC { /// The list of scopes for which authorization is requested. The access token that is issued is limited to the scopes that are granted. If this value is not specified, IAM Identity Center authorizes all scopes that are configured for the client during the call to RegisterClient. public let scope: [String]? 
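To show where the new codeVerifier member of CreateTokenRequest fits, a hedged Soto sketch of redeeming an authorization code with PKCE; every identifier, the redirect URI, and the verifier are placeholders, and since the documentation above notes limits on the Authorization Code grant for CreateToken, treat this purely as a shape-level illustration.

import SotoSSOOIDC

// Sketch: where the new codeVerifier parameter slots into CreateTokenRequest for a
// PKCE authorization-code exchange. All values below are placeholders.
func redeemAuthorizationCode(client: AWSClient) async throws -> String? {
    let oidc = SSOOIDC(client: client, region: .useast1)
    let response = try await oidc.createToken(
        SSOOIDC.CreateTokenRequest(
            clientId: "exampleClientId",
            clientSecret: "exampleClientSecret",
            code: "exampleAuthorizationCode",
            codeVerifier: "exampleCodeVerifierGeneratedAtAuthorizationTime",
            grantType: "authorization_code",
            redirectUri: "http://127.0.0.1:8080/callback"
        )
    )
    return response.accessToken
}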
- public init(clientId: String, clientSecret: String, code: String? = nil, deviceCode: String? = nil, grantType: String, redirectUri: String? = nil, refreshToken: String? = nil, scope: [String]? = nil) { + public init(clientId: String, clientSecret: String, code: String? = nil, codeVerifier: String? = nil, deviceCode: String? = nil, grantType: String, redirectUri: String? = nil, refreshToken: String? = nil, scope: [String]? = nil) { self.clientId = clientId self.clientSecret = clientSecret self.code = code + self.codeVerifier = codeVerifier self.deviceCode = deviceCode self.grantType = grantType self.redirectUri = redirectUri @@ -61,6 +64,7 @@ extension SSOOIDC { case clientId = "clientId" case clientSecret = "clientSecret" case code = "code" + case codeVerifier = "codeVerifier" case deviceCode = "deviceCode" case grantType = "grantType" case redirectUri = "redirectUri" @@ -70,7 +74,7 @@ extension SSOOIDC { } public struct CreateTokenResponse: AWSDecodableShape { - /// A bearer token to access AWS accounts and applications assigned to a user. + /// A bearer token to access Amazon Web Services accounts and applications assigned to a user. public let accessToken: String? /// Indicates the time in seconds when an access token will expire. public let expiresIn: Int? @@ -105,6 +109,8 @@ extension SSOOIDC { public let clientId: String /// Used only when calling this API for the Authorization Code grant type. This short-term code is used to identify this authorization request. The code is obtained through a redirect from IAM Identity Center to a redirect URI persisted in the Authorization Code GrantOptions for the application. public let code: String? + /// Used only when calling this API for the Authorization Code grant type. This value is generated by the client and presented to validate the original code challenge value the client passed at authorization time. + public let codeVerifier: String? /// Supports the following OAuth grant types: Authorization Code, Refresh Token, JWT Bearer, and Token Exchange. Specify one of the following values, depending on the grant type that you want: * Authorization Code - authorization_code * Refresh Token - refresh_token * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange public let grantType: String /// Used only when calling this API for the Authorization Code grant type. This value specifies the location of the client or application that has registered to receive the authorization code. @@ -120,10 +126,11 @@ extension SSOOIDC { /// Used only when calling this API for the Token Exchange grant type. This value specifies the type of token that is passed as the subject of the exchange. The following value is supported: * Access Token - urn:ietf:params:oauth:token-type:access_token public let subjectTokenType: String? - public init(assertion: String? = nil, clientId: String, code: String? = nil, grantType: String, redirectUri: String? = nil, refreshToken: String? = nil, requestedTokenType: String? = nil, scope: [String]? = nil, subjectToken: String? = nil, subjectTokenType: String? = nil) { + public init(assertion: String? = nil, clientId: String, code: String? = nil, codeVerifier: String? = nil, grantType: String, redirectUri: String? = nil, refreshToken: String? = nil, requestedTokenType: String? = nil, scope: [String]? = nil, subjectToken: String? = nil, subjectTokenType: String? 
= nil) { self.assertion = assertion self.clientId = clientId self.code = code + self.codeVerifier = codeVerifier self.grantType = grantType self.redirectUri = redirectUri self.refreshToken = refreshToken @@ -137,6 +144,7 @@ extension SSOOIDC { case assertion = "assertion" case clientId = "clientId" case code = "code" + case codeVerifier = "codeVerifier" case grantType = "grantType" case redirectUri = "redirectUri" case refreshToken = "refreshToken" @@ -148,7 +156,7 @@ extension SSOOIDC { } public struct CreateTokenWithIAMResponse: AWSDecodableShape { - /// A bearer token to access AWS accounts and applications assigned to a user. + /// A bearer token to access Amazon Web Services accounts and applications assigned to a user. public let accessToken: String? /// Indicates the time in seconds when an access token will expire. public let expiresIn: Int? @@ -189,18 +197,34 @@ extension SSOOIDC { public let clientName: String /// The type of client. The service supports only public as a client type. Anything other than public will be rejected by the service. public let clientType: String + /// This IAM Identity Center application ARN is used to define administrator-managed configuration for public client access to resources. At authorization, the scopes, grants, and redirect URI available to this client will be restricted by this application resource. + public let entitledApplicationArn: String? + /// The list of OAuth 2.0 grant types that are defined by the client. This list is used to restrict the token granting flows available to the client. + public let grantTypes: [String]? + /// The IAM Identity Center Issuer URL associated with an instance of IAM Identity Center. This value is needed for user access to resources through the client. + public let issuerUrl: String? + /// The list of redirect URI that are defined by the client. At completion of authorization, this list is used to restrict what locations the user agent can be redirected back to. + public let redirectUris: [String]? /// The list of scopes that are defined by the client. Upon authorization, this list is used to restrict permissions when granting an access token. public let scopes: [String]? - public init(clientName: String, clientType: String, scopes: [String]? = nil) { + public init(clientName: String, clientType: String, entitledApplicationArn: String? = nil, grantTypes: [String]? = nil, issuerUrl: String? = nil, redirectUris: [String]? = nil, scopes: [String]? 
= nil) { self.clientName = clientName self.clientType = clientType + self.entitledApplicationArn = entitledApplicationArn + self.grantTypes = grantTypes + self.issuerUrl = issuerUrl + self.redirectUris = redirectUris self.scopes = scopes } private enum CodingKeys: String, CodingKey { case clientName = "clientName" case clientType = "clientType" + case entitledApplicationArn = "entitledApplicationArn" + case grantTypes = "grantTypes" + case issuerUrl = "issuerUrl" + case redirectUris = "redirectUris" case scopes = "scopes" } } @@ -305,6 +329,7 @@ public struct SSOOIDCErrorType: AWSErrorType { case invalidClientException = "InvalidClientException" case invalidClientMetadataException = "InvalidClientMetadataException" case invalidGrantException = "InvalidGrantException" + case invalidRedirectUriException = "InvalidRedirectUriException" case invalidRequestException = "InvalidRequestException" case invalidRequestRegionException = "InvalidRequestRegionException" case invalidScopeException = "InvalidScopeException" @@ -345,6 +370,8 @@ public struct SSOOIDCErrorType: AWSErrorType { public static var invalidClientMetadataException: Self { .init(.invalidClientMetadataException) } /// Indicates that a request contains an invalid grant. This can occur if a client makes a CreateToken request with an invalid grant type. public static var invalidGrantException: Self { .init(.invalidGrantException) } + /// Indicates that one or more redirect URI in the request is not supported for this operation. + public static var invalidRedirectUriException: Self { .init(.invalidRedirectUriException) } /// Indicates that something is wrong with the input to the request. For example, a required parameter might be missing or out of range. public static var invalidRequestException: Self { .init(.invalidRequestException) } /// Indicates that a token provided as input to the request was issued by and is only usable by calling IAM Identity Center endpoints in another region. diff --git a/Sources/Soto/Services/SWF/SWF_api.swift b/Sources/Soto/Services/SWF/SWF_api.swift index e4093843df..8920d60d1f 100644 --- a/Sources/Soto/Services/SWF/SWF_api.swift +++ b/Sources/Soto/Services/SWF/SWF_api.swift @@ -145,7 +145,33 @@ public struct SWF: AWSService { ) } - /// Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run. This operation is eventually consistent. The results are best effort and may not exactly reflect recent updates and changes. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: Use a Resource element with the domain name to limit the action to only specified domains. Use an Action element to allow or deny permission to call this action. Constrain the following parameters by using a Condition element with the appropriate keys. activityType.name: String constraint. The key is swf:activityType.name. activityType.version: String constraint. The key is swf:activityType.version. If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide. + /// Deletes the specified activity type. 
Note: Prior to deletion, activity types must first be deprecated. After an activity type has been deleted, you cannot schedule new activities of that type. Activities that started before the type was deleted will continue to run. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: Use a Resource element with the domain name to limit the action to only specified domains. Use an Action element to allow or deny permission to call this action. Constrain the following parameters by using a Condition element with the appropriate keys. activityType.name: String constraint. The key is swf:activityType.name. activityType.version: String constraint. The key is swf:activityType.version. If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide. + @Sendable + public func deleteActivityType(_ input: DeleteActivityTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteActivityType", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes the specified workflow type. Note: Prior to deletion, workflow types must first be deprecated. After a workflow type has been deleted, you cannot create new executions of that type. Executions that started before the type was deleted will continue to run. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: Use a Resource element with the domain name to limit the action to only specified domains. Use an Action element to allow or deny permission to call this action. Constrain the following parameters by using a Condition element with the appropriate keys. workflowType.name: String constraint. The key is swf:workflowType.name. workflowType.version: String constraint. The key is swf:workflowType.version. If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide. + @Sendable + public func deleteWorkflowType(_ input: DeleteWorkflowTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteWorkflowType", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deprecates the specified activity type. After an activity type has been deprecated, you cannot create new tasks of that activity type. Tasks of this type that were scheduled before the type was deprecated continue to run. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: Use a Resource element with the domain name to limit the action to only specified domains. Use an Action element to allow or deny permission to call this action. Constrain the following parameters by using a Condition element with the appropriate keys. activityType.name: String constraint. The key is swf:activityType.name. 
activityType.version: String constraint. The key is swf:activityType.version. If the caller doesn't have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows in the Amazon SWF Developer Guide. @Sendable public func deprecateActivityType(_ input: DeprecateActivityTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( diff --git a/Sources/Soto/Services/SWF/SWF_shapes.swift b/Sources/Soto/Services/SWF/SWF_shapes.swift index 33e3f0258f..090a70ce2a 100644 --- a/Sources/Soto/Services/SWF/SWF_shapes.swift +++ b/Sources/Soto/Services/SWF/SWF_shapes.swift @@ -1330,6 +1330,52 @@ extension SWF { } } + public struct DeleteActivityTypeInput: AWSEncodableShape { + /// The activity type to delete. + public let activityType: ActivityType + /// The name of the domain in which the activity type is registered. + public let domain: String + + public init(activityType: ActivityType, domain: String) { + self.activityType = activityType + self.domain = domain + } + + public func validate(name: String) throws { + try self.activityType.validate(name: "\(name).activityType") + try self.validate(self.domain, name: "domain", parent: name, max: 256) + try self.validate(self.domain, name: "domain", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case activityType = "activityType" + case domain = "domain" + } + } + + public struct DeleteWorkflowTypeInput: AWSEncodableShape { + /// The name of the domain in which the workflow type is registered. + public let domain: String + /// The workflow type to delete. + public let workflowType: WorkflowType + + public init(domain: String, workflowType: WorkflowType) { + self.domain = domain + self.workflowType = workflowType + } + + public func validate(name: String) throws { + try self.validate(self.domain, name: "domain", parent: name, max: 256) + try self.validate(self.domain, name: "domain", parent: name, min: 1) + try self.workflowType.validate(name: "\(name).workflowType") + } + + private enum CodingKeys: String, CodingKey { + case domain = "domain" + case workflowType = "workflowType" + } + } + public struct DeprecateActivityTypeInput: AWSEncodableShape { /// The activity type to deprecate. public let activityType: ActivityType @@ -4432,6 +4478,7 @@ public struct SWFErrorType: AWSErrorType { case tooManyTagsFault = "TooManyTagsFault" case typeAlreadyExistsFault = "TypeAlreadyExistsFault" case typeDeprecatedFault = "TypeDeprecatedFault" + case typeNotDeprecatedFault = "TypeNotDeprecatedFault" case unknownResourceFault = "UnknownResourceFault" case workflowExecutionAlreadyStartedFault = "WorkflowExecutionAlreadyStartedFault" } @@ -4470,6 +4517,8 @@ public struct SWFErrorType: AWSErrorType { public static var typeAlreadyExistsFault: Self { .init(.typeAlreadyExistsFault) } /// Returned when the specified activity or workflow type was already deprecated. public static var typeDeprecatedFault: Self { .init(.typeDeprecatedFault) } + /// Returned when the resource type has not been deprecated. + public static var typeNotDeprecatedFault: Self { .init(.typeNotDeprecatedFault) } /// Returned when the named resource cannot be found with in the scope of this operation (region or domain). 
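The new delete operations only succeed once a type has already been deprecated; otherwise the service raises the new TypeNotDeprecatedFault. A hedged sketch of the deprecate-then-delete sequence follows; the ActivityType/WorkflowType initializers, the DeprecateActivityTypeInput shape, and the client setup are assumptions not shown in these hunks.

    // Assumption: standard Soto initializer and region; `awsClient` is a configured AWSClient.
    let swf = SWF(client: awsClient, region: .useast1)

    // Assumptions: ActivityType/WorkflowType expose (name:version:) initializers and
    // DeprecateActivityTypeInput takes the same activityType/domain pair as the delete input.
    let activityType = SWF.ActivityType(name: "process-order", version: "1.0")   // hypothetical type
    try await swf.deprecateActivityType(.init(activityType: activityType, domain: "my-domain"))
    try await swf.deleteActivityType(.init(activityType: activityType, domain: "my-domain"))

    let workflowType = SWF.WorkflowType(name: "order-workflow", version: "1.0")  // hypothetical type
    try await swf.deleteWorkflowType(.init(domain: "my-domain", workflowType: workflowType))  // assumes the type was deprecated earlier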
This could happen if the named resource was never created or is no longer available for this operation. public static var unknownResourceFault: Self { .init(.unknownResourceFault) } /// Returned by StartWorkflowExecution when an open execution with the same workflowId is already running in the specified domain. diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index a166a92301..99594d830b 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -451,7 +451,7 @@ public struct SageMaker: AWSService { ) } - /// Create a hub. Hub APIs are only callable through SageMaker Studio. + /// Create a hub. @Sendable public func createHub(_ input: CreateHubRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateHubResponse { return try await self.client.execute( @@ -464,6 +464,19 @@ public struct SageMaker: AWSService { ) } + /// Create a hub content reference in order to add a model in the JumpStart public hub to a private hub. + @Sendable + public func createHubContentReference(_ input: CreateHubContentReferenceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateHubContentReferenceResponse { + return try await self.client.execute( + operation: "CreateHubContentReference", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Defines the settings you will use for the human review workflow user interface. Reviewers will see a three-panel interface with an instruction area, the item to review, and an input area. @Sendable public func createHumanTaskUi(_ input: CreateHumanTaskUiRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateHumanTaskUiResponse { @@ -568,6 +581,19 @@ public struct SageMaker: AWSService { ) } + /// Creates an MLflow Tracking Server using a general purpose Amazon S3 bucket as the artifact store. For more information, see Create an MLflow Tracking Server. + @Sendable + public func createMlflowTrackingServer(_ input: CreateMlflowTrackingServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMlflowTrackingServerResponse { + return try await self.client.execute( + operation: "CreateMlflowTrackingServer", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions. Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that you defined for the model in the hosting environment. To run a batch transform using your model, you start a job with the CreateTransformJob API. SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location. In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. 
In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other Amazon Web Services resources, you grant necessary permissions via this role. @Sendable public func createModel(_ input: CreateModelInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateModelOutput { @@ -737,6 +763,19 @@ public struct SageMaker: AWSService { ) } + /// Returns a presigned URL that you can use to connect to the MLflow UI attached to your tracking server. For more information, see Launch the MLflow UI using a presigned URL. + @Sendable + public func createPresignedMlflowTrackingServerUrl(_ input: CreatePresignedMlflowTrackingServerUrlRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedMlflowTrackingServerUrlResponse { + return try await self.client.execute( + operation: "CreatePresignedMlflowTrackingServerUrl", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the SageMaker console, when you choose Open next to a notebook instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page. The IAM role or user used to call this API defines the permissions to access the notebook instance. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. You can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIP condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address. The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the Amazon Web Services console sign-in page. @Sendable public func createPresignedNotebookInstanceUrl(_ input: CreatePresignedNotebookInstanceUrlInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreatePresignedNotebookInstanceUrlOutput { @@ -776,7 +815,7 @@ public struct SageMaker: AWSService { ) } - /// Creates a space used for real time collaboration in a domain. + /// Creates a private space or a space used for real time collaboration in a domain. @Sendable public func createSpace(_ input: CreateSpaceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSpaceResponse { return try await self.client.execute( @@ -1154,7 +1193,7 @@ public struct SageMaker: AWSService { ) } - /// Delete a hub. Hub APIs are only callable through SageMaker Studio. + /// Delete a hub. @Sendable public func deleteHub(_ input: DeleteHubRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1167,7 +1206,7 @@ public struct SageMaker: AWSService { ) } - /// Delete the contents of a hub. Hub APIs are only callable through SageMaker Studio. + /// Delete the contents of a hub. 
@Sendable public func deleteHubContent(_ input: DeleteHubContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws { return try await self.client.execute( @@ -1180,6 +1219,19 @@ public struct SageMaker: AWSService { ) } + /// Delete a hub content reference in order to remove a model from a private hub. + @Sendable + public func deleteHubContentReference(_ input: DeleteHubContentReferenceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + return try await self.client.execute( + operation: "DeleteHubContentReference", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Use this operation to delete a human task user interface (worker task template). To see a list of human task user interfaces (work task templates) in your account, use ListHumanTaskUis. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis. @Sendable public func deleteHumanTaskUi(_ input: DeleteHumanTaskUiRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteHumanTaskUiResponse { @@ -1258,6 +1310,19 @@ public struct SageMaker: AWSService { ) } + /// Deletes an MLflow Tracking Server. For more information, see Clean up MLflow resources. + @Sendable + public func deleteMlflowTrackingServer(_ input: DeleteMlflowTrackingServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteMlflowTrackingServerResponse { + return try await self.client.execute( + operation: "DeleteMlflowTrackingServer", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a model. The DeleteModel API deletes only the model entry that was created in SageMaker when you called the CreateModel API. It does not delete model artifacts, inference code, or the IAM role that you specified when creating the model. @Sendable public func deleteModel(_ input: DeleteModelInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1505,7 +1570,7 @@ public struct SageMaker: AWSService { ) } - /// Use this operation to delete a workforce. If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce. If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will recieve a ResourceInUse error. + /// Use this operation to delete a workforce. If you want to create a new workforce in an Amazon Web Services Region where a workforce already exists, use this operation to delete the existing workforce and then use CreateWorkforce to create a new workforce. If a private workforce contains one or more work teams, you must use the DeleteWorkteam operation to delete all work teams before you delete the workforce. If you try to delete a workforce that contains one or more work teams, you will receive a ResourceInUse error. @Sendable public func deleteWorkforce(_ input: DeleteWorkforceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWorkforceResponse { return try await self.client.execute( @@ -1648,7 +1713,7 @@ public struct SageMaker: AWSService { ) } - /// Retrieves information of an instance (also called a node interchangeably) of a SageMaker HyperPod cluster. 
+ /// Retrieves information of a node (also called an instance interchangeably) of a SageMaker HyperPod cluster. @Sendable public func describeClusterNode(_ input: DescribeClusterNodeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeClusterNodeResponse { return try await self.client.execute( @@ -1856,7 +1921,7 @@ public struct SageMaker: AWSService { ) } - /// Describe a hub. Hub APIs are only callable through SageMaker Studio. + /// Describes a hub. @Sendable public func describeHub(_ input: DescribeHubRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeHubResponse { return try await self.client.execute( @@ -1869,7 +1934,7 @@ public struct SageMaker: AWSService { ) } - /// Describe the content of a hub. Hub APIs are only callable through SageMaker Studio. + /// Describe the content of a hub. @Sendable public func describeHubContent(_ input: DescribeHubContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeHubContentResponse { return try await self.client.execute( @@ -1999,6 +2064,19 @@ public struct SageMaker: AWSService { ) } + /// Returns information about an MLflow Tracking Server. + @Sendable + public func describeMlflowTrackingServer(_ input: DescribeMlflowTrackingServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeMlflowTrackingServerResponse { + return try await self.client.execute( + operation: "DescribeMlflowTrackingServer", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes a model that you created using the CreateModel API. @Sendable public func describeModel(_ input: DescribeModelInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeModelOutput { @@ -2064,7 +2142,7 @@ public struct SageMaker: AWSService { ) } - /// Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace. To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace. + /// Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace. If you provided a KMS Key ID when you created your model package, you will see the KMS Decrypt API call in your CloudTrail logs when you use this API. To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace. @Sendable public func describeModelPackage(_ input: DescribeModelPackageInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeModelPackageOutput { return try await self.client.execute( @@ -2324,7 +2402,7 @@ public struct SageMaker: AWSService { ) } - /// Gets information about a specific work team. You can see information such as the create date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN). + /// Gets information about a specific work team. You can see information such as the creation date, the last updated date, membership information, and the work team's Amazon Resource Name (ARN). @Sendable public func describeWorkteam(_ input: DescribeWorkteamRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWorkteamResponse { return try await self.client.execute( @@ -2454,7 +2532,7 @@ public struct SageMaker: AWSService { ) } - /// Import hub content. Hub APIs are only callable through SageMaker Studio.
+ /// Import hub content. @Sendable public func importHubContent(_ input: ImportHubContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportHubContentResponse { return try await self.client.execute( @@ -2792,7 +2870,7 @@ public struct SageMaker: AWSService { ) } - /// List hub content versions. Hub APIs are only callable through SageMaker Studio. + /// List hub content versions. @Sendable public func listHubContentVersions(_ input: ListHubContentVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListHubContentVersionsResponse { return try await self.client.execute( @@ -2805,7 +2883,7 @@ public struct SageMaker: AWSService { ) } - /// List the contents of a hub. Hub APIs are only callable through SageMaker Studio. + /// List the contents of a hub. @Sendable public func listHubContents(_ input: ListHubContentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListHubContentsResponse { return try await self.client.execute( @@ -2818,7 +2896,7 @@ public struct SageMaker: AWSService { ) } - /// List all existing hubs. Hub APIs are only callable through SageMaker Studio. + /// List all existing hubs. @Sendable public func listHubs(_ input: ListHubsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListHubsResponse { return try await self.client.execute( @@ -2974,6 +3052,19 @@ public struct SageMaker: AWSService { ) } + /// Lists all MLflow Tracking Servers. + @Sendable + public func listMlflowTrackingServers(_ input: ListMlflowTrackingServersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListMlflowTrackingServersResponse { + return try await self.client.execute( + operation: "ListMlflowTrackingServers", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists model bias jobs definitions that satisfy various filters. @Sendable public func listModelBiasJobDefinitions(_ input: ListModelBiasJobDefinitionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListModelBiasJobDefinitionsResponse { @@ -3572,6 +3663,19 @@ public struct SageMaker: AWSService { ) } + /// Programmatically start an MLflow Tracking Server. + @Sendable + public func startMlflowTrackingServer(_ input: StartMlflowTrackingServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartMlflowTrackingServerResponse { + return try await self.client.execute( + operation: "StartMlflowTrackingServer", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Starts a previously stopped monitoring schedule. By default, when you successfully create a new schedule, the status of a monitoring schedule is scheduled. @Sendable public func startMonitoringSchedule(_ input: StartMonitoringScheduleRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3715,6 +3819,19 @@ public struct SageMaker: AWSService { ) } + /// Programmatically stop an MLflow Tracking Server. + @Sendable + public func stopMlflowTrackingServer(_ input: StopMlflowTrackingServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopMlflowTrackingServerResponse { + return try await self.client.execute( + operation: "StopMlflowTrackingServer", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Stops a previously started monitoring schedule. 
@Sendable public func stopMonitoringSchedule(_ input: StopMonitoringScheduleRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3988,7 +4105,7 @@ public struct SageMaker: AWSService { ) } - /// Update a hub. Hub APIs are only callable through SageMaker Studio. + /// Update a hub. @Sendable public func updateHub(_ input: UpdateHubRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateHubResponse { return try await self.client.execute( @@ -4066,6 +4183,19 @@ public struct SageMaker: AWSService { ) } + /// Updates properties of an existing MLflow Tracking Server. + @Sendable + public func updateMlflowTrackingServer(_ input: UpdateMlflowTrackingServerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateMlflowTrackingServerResponse { + return try await self.client.execute( + operation: "UpdateMlflowTrackingServer", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update an Amazon SageMaker Model Card. You cannot update both model card content and model card status in a single call. @Sendable public func updateModelCard(_ input: UpdateModelCardRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateModelCardResponse { @@ -4934,6 +5064,25 @@ extension SageMaker { ) } + /// Lists all MLflow Tracking Servers. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listMlflowTrackingServersPaginator( + _ input: ListMlflowTrackingServersRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListMlflowTrackingServersRequest, ListMlflowTrackingServersResponse> { + return .init( + input: input, + command: self.listMlflowTrackingServers, + inputKey: \ListMlflowTrackingServersRequest.nextToken, + outputKey: \ListMlflowTrackingServersResponse.nextToken, + logger: logger + ) + } + /// Lists model bias jobs definitions that satisfy various filters. /// Return PaginatorSequence for operation.
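Because the new paginator wraps ListMlflowTrackingServers in an AWSClient.PaginatorSequence keyed on nextToken, callers can drain every page with for try await instead of threading tokens by hand. A small sketch follows; the no-argument request initializer and the `sagemaker` client are assumptions, not part of this hunk.

    // Assumption: `sagemaker` is a configured SageMaker service client.
    let pages = sagemaker.listMlflowTrackingServersPaginator(.init())  // assumption: an empty request lists all tracking servers
    for try await page in pages {
        print(page)  // each page is a ListMlflowTrackingServersResponse containing a batch of tracking server summaries
    }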
/// @@ -6173,6 +6322,21 @@ extension SageMaker.ListLineageGroupsRequest: AWSPaginateToken { } } +extension SageMaker.ListMlflowTrackingServersRequest: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> SageMaker.ListMlflowTrackingServersRequest { + return .init( + createdAfter: self.createdAfter, + createdBefore: self.createdBefore, + maxResults: self.maxResults, + mlflowVersion: self.mlflowVersion, + nextToken: token, + sortBy: self.sortBy, + sortOrder: self.sortOrder, + trackingServerStatus: self.trackingServerStatus + ) + } +} + extension SageMaker.ListModelBiasJobDefinitionsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> SageMaker.ListModelBiasJobDefinitionsRequest { return .init( @@ -6265,6 +6429,7 @@ extension SageMaker.ListModelPackageGroupsInput: AWSPaginateToken { return .init( creationTimeAfter: self.creationTimeAfter, creationTimeBefore: self.creationTimeBefore, + crossAccountFilterOption: self.crossAccountFilterOption, maxResults: self.maxResults, nameContains: self.nameContains, nextToken: token, diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index ead0f72116..e267e81846 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -123,6 +123,14 @@ extension SageMaker { case mlG54Xlarge = "ml.g5.4xlarge" case mlG58Xlarge = "ml.g5.8xlarge" case mlG5Xlarge = "ml.g5.xlarge" + case mlG612Xlarge = "ml.g6.12xlarge" + case mlG616Xlarge = "ml.g6.16xlarge" + case mlG624Xlarge = "ml.g6.24xlarge" + case mlG62Xlarge = "ml.g6.2xlarge" + case mlG648Xlarge = "ml.g6.48xlarge" + case mlG64Xlarge = "ml.g6.4xlarge" + case mlG68Xlarge = "ml.g6.8xlarge" + case mlG6Xlarge = "ml.g6.xlarge" case mlGeospatialInteractive = "ml.geospatial.interactive" case mlM512Xlarge = "ml.m5.12xlarge" case mlM516Xlarge = "ml.m5.16xlarge" @@ -313,13 +321,19 @@ extension SageMaker { } public enum AutoMLAlgorithm: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case arima = "arima" case catboost = "catboost" + case cnnQr = "cnn-qr" + case deepar = "deepar" + case ets = "ets" case extraTrees = "extra-trees" case fastai = "fastai" case lightgbm = "lightgbm" case linearLearner = "linear-learner" case mlp = "mlp" case nnTorch = "nn-torch" + case npts = "npts" + case prophet = "prophet" case randomforest = "randomforest" case xgboost = "xgboost" public var description: String { return self.rawValue } @@ -829,6 +843,12 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum EnabledOrDisabled: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "Disabled" + case enabled = "Enabled" + public var description: String { return self.rawValue } + } + public enum EndpointConfigSortKey: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case creationTime = "CreationTime" case name = "Name" @@ -981,8 +1001,15 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum HubContentSupportStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deprecated = "Deprecated" + case supported = "Supported" + public var description: String { return self.rawValue } + } + public enum HubContentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case model = "Model" + case modelReference = "ModelReference" 
case notebook = "Notebook" public var description: String { return self.rawValue } } @@ -1216,6 +1243,14 @@ extension SageMaker { case mlG54Xlarge = "ml.g5.4xlarge" case mlG58Xlarge = "ml.g5.8xlarge" case mlG5Xlarge = "ml.g5.xlarge" + case mlG612Xlarge = "ml.g6.12xlarge" + case mlG616Xlarge = "ml.g6.16xlarge" + case mlG624Xlarge = "ml.g6.24xlarge" + case mlG62Xlarge = "ml.g6.2xlarge" + case mlG648Xlarge = "ml.g6.48xlarge" + case mlG64Xlarge = "ml.g6.4xlarge" + case mlG68Xlarge = "ml.g6.8xlarge" + case mlG6Xlarge = "ml.g6.xlarge" case mlInf124Xlarge = "ml.inf1.24xlarge" case mlInf12Xlarge = "ml.inf1.2xlarge" case mlInf16Xlarge = "ml.inf1.6xlarge" @@ -1321,6 +1356,12 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum IsTrackingServerActive: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "Active" + case inactive = "Inactive" + public var description: String { return self.rawValue } + } + public enum JobType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case inference = "INFERENCE" case notebookKernel = "NOTEBOOK_KERNEL" @@ -1847,6 +1888,11 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum ProductionVariantInferenceAmiVersion: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case al2Gpu2 = "al2-ami-sagemaker-inference-gpu-2" + public var description: String { return self.rawValue } + } + public enum ProductionVariantInstanceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case mlC42Xlarge = "ml.c4.2xlarge" case mlC44Xlarge = "ml.c4.4xlarge" @@ -2346,6 +2392,13 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum SortTrackingServerBy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case creationTime = "CreationTime" + case name = "Name" + case status = "Status" + public var description: String { return self.rawValue } + } + public enum SortTrialComponentsBy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case creationTime = "CreationTime" case name = "Name" @@ -2518,6 +2571,34 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum TrackingServerSize: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case l = "Large" + case m = "Medium" + case s = "Small" + public var description: String { return self.rawValue } + } + + public enum TrackingServerStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case createFailed = "CreateFailed" + case created = "Created" + case creating = "Creating" + case deleteFailed = "DeleteFailed" + case deleting = "Deleting" + case maintenanceComplete = "MaintenanceComplete" + case maintenanceFailed = "MaintenanceFailed" + case maintenanceInProgress = "MaintenanceInProgress" + case startFailed = "StartFailed" + case started = "Started" + case starting = "Starting" + case stopFailed = "StopFailed" + case stopped = "Stopped" + case stopping = "Stopping" + case updateFailed = "UpdateFailed" + case updated = "Updated" + case updating = "Updating" + public var description: String { return self.rawValue } + } + public enum TrafficRoutingConfigType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case allAtOnce = "ALL_AT_ONCE" case canary = "CANARY" @@ -3937,7 +4018,7 @@ extension SageMaker { } public struct 
AutoMLAlgorithmConfig: AWSEncodableShape & AWSDecodableShape { - /// The selection of algorithms run on a dataset to train the model candidates of an Autopilot job. Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm. In ENSEMBLING mode: "catboost" "extra-trees" "fastai" "lightgbm" "linear-learner" "nn-torch" "randomforest" "xgboost" In HYPERPARAMETER_TUNING mode: "linear-learner" "mlp" "xgboost" + /// The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job. For the tabular problem type TabularJobConfig: Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm. In ENSEMBLING mode: "catboost" "extra-trees" "fastai" "lightgbm" "linear-learner" "nn-torch" "randomforest" "xgboost" In HYPERPARAMETER_TUNING mode: "linear-learner" "mlp" "xgboost" For the time-series forecasting problem type TimeSeriesForecastingJobConfig: Choose your algorithms from this list. "cnn-qr" "deepar" "prophet" "arima" "npts" "ets" public let autoMLAlgorithms: [AutoMLAlgorithm]? public init(autoMLAlgorithms: [AutoMLAlgorithm]? = nil) { @@ -4010,7 +4091,7 @@ extension SageMaker { } public struct AutoMLCandidateGenerationConfig: AWSEncodableShape & AWSDecodableShape { - /// Stores the configuration information for the selection of algorithms used to train the model candidates. The list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode . AlgorithmsConfig should not be set in AUTO training mode. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, AutoMLCandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, AutoMLCandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per training mode, see AutoMLAlgorithmConfig. For more information on each algorithm, see the Algorithm support section in Autopilot developer guide. + /// Stores the configuration information for the selection of algorithms trained on tabular data. The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode . AlgorithmsConfig should not be set if the training mode is set on AUTO. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig. For more information on each algorithm, see the Algorithm support section in Autopilot developer guide. public let algorithmsConfig: [AutoMLAlgorithmConfig]? /// A URL to the Amazon S3 data source containing selected features from the input data source to run an Autopilot job. You can input FeatureAttributeNames (optional) in JSON format as shown below: { "FeatureAttributeNames":["col1", "col2", ...] }. 
You can also specify the data type of the feature (optional) in the format shown below: { "FeatureDataTypes":{"col1":"numeric", "col2":"categorical" ... } } These column keys may not include the target column. In ensembling mode, Autopilot only supports the following data types: numeric, categorical, text, and datetime. In HPO mode, Autopilot can support numeric, categorical, text, datetime, and sequence. If only FeatureDataTypes is provided, the column keys (col1, col2,..) should be a subset of the column names in the input data. If both FeatureDataTypes and FeatureAttributeNames are provided, then the column keys should be a subset of the column names provided in FeatureAttributeNames. The key name FeatureAttributeNames is fixed. The values listed in ["col1", "col2", ...] are case sensitive and should be a list of strings containing unique values that are a subset of the column names in the input data. The list of columns provided must not include the target column. public let featureSpecificationS3Uri: String? @@ -4824,7 +4905,7 @@ extension SageMaker { } public struct CandidateGenerationConfig: AWSEncodableShape & AWSDecodableShape { - /// Stores the configuration information for the selection of algorithms used to train model candidates on tabular data. The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode . AlgorithmsConfig should not be set in AUTO training mode. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig. For more information on each algorithm, see the Algorithm support section in Autopilot developer guide. + /// Your Autopilot job trains a default set of algorithms on your dataset. For tabular and time-series data, you can customize the algorithm list by selecting a subset of algorithms for your problem type. AlgorithmsConfig stores the customized selection of algorithms to train on your data. For the tabular problem type TabularJobConfig, the list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode . AlgorithmsConfig should not be set when the training mode AutoMLJobConfig.Mode is set to AUTO. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for the given training mode. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for the given training mode. For the list of all algorithms per training mode, see AlgorithmConfig. For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide. For the time-series forecasting problem type TimeSeriesForecastingJobConfig, choose your algorithms from the list provided in AlgorithmConfig. For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide. When AlgorithmsConfig is provided, one AutoMLAlgorithms attribute must be set and one only. 
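The new time-series cases on AutoMLAlgorithm plug directly into this configuration. A short sketch restricting an Autopilot forecasting job to three forecasters, using only the initializers shown in this diff; how the config is attached to TimeSeriesForecastingJobConfig is assumed rather than shown here.

    // Limit candidate generation to three of the newly added forecasting algorithms.
    let forecasters = SageMaker.AutoMLAlgorithmConfig(autoMLAlgorithms: [.deepar, .ets, .prophet])
    let candidateConfig = SageMaker.CandidateGenerationConfig(algorithmsConfig: [forecasters])
    // candidateConfig would then be set on the job's TimeSeriesForecastingJobConfig (assumption; that wiring is not in this diff).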
If the list of algorithms provided as values for AutoMLAlgorithms is empty, CandidateGenerationConfig uses the full set of algorithms for time-series forecasting. When AlgorithmsConfig is not provided, CandidateGenerationConfig uses the full set of algorithms for time-series forecasting. public let algorithmsConfig: [AutoMLAlgorithmConfig]? public init(algorithmsConfig: [AutoMLAlgorithmConfig]? = nil) { @@ -5409,6 +5490,24 @@ extension SageMaker { } } + public struct ClusterEbsVolumeConfig: AWSEncodableShape & AWSDecodableShape { + /// The size in gigabytes (GB) of the additional EBS volume to be attached to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to /opt/sagemaker. + public let volumeSizeInGB: Int? + + public init(volumeSizeInGB: Int? = nil) { + self.volumeSizeInGB = volumeSizeInGB + } + + public func validate(name: String) throws { + try self.validate(self.volumeSizeInGB, name: "volumeSizeInGB", parent: name, max: 16384) + try self.validate(self.volumeSizeInGB, name: "volumeSizeInGB", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case volumeSizeInGB = "VolumeSizeInGB" + } + } + public struct ClusterInstanceGroupDetails: AWSDecodableShape { /// The number of instances that are currently in the instance group of a SageMaker HyperPod cluster. public let currentCount: Int? @@ -5416,6 +5515,8 @@ extension SageMaker { public let executionRole: String? /// The name of the instance group of a SageMaker HyperPod cluster. public let instanceGroupName: String? + /// The additional storage configurations for the instances in the SageMaker HyperPod cluster instance group. + public let instanceStorageConfigs: [ClusterInstanceStorageConfig]? /// The instance type of the instance group of a SageMaker HyperPod cluster. public let instanceType: ClusterInstanceType? /// Details of LifeCycle configuration for the instance group. @@ -5425,10 +5526,11 @@ extension SageMaker { /// The number you specified to TreadsPerCore in CreateCluster for enabling or disabling multithreading. For instance types that support multithreading, you can specify 1 for disabling multithreading and 2 for enabling multithreading. For more information, see the reference table of CPU cores and threads per CPU core per instance type in the Amazon Elastic Compute Cloud User Guide. public let threadsPerCore: Int? - public init(currentCount: Int? = nil, executionRole: String? = nil, instanceGroupName: String? = nil, instanceType: ClusterInstanceType? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, targetCount: Int? = nil, threadsPerCore: Int? = nil) { + public init(currentCount: Int? = nil, executionRole: String? = nil, instanceGroupName: String? = nil, instanceStorageConfigs: [ClusterInstanceStorageConfig]? = nil, instanceType: ClusterInstanceType? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, targetCount: Int? = nil, threadsPerCore: Int? 
= nil) { self.currentCount = currentCount self.executionRole = executionRole self.instanceGroupName = instanceGroupName + self.instanceStorageConfigs = instanceStorageConfigs self.instanceType = instanceType self.lifeCycleConfig = lifeCycleConfig self.targetCount = targetCount @@ -5439,6 +5541,7 @@ extension SageMaker { case currentCount = "CurrentCount" case executionRole = "ExecutionRole" case instanceGroupName = "InstanceGroupName" + case instanceStorageConfigs = "InstanceStorageConfigs" case instanceType = "InstanceType" case lifeCycleConfig = "LifeCycleConfig" case targetCount = "TargetCount" @@ -5453,6 +5556,8 @@ extension SageMaker { public let instanceCount: Int? /// Specifies the name of the instance group. public let instanceGroupName: String? + /// Specifies the additional storage configurations for the instances in the SageMaker HyperPod cluster instance group. + public let instanceStorageConfigs: [ClusterInstanceStorageConfig]? /// Specifies the instance type of the instance group. public let instanceType: ClusterInstanceType? /// Specifies the LifeCycle configuration for the instance group. @@ -5460,10 +5565,11 @@ extension SageMaker { /// Specifies the value for Threads per core. For instance types that support multithreading, you can specify 1 for disabling multithreading and 2 for enabling multithreading. For instance types that doesn't support multithreading, specify 1. For more information, see the reference table of CPU cores and threads per CPU core per instance type in the Amazon Elastic Compute Cloud User Guide. public let threadsPerCore: Int? - public init(executionRole: String? = nil, instanceCount: Int? = nil, instanceGroupName: String? = nil, instanceType: ClusterInstanceType? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, threadsPerCore: Int? = nil) { + public init(executionRole: String? = nil, instanceCount: Int? = nil, instanceGroupName: String? = nil, instanceStorageConfigs: [ClusterInstanceStorageConfig]? = nil, instanceType: ClusterInstanceType? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, threadsPerCore: Int? 
= nil) { self.executionRole = executionRole self.instanceCount = instanceCount self.instanceGroupName = instanceGroupName + self.instanceStorageConfigs = instanceStorageConfigs self.instanceType = instanceType self.lifeCycleConfig = lifeCycleConfig self.threadsPerCore = threadsPerCore @@ -5477,6 +5583,10 @@ extension SageMaker { try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, max: 63) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, min: 1) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") + try self.instanceStorageConfigs?.forEach { + try $0.validate(name: "\(name).instanceStorageConfigs[]") + } + try self.validate(self.instanceStorageConfigs, name: "instanceStorageConfigs", parent: name, max: 1) try self.lifeCycleConfig?.validate(name: "\(name).lifeCycleConfig") try self.validate(self.threadsPerCore, name: "threadsPerCore", parent: name, max: 2) try self.validate(self.threadsPerCore, name: "threadsPerCore", parent: name, min: 1) @@ -5486,12 +5596,30 @@ extension SageMaker { case executionRole = "ExecutionRole" case instanceCount = "InstanceCount" case instanceGroupName = "InstanceGroupName" + case instanceStorageConfigs = "InstanceStorageConfigs" case instanceType = "InstanceType" case lifeCycleConfig = "LifeCycleConfig" case threadsPerCore = "ThreadsPerCore" } } + public struct ClusterInstancePlacement: AWSDecodableShape { + /// The Availability Zone where the node in the SageMaker HyperPod cluster is launched. + public let availabilityZone: String? + /// The unique identifier (ID) of the Availability Zone where the node in the SageMaker HyperPod cluster is launched. + public let availabilityZoneId: String? + + public init(availabilityZone: String? = nil, availabilityZoneId: String? = nil) { + self.availabilityZone = availabilityZone + self.availabilityZoneId = availabilityZoneId + } + + private enum CodingKeys: String, CodingKey { + case availabilityZone = "AvailabilityZone" + case availabilityZoneId = "AvailabilityZoneId" + } + } + public struct ClusterInstanceStatusDetails: AWSDecodableShape { /// The message from an instance in a SageMaker HyperPod cluster. public let message: String? @@ -5541,22 +5669,34 @@ extension SageMaker { public let instanceId: String? /// The status of the instance. public let instanceStatus: ClusterInstanceStatusDetails? + /// The configurations of additional storage specified to the instance group where the instance (node) is launched. + public let instanceStorageConfigs: [ClusterInstanceStorageConfig]? /// The type of the instance. public let instanceType: ClusterInstanceType? /// The time when the instance is launched. public let launchTime: Date? /// The LifeCycle configuration applied to the instance. public let lifeCycleConfig: ClusterLifeCycleConfig? + /// The placement details of the SageMaker HyperPod cluster node. + public let placement: ClusterInstancePlacement? + /// The private DNS hostname of the SageMaker HyperPod cluster node. + public let privateDnsHostname: String? + /// The private primary IP address of the SageMaker HyperPod cluster node. + public let privatePrimaryIp: String? /// The number of threads per CPU core you specified under CreateCluster. public let threadsPerCore: Int? - public init(instanceGroupName: String? = nil, instanceId: String? = nil, instanceStatus: ClusterInstanceStatusDetails? = nil, instanceType: ClusterInstanceType? = nil, launchTime: Date? 
= nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, threadsPerCore: Int? = nil) { + public init(instanceGroupName: String? = nil, instanceId: String? = nil, instanceStatus: ClusterInstanceStatusDetails? = nil, instanceStorageConfigs: [ClusterInstanceStorageConfig]? = nil, instanceType: ClusterInstanceType? = nil, launchTime: Date? = nil, lifeCycleConfig: ClusterLifeCycleConfig? = nil, placement: ClusterInstancePlacement? = nil, privateDnsHostname: String? = nil, privatePrimaryIp: String? = nil, threadsPerCore: Int? = nil) { self.instanceGroupName = instanceGroupName self.instanceId = instanceId self.instanceStatus = instanceStatus + self.instanceStorageConfigs = instanceStorageConfigs self.instanceType = instanceType self.launchTime = launchTime self.lifeCycleConfig = lifeCycleConfig + self.placement = placement + self.privateDnsHostname = privateDnsHostname + self.privatePrimaryIp = privatePrimaryIp self.threadsPerCore = threadsPerCore } @@ -5564,9 +5704,13 @@ extension SageMaker { case instanceGroupName = "InstanceGroupName" case instanceId = "InstanceId" case instanceStatus = "InstanceStatus" + case instanceStorageConfigs = "InstanceStorageConfigs" case instanceType = "InstanceType" case launchTime = "LaunchTime" case lifeCycleConfig = "LifeCycleConfig" + case placement = "Placement" + case privateDnsHostname = "PrivateDnsHostname" + case privatePrimaryIp = "PrivatePrimaryIp" case threadsPerCore = "ThreadsPerCore" } } @@ -6642,7 +6786,7 @@ extension SageMaker { try self.instanceGroups?.forEach { try $0.validate(name: "\(name).instanceGroups[]") } - try self.validate(self.instanceGroups, name: "instanceGroups", parent: name, max: 5) + try self.validate(self.instanceGroups, name: "instanceGroups", parent: name, max: 20) try self.validate(self.instanceGroups, name: "instanceGroups", parent: name, min: 1) try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") @@ -7603,6 +7747,67 @@ extension SageMaker { } } + public struct CreateHubContentReferenceRequest: AWSEncodableShape { + /// The name of the hub content to reference. + public let hubContentName: String? + /// The name of the hub to add the hub content reference to. + public let hubName: String? + /// The minimum version of the hub content to reference. + public let minVersion: String? + /// The ARN of the public hub content to reference. + public let sageMakerPublicHubContentArn: String? + /// Any tags associated with the hub content to reference. + public let tags: [Tag]? + + public init(hubContentName: String? = nil, hubName: String? = nil, minVersion: String? = nil, sageMakerPublicHubContentArn: String? = nil, tags: [Tag]? 
= nil) { + self.hubContentName = hubContentName + self.hubName = hubName + self.minVersion = minVersion + self.sageMakerPublicHubContentArn = sageMakerPublicHubContentArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.hubContentName, name: "hubContentName", parent: name, max: 63) + try self.validate(self.hubContentName, name: "hubContentName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.minVersion, name: "minVersion", parent: name, max: 14) + try self.validate(self.minVersion, name: "minVersion", parent: name, min: 5) + try self.validate(self.minVersion, name: "minVersion", parent: name, pattern: "^\\d{1,4}.\\d{1,4}.\\d{1,4}$") + try self.validate(self.sageMakerPublicHubContentArn, name: "sageMakerPublicHubContentArn", parent: name, max: 255) + try self.validate(self.sageMakerPublicHubContentArn, name: "sageMakerPublicHubContentArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:aws:hub-content\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + } + + private enum CodingKeys: String, CodingKey { + case hubContentName = "HubContentName" + case hubName = "HubName" + case minVersion = "MinVersion" + case sageMakerPublicHubContentArn = "SageMakerPublicHubContentArn" + case tags = "Tags" + } + } + + public struct CreateHubContentReferenceResponse: AWSDecodableShape { + /// The ARN of the hub that the hub content reference was added to. + public let hubArn: String? + /// The ARN of the hub content. + public let hubContentArn: String? + + public init(hubArn: String? = nil, hubContentArn: String? = nil) { + self.hubArn = hubArn + self.hubContentArn = hubContentArn + } + + private enum CodingKeys: String, CodingKey { + case hubArn = "HubArn" + case hubContentArn = "HubContentArn" + } + } + public struct CreateHubRequest: AWSEncodableShape { /// A description of the hub. public let hubDescription: String? @@ -8242,6 +8447,79 @@ extension SageMaker { } } + public struct CreateMlflowTrackingServerRequest: AWSEncodableShape { + /// The S3 URI for a general purpose bucket to use as the MLflow Tracking Server artifact store. + public let artifactStoreUri: String? + /// Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False. + public let automaticModelRegistration: Bool? + /// The version of MLflow that the tracking server uses. To see which MLflow versions are available to use, see How it works. + public let mlflowVersion: String? + /// The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow Tracking Server uses to access the artifact store in Amazon S3. The role should have AmazonS3FullAccess permissions. For more information on IAM permissions for tracking server creation, see Set up IAM permissions for MLflow. + public let roleArn: String? + /// Tags consisting of key-value pairs used to manage metadata for the tracking server. + public let tags: [Tag]? 
+ /// A unique string identifying the tracking server name. This string is part of the tracking server ARN. + public let trackingServerName: String? + /// The size of the tracking server you want to create. You can choose between "Small", "Medium", and "Large". The default MLflow Tracking Server configuration size is "Small". You can choose a size depending on the projected use of the tracking server such as the volume of data logged, number of users, and frequency of use. We recommend using a small tracking server for teams of up to 25 users, a medium tracking server for teams of up to 50 users, and a large tracking server for teams of up to 100 users. + public let trackingServerSize: TrackingServerSize? + /// The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30. + public let weeklyMaintenanceWindowStart: String? + + public init(artifactStoreUri: String? = nil, automaticModelRegistration: Bool? = nil, mlflowVersion: String? = nil, roleArn: String? = nil, tags: [Tag]? = nil, trackingServerName: String? = nil, trackingServerSize: TrackingServerSize? = nil, weeklyMaintenanceWindowStart: String? = nil) { + self.artifactStoreUri = artifactStoreUri + self.automaticModelRegistration = automaticModelRegistration + self.mlflowVersion = mlflowVersion + self.roleArn = roleArn + self.tags = tags + self.trackingServerName = trackingServerName + self.trackingServerSize = trackingServerSize + self.weeklyMaintenanceWindowStart = weeklyMaintenanceWindowStart + } + + public func validate(name: String) throws { + try self.validate(self.artifactStoreUri, name: "artifactStoreUri", parent: name, max: 1024) + try self.validate(self.artifactStoreUri, name: "artifactStoreUri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") + try self.validate(self.mlflowVersion, name: "mlflowVersion", parent: name, max: 16) + try self.validate(self.mlflowVersion, name: "mlflowVersion", parent: name, pattern: "^[0-9]*.[0-9]*.[0-9]*$") + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 50) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + try self.validate(self.weeklyMaintenanceWindowStart, name: "weeklyMaintenanceWindowStart", parent: name, max: 9) + try self.validate(self.weeklyMaintenanceWindowStart, name: "weeklyMaintenanceWindowStart", parent: name, pattern: "^(Mon|Tue|Wed|Thu|Fri|Sat|Sun):([01]\\d|2[0-3]):([0-5]\\d)$") + } + + private enum CodingKeys: String, CodingKey { + case artifactStoreUri = "ArtifactStoreUri" + case automaticModelRegistration = "AutomaticModelRegistration" + case mlflowVersion = "MlflowVersion" + case roleArn = "RoleArn" + case tags = "Tags" + case trackingServerName = "TrackingServerName" + case trackingServerSize = "TrackingServerSize" + case weeklyMaintenanceWindowStart = "WeeklyMaintenanceWindowStart" + } + } + + public struct 
CreateMlflowTrackingServerResponse: AWSDecodableShape { + /// The ARN of the tracking server. + public let trackingServerArn: String? + + public init(trackingServerArn: String? = nil) { + self.trackingServerArn = trackingServerArn + } + + private enum CodingKeys: String, CodingKey { + case trackingServerArn = "TrackingServerArn" + } + } + public struct CreateModelBiasJobDefinitionRequest: AWSEncodableShape { /// The name of the bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account. public let jobDefinitionName: String? @@ -8639,6 +8917,8 @@ extension SageMaker { public let metadataProperties: MetadataProperties? /// Whether the model is approved for deployment. This parameter is optional for versioned models, and does not apply to unversioned models. For versioned models, the value of this parameter must be set to Approved to deploy the model. public let modelApprovalStatus: ModelApprovalStatus? + /// The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version. + public let modelCard: ModelPackageModelCard? /// A structure that contains model metrics reports. public let modelMetrics: ModelMetrics? /// A description of the model package. @@ -8649,6 +8929,8 @@ extension SageMaker { public let modelPackageName: String? /// The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). This archive can hold multiple files that are all equally used in the load test. Each file in the archive must satisfy the size constraints of the InvokeEndpoint call. public let samplePayloadUrl: String? + /// The KMS Key ID (KMSKeyId) used for encryption of model package information. + public let securityConfig: ModelPackageSecurityConfig? /// Indicates if you want to skip model validation. public let skipModelValidation: SkipModelValidation? /// Details about the algorithm that was used to create the model package. @@ -8664,7 +8946,7 @@ extension SageMaker { /// Specifies configurations for one or more transform jobs that SageMaker runs to test the model package. public let validationSpecification: ModelPackageValidationSpecification? - public init(additionalInferenceSpecifications: [AdditionalInferenceSpecificationDefinition]? = nil, certifyForMarketplace: Bool? = nil, clientToken: String? = CreateModelPackageInput.idempotencyToken(), customerMetadataProperties: [String: String]? = nil, domain: String? = nil, driftCheckBaselines: DriftCheckBaselines? = nil, inferenceSpecification: InferenceSpecification? = nil, metadataProperties: MetadataProperties? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelMetrics: ModelMetrics? = nil, modelPackageDescription: String? = nil, modelPackageGroupName: String? = nil, modelPackageName: String? = nil, samplePayloadUrl: String? = nil, skipModelValidation: SkipModelValidation? = nil, sourceAlgorithmSpecification: SourceAlgorithmSpecification? = nil, sourceUri: String? 
= nil, tags: [Tag]? = nil, task: String? = nil, validationSpecification: ModelPackageValidationSpecification? = nil) { + public init(additionalInferenceSpecifications: [AdditionalInferenceSpecificationDefinition]? = nil, certifyForMarketplace: Bool? = nil, clientToken: String? = CreateModelPackageInput.idempotencyToken(), customerMetadataProperties: [String: String]? = nil, domain: String? = nil, driftCheckBaselines: DriftCheckBaselines? = nil, inferenceSpecification: InferenceSpecification? = nil, metadataProperties: MetadataProperties? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelCard: ModelPackageModelCard? = nil, modelMetrics: ModelMetrics? = nil, modelPackageDescription: String? = nil, modelPackageGroupName: String? = nil, modelPackageName: String? = nil, samplePayloadUrl: String? = nil, securityConfig: ModelPackageSecurityConfig? = nil, skipModelValidation: SkipModelValidation? = nil, sourceAlgorithmSpecification: SourceAlgorithmSpecification? = nil, sourceUri: String? = nil, tags: [Tag]? = nil, task: String? = nil, validationSpecification: ModelPackageValidationSpecification? = nil) { self.additionalInferenceSpecifications = additionalInferenceSpecifications self.certifyForMarketplace = certifyForMarketplace self.clientToken = clientToken @@ -8674,11 +8956,13 @@ extension SageMaker { self.inferenceSpecification = inferenceSpecification self.metadataProperties = metadataProperties self.modelApprovalStatus = modelApprovalStatus + self.modelCard = modelCard self.modelMetrics = modelMetrics self.modelPackageDescription = modelPackageDescription self.modelPackageGroupName = modelPackageGroupName self.modelPackageName = modelPackageName self.samplePayloadUrl = samplePayloadUrl + self.securityConfig = securityConfig self.skipModelValidation = skipModelValidation self.sourceAlgorithmSpecification = sourceAlgorithmSpecification self.sourceUri = sourceUri @@ -8709,6 +8993,7 @@ extension SageMaker { try self.driftCheckBaselines?.validate(name: "\(name).driftCheckBaselines") try self.inferenceSpecification?.validate(name: "\(name).inferenceSpecification") try self.metadataProperties?.validate(name: "\(name).metadataProperties") + try self.modelCard?.validate(name: "\(name).modelCard") try self.modelMetrics?.validate(name: "\(name).modelMetrics") try self.validate(self.modelPackageDescription, name: "modelPackageDescription", parent: name, max: 1024) try self.validate(self.modelPackageDescription, name: "modelPackageDescription", parent: name, pattern: "^[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*$") @@ -8720,6 +9005,7 @@ extension SageMaker { try self.validate(self.modelPackageName, name: "modelPackageName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.validate(self.samplePayloadUrl, name: "samplePayloadUrl", parent: name, max: 1024) try self.validate(self.samplePayloadUrl, name: "samplePayloadUrl", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") + try self.securityConfig?.validate(name: "\(name).securityConfig") try self.sourceAlgorithmSpecification?.validate(name: "\(name).sourceAlgorithmSpecification") try self.validate(self.sourceUri, name: "sourceUri", parent: name, max: 1024) try self.validate(self.sourceUri, name: "sourceUri", parent: name, pattern: "^[\\p{L}\\p{M}\\p{Z}\\p{N}\\p{P}]{0,1024}$") @@ -8740,11 +9026,13 @@ extension SageMaker { case inferenceSpecification = "InferenceSpecification" case metadataProperties = "MetadataProperties" case modelApprovalStatus = "ModelApprovalStatus" + case modelCard = "ModelCard" case modelMetrics = 
"ModelMetrics" case modelPackageDescription = "ModelPackageDescription" case modelPackageGroupName = "ModelPackageGroupName" case modelPackageName = "ModelPackageName" case samplePayloadUrl = "SamplePayloadUrl" + case securityConfig = "SecurityConfig" case skipModelValidation = "SkipModelValidation" case sourceAlgorithmSpecification = "SourceAlgorithmSpecification" case sourceUri = "SourceUri" @@ -9204,6 +9492,50 @@ extension SageMaker { } } + public struct CreatePresignedMlflowTrackingServerUrlRequest: AWSEncodableShape { + /// The duration in seconds that your presigned URL is valid. The presigned URL can be used only once. + public let expiresInSeconds: Int? + /// The duration in seconds that your MLflow UI session is valid. + public let sessionExpirationDurationInSeconds: Int? + /// The name of the tracking server to connect to your MLflow UI. + public let trackingServerName: String? + + public init(expiresInSeconds: Int? = nil, sessionExpirationDurationInSeconds: Int? = nil, trackingServerName: String? = nil) { + self.expiresInSeconds = expiresInSeconds + self.sessionExpirationDurationInSeconds = sessionExpirationDurationInSeconds + self.trackingServerName = trackingServerName + } + + public func validate(name: String) throws { + try self.validate(self.expiresInSeconds, name: "expiresInSeconds", parent: name, max: 300) + try self.validate(self.expiresInSeconds, name: "expiresInSeconds", parent: name, min: 5) + try self.validate(self.sessionExpirationDurationInSeconds, name: "sessionExpirationDurationInSeconds", parent: name, max: 43200) + try self.validate(self.sessionExpirationDurationInSeconds, name: "sessionExpirationDurationInSeconds", parent: name, min: 1800) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + } + + private enum CodingKeys: String, CodingKey { + case expiresInSeconds = "ExpiresInSeconds" + case sessionExpirationDurationInSeconds = "SessionExpirationDurationInSeconds" + case trackingServerName = "TrackingServerName" + } + } + + public struct CreatePresignedMlflowTrackingServerUrlResponse: AWSDecodableShape { + /// A presigned URL with an authorization token. + public let authorizedUrl: String? + + public init(authorizedUrl: String? = nil) { + self.authorizedUrl = authorizedUrl + } + + private enum CodingKeys: String, CodingKey { + case authorizedUrl = "AuthorizedUrl" + } + } + public struct CreatePresignedNotebookInstanceUrlInput: AWSEncodableShape { /// The name of the notebook instance. public let notebookInstanceName: String? @@ -10060,16 +10392,19 @@ extension SageMaker { public let notificationConfiguration: NotificationConfiguration? /// An array of key-value pairs. For more information, see Resource Tag and Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. public let tags: [Tag]? + /// Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using a Amazon S3 presigned URL. + public let workerAccessConfiguration: WorkerAccessConfiguration? /// The name of the workforce. public let workforceName: String? /// The name of the work team. Use this name to identify the work team. public let workteamName: String? 
- public init(description: String? = nil, memberDefinitions: [MemberDefinition]? = nil, notificationConfiguration: NotificationConfiguration? = nil, tags: [Tag]? = nil, workforceName: String? = nil, workteamName: String? = nil) { + public init(description: String? = nil, memberDefinitions: [MemberDefinition]? = nil, notificationConfiguration: NotificationConfiguration? = nil, tags: [Tag]? = nil, workerAccessConfiguration: WorkerAccessConfiguration? = nil, workforceName: String? = nil, workteamName: String? = nil) { self.description = description self.memberDefinitions = memberDefinitions self.notificationConfiguration = notificationConfiguration self.tags = tags + self.workerAccessConfiguration = workerAccessConfiguration self.workforceName = workforceName self.workteamName = workteamName } @@ -10101,6 +10436,7 @@ extension SageMaker { case memberDefinitions = "MemberDefinitions" case notificationConfiguration = "NotificationConfiguration" case tags = "Tags" + case workerAccessConfiguration = "WorkerAccessConfiguration" case workforceName = "WorkforceName" case workteamName = "WorkteamName" } @@ -10675,7 +11011,7 @@ extension SageMaker { try self.customFileSystemConfigs?.forEach { try $0.validate(name: "\(name).customFileSystemConfigs[]") } - try self.validate(self.customFileSystemConfigs, name: "customFileSystemConfigs", parent: name, max: 2) + try self.validate(self.customFileSystemConfigs, name: "customFileSystemConfigs", parent: name, max: 10) try self.customPosixUserConfig?.validate(name: "\(name).customPosixUserConfig") try self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) try self.validate(self.executionRole, name: "executionRole", parent: name, min: 20) @@ -11222,6 +11558,33 @@ extension SageMaker { public init() {} } + public struct DeleteHubContentReferenceRequest: AWSEncodableShape { + /// The name of the hub content to delete. + public let hubContentName: String? + /// The type of hub content to delete. + public let hubContentType: HubContentType? + /// The name of the hub to delete the hub content reference from. + public let hubName: String? + + public init(hubContentName: String? = nil, hubContentType: HubContentType? = nil, hubName: String? = nil) { + self.hubContentName = hubContentName + self.hubContentType = hubContentType + self.hubName = hubName + } + + public func validate(name: String) throws { + try self.validate(self.hubContentName, name: "hubContentName", parent: name, max: 63) + try self.validate(self.hubContentName, name: "hubContentName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + } + + private enum CodingKeys: String, CodingKey { + case hubContentName = "HubContentName" + case hubContentType = "HubContentType" + case hubName = "HubName" + } + } + public struct DeleteHubContentRequest: AWSEncodableShape { /// The name of the content that you want to delete from a hub. public let hubContentName: String? 
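Taken together, the CreateHubContentReferenceRequest/Response shapes added earlier in this file and the DeleteHubContentReferenceRequest just above let a private hub hold a reference to a model in the SageMaker public hub. A minimal, hedged sketch of calling the matching operations, assuming Soto's usual camelCase method names in SageMaker_api.swift and a Soto 6-style client; the hub names, the public hub content ARN, and the .modelReference case are illustrative assumptions, not values taken from this diff:

import SotoSageMaker

let client = AWSClient(httpClientProvider: .createNew)
let sageMaker = SageMaker(client: client, region: .useast1)

// Add a reference to a public-hub model into a private hub. The ARN is a made-up value
// shaped to match the SageMakerPublicHubContentArn pattern validated above.
let reference = try await sageMaker.createHubContentReference(.init(
    hubContentName: "my-referenced-model",
    hubName: "my-private-hub",   // a hub ARN is also accepted under the relaxed HubName pattern in this diff
    sageMakerPublicHubContentArn: "arn:aws:sagemaker:us-east-1:aws:hub-content/SageMakerPublicHub/Model/example-public-model"
))
print(reference.hubContentArn ?? "no hub content ARN returned")

// Remove the reference again once it is no longer needed.
try await sageMaker.deleteHubContentReference(.init(
    hubContentName: "my-referenced-model",
    hubContentType: .modelReference,   // assumed enum case for the new reference content type
    hubName: "my-private-hub"
))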
@@ -11245,8 +11608,7 @@ extension SageMaker { try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, max: 14) try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, min: 5) try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, pattern: "^\\d{1,4}.\\d{1,4}.\\d{1,4}$") - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") } private enum CodingKeys: String, CodingKey { @@ -11266,8 +11628,7 @@ extension SageMaker { } public func validate(name: String) throws { - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") } private enum CodingKeys: String, CodingKey { @@ -11425,6 +11786,38 @@ extension SageMaker { } } + public struct DeleteMlflowTrackingServerRequest: AWSEncodableShape { + /// The name of the tracking server to delete. + public let trackingServerName: String? + + public init(trackingServerName: String? = nil) { + self.trackingServerName = trackingServerName + } + + public func validate(name: String) throws { + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + } + + private enum CodingKeys: String, CodingKey { + case trackingServerName = "TrackingServerName" + } + } + + public struct DeleteMlflowTrackingServerResponse: AWSDecodableShape { + /// A TrackingServerArn object, the ARN of the tracking server that is deleted if successfully found. + public let trackingServerArn: String? + + public init(trackingServerArn: String? = nil) { + self.trackingServerArn = trackingServerArn + } + + private enum CodingKeys: String, CodingKey { + case trackingServerArn = "TrackingServerArn" + } + } + public struct DeleteModelBiasJobDefinitionRequest: AWSEncodableShape { /// The name of the model bias job definition to delete. public let jobDefinitionName: String? @@ -12662,9 +13055,9 @@ extension SageMaker { } public struct DescribeClusterNodeRequest: AWSEncodableShape { - /// The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which the instance is. + /// The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which the node is. public let clusterName: String? - /// The ID of the instance. + /// The ID of the SageMaker HyperPod cluster node. public let nodeId: String? public init(clusterName: String? = nil, nodeId: String?
= nil) { @@ -12677,7 +13070,7 @@ extension SageMaker { try self.validate(self.clusterName, name: "clusterName", parent: name, pattern: "^(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12})|([a-zA-Z0-9](-*[a-zA-Z0-9]){0,62})$") try self.validate(self.nodeId, name: "nodeId", parent: name, max: 256) try self.validate(self.nodeId, name: "nodeId", parent: name, min: 1) - try self.validate(self.nodeId, name: "nodeId", parent: name, pattern: "^[a-zA-Z][-a-zA-Z0-9]*$") + try self.validate(self.nodeId, name: "nodeId", parent: name, pattern: "^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$") } private enum CodingKeys: String, CodingKey { @@ -12687,7 +13080,7 @@ extension SageMaker { } public struct DescribeClusterNodeResponse: AWSDecodableShape { - /// The details of the instance. + /// The details of the SageMaker HyperPod cluster node. public let nodeDetails: ClusterNodeDetails? public init(nodeDetails: ClusterNodeDetails? = nil) { @@ -13979,8 +14372,7 @@ extension SageMaker { try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, max: 14) try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, min: 5) try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, pattern: "^\\d{1,4}.\\d{1,4}.\\d{1,4}$") - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") } private enum CodingKeys: String, CodingKey { @@ -14024,8 +14416,14 @@ extension SageMaker { public let hubContentVersion: String? /// The name of the hub that contains the content. public let hubName: String? - - public init(creationTime: Date? = nil, documentSchemaVersion: String? = nil, failureReason: String? = nil, hubArn: String? = nil, hubContentArn: String? = nil, hubContentDependencies: [HubContentDependency]? = nil, hubContentDescription: String? = nil, hubContentDisplayName: String? = nil, hubContentDocument: String? = nil, hubContentMarkdown: String? = nil, hubContentName: String? = nil, hubContentSearchKeywords: [String]? = nil, hubContentStatus: HubContentStatus? = nil, hubContentType: HubContentType? = nil, hubContentVersion: String? = nil, hubName: String? = nil) { + /// The minimum version of the hub content. + public let referenceMinVersion: String? + /// The ARN of the public hub content. + public let sageMakerPublicHubContentArn: String? + /// The support status of the hub content. + public let supportStatus: HubContentSupportStatus? + + public init(creationTime: Date? = nil, documentSchemaVersion: String? = nil, failureReason: String? = nil, hubArn: String? = nil, hubContentArn: String? = nil, hubContentDependencies: [HubContentDependency]? = nil, hubContentDescription: String? = nil, hubContentDisplayName: String? = nil, hubContentDocument: String? = nil, hubContentMarkdown: String? = nil, hubContentName: String? = nil, hubContentSearchKeywords: [String]? = nil, hubContentStatus: HubContentStatus? = nil, hubContentType: HubContentType? = nil, hubContentVersion: String? = nil, hubName: String? = nil, referenceMinVersion: String? = nil, sageMakerPublicHubContentArn: String? = nil, supportStatus: HubContentSupportStatus? 
= nil) { self.creationTime = creationTime self.documentSchemaVersion = documentSchemaVersion self.failureReason = failureReason @@ -14042,6 +14440,9 @@ extension SageMaker { self.hubContentType = hubContentType self.hubContentVersion = hubContentVersion self.hubName = hubName + self.referenceMinVersion = referenceMinVersion + self.sageMakerPublicHubContentArn = sageMakerPublicHubContentArn + self.supportStatus = supportStatus } private enum CodingKeys: String, CodingKey { @@ -14061,6 +14462,9 @@ extension SageMaker { case hubContentType = "HubContentType" case hubContentVersion = "HubContentVersion" case hubName = "HubName" + case referenceMinVersion = "ReferenceMinVersion" + case sageMakerPublicHubContentArn = "SageMakerPublicHubContentArn" + case supportStatus = "SupportStatus" } } @@ -14073,8 +14477,7 @@ extension SageMaker { } public func validate(name: String) throws { - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") } private enum CodingKeys: String, CodingKey { @@ -14850,6 +15253,92 @@ extension SageMaker { } } + public struct DescribeMlflowTrackingServerRequest: AWSEncodableShape { + /// The name of the MLflow Tracking Server to describe. + public let trackingServerName: String? + + public init(trackingServerName: String? = nil) { + self.trackingServerName = trackingServerName + } + + public func validate(name: String) throws { + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + } + + private enum CodingKeys: String, CodingKey { + case trackingServerName = "TrackingServerName" + } + } + + public struct DescribeMlflowTrackingServerResponse: AWSDecodableShape { + /// The S3 URI of the general purpose bucket used as the MLflow Tracking Server artifact store. + public let artifactStoreUri: String? + /// Whether automatic registration of new MLflow models to the SageMaker Model Registry is enabled. + public let automaticModelRegistration: Bool? + public let createdBy: UserContext? + /// The timestamp of when the described MLflow Tracking Server was created. + public let creationTime: Date? + /// Whether the described MLflow Tracking Server is currently active. + public let isActive: IsTrackingServerActive? + public let lastModifiedBy: UserContext? + /// The timestamp of when the described MLflow Tracking Server was last modified. + public let lastModifiedTime: Date? + /// The MLflow version used for the described tracking server. + public let mlflowVersion: String? + /// The Amazon Resource Name (ARN) for an IAM role in your account that the described MLflow Tracking Server uses to access the artifact store in Amazon S3. + public let roleArn: String? + /// The ARN of the described tracking server. + public let trackingServerArn: String? + /// The name of the described tracking server. + public let trackingServerName: String? + /// The size of the described tracking server. + public let trackingServerSize: TrackingServerSize? + /// The current creation status of the described MLflow Tracking Server. 
+ public let trackingServerStatus: TrackingServerStatus? + /// The URL to connect to the MLflow user interface for the described tracking server. + public let trackingServerUrl: String? + /// The day and time of the week when weekly maintenance occurs on the described tracking server. + public let weeklyMaintenanceWindowStart: String? + + public init(artifactStoreUri: String? = nil, automaticModelRegistration: Bool? = nil, createdBy: UserContext? = nil, creationTime: Date? = nil, isActive: IsTrackingServerActive? = nil, lastModifiedBy: UserContext? = nil, lastModifiedTime: Date? = nil, mlflowVersion: String? = nil, roleArn: String? = nil, trackingServerArn: String? = nil, trackingServerName: String? = nil, trackingServerSize: TrackingServerSize? = nil, trackingServerStatus: TrackingServerStatus? = nil, trackingServerUrl: String? = nil, weeklyMaintenanceWindowStart: String? = nil) { + self.artifactStoreUri = artifactStoreUri + self.automaticModelRegistration = automaticModelRegistration + self.createdBy = createdBy + self.creationTime = creationTime + self.isActive = isActive + self.lastModifiedBy = lastModifiedBy + self.lastModifiedTime = lastModifiedTime + self.mlflowVersion = mlflowVersion + self.roleArn = roleArn + self.trackingServerArn = trackingServerArn + self.trackingServerName = trackingServerName + self.trackingServerSize = trackingServerSize + self.trackingServerStatus = trackingServerStatus + self.trackingServerUrl = trackingServerUrl + self.weeklyMaintenanceWindowStart = weeklyMaintenanceWindowStart + } + + private enum CodingKeys: String, CodingKey { + case artifactStoreUri = "ArtifactStoreUri" + case automaticModelRegistration = "AutomaticModelRegistration" + case createdBy = "CreatedBy" + case creationTime = "CreationTime" + case isActive = "IsActive" + case lastModifiedBy = "LastModifiedBy" + case lastModifiedTime = "LastModifiedTime" + case mlflowVersion = "MlflowVersion" + case roleArn = "RoleArn" + case trackingServerArn = "TrackingServerArn" + case trackingServerName = "TrackingServerName" + case trackingServerSize = "TrackingServerSize" + case trackingServerStatus = "TrackingServerStatus" + case trackingServerUrl = "TrackingServerUrl" + case weeklyMaintenanceWindowStart = "WeeklyMaintenanceWindowStart" + } + } + public struct DescribeModelBiasJobDefinitionRequest: AWSEncodableShape { /// The name of the model bias job definition. The name must be unique within an Amazon Web Services Region in the Amazon Web Services account. public let jobDefinitionName: String? @@ -15290,6 +15779,8 @@ extension SageMaker { public let metadataProperties: MetadataProperties? /// The approval status of the model package. public let modelApprovalStatus: ModelApprovalStatus? + /// The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version. + public let modelCard: ModelPackageModelCard? /// Metrics for the model. public let modelMetrics: ModelMetrics? /// The Amazon Resource Name (ARN) of the model package. 
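The CreateMlflowTrackingServer, DescribeMlflowTrackingServer and CreatePresignedMlflowTrackingServerUrl shapes introduced in this file pair with the corresponding operations in SageMaker_api.swift. A hedged sketch of the typical flow, continuing with the sageMaker client from the earlier sketch; the bucket, role and server names are placeholders, and the .small case of TrackingServerSize is assumed:

// 1. Create a small tracking server backed by an S3 general purpose bucket.
let created = try await sageMaker.createMlflowTrackingServer(.init(
    artifactStoreUri: "s3://example-mlflow-artifacts/store",
    automaticModelRegistration: true,
    roleArn: "arn:aws:iam::123456789012:role/ExampleMlflowTrackingRole",
    trackingServerName: "example-tracking-server",
    trackingServerSize: .small
))
print(created.trackingServerArn ?? "no ARN returned")

// 2. Check the creation status (in practice, poll until the server leaves its creating state).
let described = try await sageMaker.describeMlflowTrackingServer(.init(trackingServerName: "example-tracking-server"))
print(String(describing: described.trackingServerStatus))

// 3. Generate a one-time presigned URL for the MLflow UI; 300 and 1800 seconds sit inside
//    the ExpiresInSeconds and SessionExpirationDurationInSeconds bounds validated above.
let presigned = try await sageMaker.createPresignedMlflowTrackingServerUrl(.init(
    expiresInSeconds: 300,
    sessionExpirationDurationInSeconds: 1800,
    trackingServerName: "example-tracking-server"
))
print(presigned.authorizedUrl ?? "no URL returned")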
@@ -15308,6 +15799,8 @@ extension SageMaker { public let modelPackageVersion: Int? /// The Amazon Simple Storage Service (Amazon S3) path where the sample payload are stored. This path points to a single gzip compressed tar archive (.tar.gz suffix). public let samplePayloadUrl: String? + /// The KMS Key ID (KMSKeyId) used for encryption of model package information. + public let securityConfig: ModelPackageSecurityConfig? /// Indicates if you want to skip model validation. public let skipModelValidation: SkipModelValidation? /// Details about the algorithm that was used to create the model package. @@ -15319,7 +15812,7 @@ extension SageMaker { /// Configurations for one or more transform jobs that SageMaker runs to test the model package. public let validationSpecification: ModelPackageValidationSpecification? - public init(additionalInferenceSpecifications: [AdditionalInferenceSpecificationDefinition]? = nil, approvalDescription: String? = nil, certifyForMarketplace: Bool? = nil, createdBy: UserContext? = nil, creationTime: Date? = nil, customerMetadataProperties: [String: String]? = nil, domain: String? = nil, driftCheckBaselines: DriftCheckBaselines? = nil, inferenceSpecification: InferenceSpecification? = nil, lastModifiedBy: UserContext? = nil, lastModifiedTime: Date? = nil, metadataProperties: MetadataProperties? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelMetrics: ModelMetrics? = nil, modelPackageArn: String? = nil, modelPackageDescription: String? = nil, modelPackageGroupName: String? = nil, modelPackageName: String? = nil, modelPackageStatus: ModelPackageStatus? = nil, modelPackageStatusDetails: ModelPackageStatusDetails? = nil, modelPackageVersion: Int? = nil, samplePayloadUrl: String? = nil, skipModelValidation: SkipModelValidation? = nil, sourceAlgorithmSpecification: SourceAlgorithmSpecification? = nil, sourceUri: String? = nil, task: String? = nil, validationSpecification: ModelPackageValidationSpecification? = nil) { + public init(additionalInferenceSpecifications: [AdditionalInferenceSpecificationDefinition]? = nil, approvalDescription: String? = nil, certifyForMarketplace: Bool? = nil, createdBy: UserContext? = nil, creationTime: Date? = nil, customerMetadataProperties: [String: String]? = nil, domain: String? = nil, driftCheckBaselines: DriftCheckBaselines? = nil, inferenceSpecification: InferenceSpecification? = nil, lastModifiedBy: UserContext? = nil, lastModifiedTime: Date? = nil, metadataProperties: MetadataProperties? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelCard: ModelPackageModelCard? = nil, modelMetrics: ModelMetrics? = nil, modelPackageArn: String? = nil, modelPackageDescription: String? = nil, modelPackageGroupName: String? = nil, modelPackageName: String? = nil, modelPackageStatus: ModelPackageStatus? = nil, modelPackageStatusDetails: ModelPackageStatusDetails? = nil, modelPackageVersion: Int? = nil, samplePayloadUrl: String? = nil, securityConfig: ModelPackageSecurityConfig? = nil, skipModelValidation: SkipModelValidation? = nil, sourceAlgorithmSpecification: SourceAlgorithmSpecification? = nil, sourceUri: String? = nil, task: String? = nil, validationSpecification: ModelPackageValidationSpecification? 
= nil) { self.additionalInferenceSpecifications = additionalInferenceSpecifications self.approvalDescription = approvalDescription self.certifyForMarketplace = certifyForMarketplace @@ -15333,6 +15826,7 @@ extension SageMaker { self.lastModifiedTime = lastModifiedTime self.metadataProperties = metadataProperties self.modelApprovalStatus = modelApprovalStatus + self.modelCard = modelCard self.modelMetrics = modelMetrics self.modelPackageArn = modelPackageArn self.modelPackageDescription = modelPackageDescription @@ -15342,6 +15836,7 @@ extension SageMaker { self.modelPackageStatusDetails = modelPackageStatusDetails self.modelPackageVersion = modelPackageVersion self.samplePayloadUrl = samplePayloadUrl + self.securityConfig = securityConfig self.skipModelValidation = skipModelValidation self.sourceAlgorithmSpecification = sourceAlgorithmSpecification self.sourceUri = sourceUri @@ -15363,6 +15858,7 @@ extension SageMaker { case lastModifiedTime = "LastModifiedTime" case metadataProperties = "MetadataProperties" case modelApprovalStatus = "ModelApprovalStatus" + case modelCard = "ModelCard" case modelMetrics = "ModelMetrics" case modelPackageArn = "ModelPackageArn" case modelPackageDescription = "ModelPackageDescription" @@ -15372,6 +15868,7 @@ extension SageMaker { case modelPackageStatusDetails = "ModelPackageStatusDetails" case modelPackageVersion = "ModelPackageVersion" case samplePayloadUrl = "SamplePayloadUrl" + case securityConfig = "SecurityConfig" case skipModelValidation = "SkipModelValidation" case sourceAlgorithmSpecification = "SourceAlgorithmSpecification" case sourceUri = "SourceUri" @@ -17156,7 +17653,7 @@ extension SageMaker { try validate($0, name: "vpcOnlyTrustedAccounts[]", parent: name, min: 12) try validate($0, name: "vpcOnlyTrustedAccounts[]", parent: name, pattern: "^\\d+$") } - try self.validate(self.vpcOnlyTrustedAccounts, name: "vpcOnlyTrustedAccounts", parent: name, max: 10) + try self.validate(self.vpcOnlyTrustedAccounts, name: "vpcOnlyTrustedAccounts", parent: name, max: 20) } private enum CodingKeys: String, CodingKey { @@ -19149,8 +19646,14 @@ extension SageMaker { public let hubContentType: HubContentType? /// The version of the hub content. public let hubContentVersion: String? - - public init(creationTime: Date? = nil, documentSchemaVersion: String? = nil, hubContentArn: String? = nil, hubContentDescription: String? = nil, hubContentDisplayName: String? = nil, hubContentName: String? = nil, hubContentSearchKeywords: [String]? = nil, hubContentStatus: HubContentStatus? = nil, hubContentType: HubContentType? = nil, hubContentVersion: String? = nil) { + /// The date and time when the hub content was originally created, before any updates or revisions. + public let originalCreationTime: Date? + /// The ARN of the public hub content. + public let sageMakerPublicHubContentArn: String? + /// The support status of the hub content. + public let supportStatus: HubContentSupportStatus? + + public init(creationTime: Date? = nil, documentSchemaVersion: String? = nil, hubContentArn: String? = nil, hubContentDescription: String? = nil, hubContentDisplayName: String? = nil, hubContentName: String? = nil, hubContentSearchKeywords: [String]? = nil, hubContentStatus: HubContentStatus? = nil, hubContentType: HubContentType? = nil, hubContentVersion: String? = nil, originalCreationTime: Date? = nil, sageMakerPublicHubContentArn: String? = nil, supportStatus: HubContentSupportStatus? 
= nil) { self.creationTime = creationTime self.documentSchemaVersion = documentSchemaVersion self.hubContentArn = hubContentArn @@ -19161,6 +19664,9 @@ extension SageMaker { self.hubContentStatus = hubContentStatus self.hubContentType = hubContentType self.hubContentVersion = hubContentVersion + self.originalCreationTime = originalCreationTime + self.sageMakerPublicHubContentArn = sageMakerPublicHubContentArn + self.supportStatus = supportStatus } private enum CodingKeys: String, CodingKey { @@ -19174,6 +19680,9 @@ extension SageMaker { case hubContentStatus = "HubContentStatus" case hubContentType = "HubContentType" case hubContentVersion = "HubContentVersion" + case originalCreationTime = "OriginalCreationTime" + case sageMakerPublicHubContentArn = "SageMakerPublicHubContentArn" + case supportStatus = "SupportStatus" } } @@ -20097,6 +20606,23 @@ extension SageMaker { } } + public struct IamPolicyConstraints: AWSEncodableShape & AWSDecodableShape { + /// When SourceIp is Enabled the worker's IP address when a task is rendered in the worker portal is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. This IP address is checked by Amazon S3 and must match in order for the Amazon S3 resource to be rendered in the worker portal. + public let sourceIp: EnabledOrDisabled? + /// When VpcSourceIp is Enabled the worker's IP address when a task is rendered in private worker portal inside the VPC is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. To render the task successfully Amazon S3 checks that the presigned URL is being accessed over an Amazon S3 VPC Endpoint, and that the worker's IP address matches the IP address in the IAM policy. To learn more about configuring private worker portal, see Use Amazon VPC mode from a private worker portal. + public let vpcSourceIp: EnabledOrDisabled? + + public init(sourceIp: EnabledOrDisabled? = nil, vpcSourceIp: EnabledOrDisabled? = nil) { + self.sourceIp = sourceIp + self.vpcSourceIp = vpcSourceIp + } + + private enum CodingKeys: String, CodingKey { + case sourceIp = "SourceIp" + case vpcSourceIp = "VpcSourceIp" + } + } + public struct IdentityProviderOAuthSetting: AWSEncodableShape & AWSDecodableShape { /// The name of the data source that you're connecting to. Canvas currently supports OAuth for Snowflake and Salesforce Data Cloud. public let dataSourceName: DataSourceName? 
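IamPolicyConstraints, added just above, is the switch that controls whether the Amazon S3 presigned URLs rendered in the worker portal are pinned to the worker's source IP. A tiny, hedged fragment; the .enabled/.disabled cases of EnabledOrDisabled, and the WorkerAccessConfiguration wrapper that carries this into CreateWorkteamRequest (seen earlier in this file), are assumptions rather than something spelled out in these hunks:

// Require the S3 presigned URL to match the worker's public source IP; leave the VPC source-IP check disabled.
let constraints = SageMaker.IamPolicyConstraints(sourceIp: .enabled, vpcSourceIp: .disabled)
// `constraints` would then travel inside the work team's WorkerAccessConfiguration
// when calling createWorkteam or updateWorkteam.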
@@ -20289,7 +20815,6 @@ extension SageMaker { try self.validate(self.hubContentDocument, name: "hubContentDocument", parent: name, max: 65535) try self.validate(self.hubContentDocument, name: "hubContentDocument", parent: name, pattern: ".*") try self.validate(self.hubContentMarkdown, name: "hubContentMarkdown", parent: name, max: 65535) - try self.validate(self.hubContentMarkdown, name: "hubContentMarkdown", parent: name, pattern: ".*") try self.validate(self.hubContentName, name: "hubContentName", parent: name, max: 63) try self.validate(self.hubContentName, name: "hubContentName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.hubContentSearchKeywords?.forEach { @@ -20300,8 +20825,7 @@ extension SageMaker { try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, max: 14) try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, min: 5) try self.validate(self.hubContentVersion, name: "hubContentVersion", parent: name, pattern: "^\\d{1,4}.\\d{1,4}.\\d{1,4}$") - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") } @@ -20691,6 +21215,24 @@ extension SageMaker { } } + public struct InferenceHubAccessConfig: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the hub content for which deployment access is allowed. + public let hubContentArn: String? + + public init(hubContentArn: String? = nil) { + self.hubContentArn = hubContentArn + } + + public func validate(name: String) throws { + try self.validate(self.hubContentArn, name: "hubContentArn", parent: name, max: 255) + try self.validate(self.hubContentArn, name: "hubContentArn", parent: name, pattern: ".*") + } + + private enum CodingKeys: String, CodingKey { + case hubContentArn = "HubContentArn" + } + } + public struct InferenceMetrics: AWSDecodableShape { /// The expected maximum number of requests per minute for the instance. public let maxInvocations: Int? @@ -21901,7 +22443,7 @@ extension SageMaker { public struct ListAppsRequest: AWSEncodableShape { /// A parameter to search for the domain ID. public let domainIdEquals: String? - /// The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10. + /// This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10. public let maxResults: Int? /// If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. public let nextToken: String? @@ -22804,7 +23346,7 @@ extension SageMaker { } public struct ListDomainsRequest: AWSEncodableShape { - /// The total number of items to return in the response. 
If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10. + /// This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10. public let maxResults: Int? /// If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. public let nextToken: String? @@ -23360,8 +23902,7 @@ extension SageMaker { public func validate(name: String) throws { try self.validate(self.hubContentName, name: "hubContentName", parent: name, max: 63) try self.validate(self.hubContentName, name: "hubContentName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.maxSchemaVersion, name: "maxSchemaVersion", parent: name, max: 14) @@ -23442,8 +23983,7 @@ extension SageMaker { } public func validate(name: String) throws { - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.maxSchemaVersion, name: "maxSchemaVersion", parent: name, max: 14) @@ -24340,6 +24880,73 @@ extension SageMaker { } } + public struct ListMlflowTrackingServersRequest: AWSEncodableShape { + /// Use the CreatedAfter filter to only list tracking servers created after a specific date and time. Listed tracking servers are shown with a date and time such as "2024-03-16T01:46:56+00:00". The CreatedAfter parameter takes in a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter. + public let createdAfter: Date? + /// Use the CreatedBefore filter to only list tracking servers created before a specific date and time. Listed tracking servers are shown with a date and time such as "2024-03-16T01:46:56+00:00". The CreatedBefore parameter takes in a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter. + public let createdBefore: Date? + /// The maximum number of tracking servers to list. + public let maxResults: Int? + /// Filter for tracking servers using the specified MLflow version. + public let mlflowVersion: String? 
+ /// If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. + public let nextToken: String? + /// Filter for trackings servers sorting by name, creation time, or creation status. + public let sortBy: SortTrackingServerBy? + /// Change the order of the listed tracking servers. By default, tracking servers are listed in Descending order by creation time. To change the list order, you can specify SortOrder to be Ascending. + public let sortOrder: SortOrder? + /// Filter for tracking servers with a specified creation status. + public let trackingServerStatus: TrackingServerStatus? + + public init(createdAfter: Date? = nil, createdBefore: Date? = nil, maxResults: Int? = nil, mlflowVersion: String? = nil, nextToken: String? = nil, sortBy: SortTrackingServerBy? = nil, sortOrder: SortOrder? = nil, trackingServerStatus: TrackingServerStatus? = nil) { + self.createdAfter = createdAfter + self.createdBefore = createdBefore + self.maxResults = maxResults + self.mlflowVersion = mlflowVersion + self.nextToken = nextToken + self.sortBy = sortBy + self.sortOrder = sortOrder + self.trackingServerStatus = trackingServerStatus + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.mlflowVersion, name: "mlflowVersion", parent: name, max: 16) + try self.validate(self.mlflowVersion, name: "mlflowVersion", parent: name, pattern: "^[0-9]*.[0-9]*.[0-9]*$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: ".*") + } + + private enum CodingKeys: String, CodingKey { + case createdAfter = "CreatedAfter" + case createdBefore = "CreatedBefore" + case maxResults = "MaxResults" + case mlflowVersion = "MlflowVersion" + case nextToken = "NextToken" + case sortBy = "SortBy" + case sortOrder = "SortOrder" + case trackingServerStatus = "TrackingServerStatus" + } + } + + public struct ListMlflowTrackingServersResponse: AWSDecodableShape { + /// If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. + public let nextToken: String? + /// A list of tracking servers according to chosen filters. + public let trackingServerSummaries: [TrackingServerSummary]? + + public init(nextToken: String? = nil, trackingServerSummaries: [TrackingServerSummary]? = nil) { + self.nextToken = nextToken + self.trackingServerSummaries = trackingServerSummaries + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case trackingServerSummaries = "TrackingServerSummaries" + } + } + public struct ListModelBiasJobDefinitionsRequest: AWSEncodableShape { /// A filter that returns only model bias jobs created after a specified time. public let creationTimeAfter: Date? @@ -24744,6 +25351,10 @@ extension SageMaker { public let creationTimeAfter: Date? /// A filter that returns only model groups created before the specified time. public let creationTimeBefore: Date? + /// A filter that returns either model groups shared with you or model groups in + /// your own account. When the value is CrossAccount, the results show + /// the resources made discoverable to you from other accounts. When the value is SameAccount or null, the results show resources from your account. 
The default is SameAccount. + public let crossAccountFilterOption: CrossAccountFilterOption? /// The maximum number of results to return in the response. public let maxResults: Int? /// A string in the model group name. This filter returns only model groups whose name contains the specified string. @@ -24755,9 +25366,10 @@ extension SageMaker { /// The sort order for results. The default is Ascending. public let sortOrder: SortOrder? - public init(creationTimeAfter: Date? = nil, creationTimeBefore: Date? = nil, maxResults: Int? = nil, nameContains: String? = nil, nextToken: String? = nil, sortBy: ModelPackageGroupSortBy? = nil, sortOrder: SortOrder? = nil) { + public init(creationTimeAfter: Date? = nil, creationTimeBefore: Date? = nil, crossAccountFilterOption: CrossAccountFilterOption? = nil, maxResults: Int? = nil, nameContains: String? = nil, nextToken: String? = nil, sortBy: ModelPackageGroupSortBy? = nil, sortOrder: SortOrder? = nil) { self.creationTimeAfter = creationTimeAfter self.creationTimeBefore = creationTimeBefore + self.crossAccountFilterOption = crossAccountFilterOption self.maxResults = maxResults self.nameContains = nameContains self.nextToken = nextToken @@ -24777,6 +25389,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case creationTimeAfter = "CreationTimeAfter" case creationTimeBefore = "CreationTimeBefore" + case crossAccountFilterOption = "CrossAccountFilterOption" case maxResults = "MaxResults" case nameContains = "NameContains" case nextToken = "NextToken" @@ -25922,7 +26535,7 @@ extension SageMaker { public struct ListSpacesRequest: AWSEncodableShape { /// A parameter to search for the domain ID. public let domainIdEquals: String? - /// The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10. + /// This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10. public let maxResults: Int? /// If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. public let nextToken: String? @@ -26571,7 +27184,7 @@ extension SageMaker { public struct ListUserProfilesRequest: AWSEncodableShape { /// A parameter by which to filter the results. public let domainIdEquals: String? - /// The total number of items to return in the response. If the total number of items available is more than the value specified, a NextToken is provided in the response. To resume pagination, provide the NextToken value in the as part of a subsequent call. The default value is 10. + /// This parameter defines the maximum number of results that can be return in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10. public let maxResults: Int? 
/// If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results. public let nextToken: String? @@ -27853,6 +28466,7 @@ extension SageMaker { public let metadataProperties: MetadataProperties? /// The approval status of the model. This can be one of the following values. APPROVED - The model is approved REJECTED - The model is rejected. PENDING_MANUAL_APPROVAL - The model is waiting for manual approval. public let modelApprovalStatus: ModelApprovalStatus? + public let modelCard: ModelPackageModelCard? /// Metrics for the model. public let modelMetrics: ModelMetrics? /// The Amazon Resource Name (ARN) of the model package. @@ -27871,6 +28485,7 @@ extension SageMaker { public let modelPackageVersion: Int? /// The Amazon Simple Storage Service path where the sample payload are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix). public let samplePayloadUrl: String? + public let securityConfig: ModelPackageSecurityConfig? /// Indicates if you want to skip model validation. public let skipModelValidation: SkipModelValidation? /// A list of algorithms that were used to create a model package. @@ -27884,7 +28499,7 @@ extension SageMaker { /// Specifies batch transform jobs that SageMaker runs to validate your model package. public let validationSpecification: ModelPackageValidationSpecification? - public init(additionalInferenceSpecifications: [AdditionalInferenceSpecificationDefinition]? = nil, approvalDescription: String? = nil, certifyForMarketplace: Bool? = nil, createdBy: UserContext? = nil, creationTime: Date? = nil, customerMetadataProperties: [String: String]? = nil, domain: String? = nil, driftCheckBaselines: DriftCheckBaselines? = nil, inferenceSpecification: InferenceSpecification? = nil, lastModifiedBy: UserContext? = nil, lastModifiedTime: Date? = nil, metadataProperties: MetadataProperties? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelMetrics: ModelMetrics? = nil, modelPackageArn: String? = nil, modelPackageDescription: String? = nil, modelPackageGroupName: String? = nil, modelPackageName: String? = nil, modelPackageStatus: ModelPackageStatus? = nil, modelPackageStatusDetails: ModelPackageStatusDetails? = nil, modelPackageVersion: Int? = nil, samplePayloadUrl: String? = nil, skipModelValidation: SkipModelValidation? = nil, sourceAlgorithmSpecification: SourceAlgorithmSpecification? = nil, sourceUri: String? = nil, tags: [Tag]? = nil, task: String? = nil, validationSpecification: ModelPackageValidationSpecification? = nil) { + public init(additionalInferenceSpecifications: [AdditionalInferenceSpecificationDefinition]? = nil, approvalDescription: String? = nil, certifyForMarketplace: Bool? = nil, createdBy: UserContext? = nil, creationTime: Date? = nil, customerMetadataProperties: [String: String]? = nil, domain: String? = nil, driftCheckBaselines: DriftCheckBaselines? = nil, inferenceSpecification: InferenceSpecification? = nil, lastModifiedBy: UserContext? = nil, lastModifiedTime: Date? = nil, metadataProperties: MetadataProperties? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelCard: ModelPackageModelCard? = nil, modelMetrics: ModelMetrics? = nil, modelPackageArn: String? = nil, modelPackageDescription: String? = nil, modelPackageGroupName: String? = nil, modelPackageName: String? = nil, modelPackageStatus: ModelPackageStatus? = nil, modelPackageStatusDetails: ModelPackageStatusDetails? = nil, modelPackageVersion: Int? 
= nil, samplePayloadUrl: String? = nil, securityConfig: ModelPackageSecurityConfig? = nil, skipModelValidation: SkipModelValidation? = nil, sourceAlgorithmSpecification: SourceAlgorithmSpecification? = nil, sourceUri: String? = nil, tags: [Tag]? = nil, task: String? = nil, validationSpecification: ModelPackageValidationSpecification? = nil) { self.additionalInferenceSpecifications = additionalInferenceSpecifications self.approvalDescription = approvalDescription self.certifyForMarketplace = certifyForMarketplace @@ -27898,6 +28513,7 @@ extension SageMaker { self.lastModifiedTime = lastModifiedTime self.metadataProperties = metadataProperties self.modelApprovalStatus = modelApprovalStatus + self.modelCard = modelCard self.modelMetrics = modelMetrics self.modelPackageArn = modelPackageArn self.modelPackageDescription = modelPackageDescription @@ -27907,6 +28523,7 @@ extension SageMaker { self.modelPackageStatusDetails = modelPackageStatusDetails self.modelPackageVersion = modelPackageVersion self.samplePayloadUrl = samplePayloadUrl + self.securityConfig = securityConfig self.skipModelValidation = skipModelValidation self.sourceAlgorithmSpecification = sourceAlgorithmSpecification self.sourceUri = sourceUri @@ -27929,6 +28546,7 @@ extension SageMaker { case lastModifiedTime = "LastModifiedTime" case metadataProperties = "MetadataProperties" case modelApprovalStatus = "ModelApprovalStatus" + case modelCard = "ModelCard" case modelMetrics = "ModelMetrics" case modelPackageArn = "ModelPackageArn" case modelPackageDescription = "ModelPackageDescription" @@ -27938,6 +28556,7 @@ extension SageMaker { case modelPackageStatusDetails = "ModelPackageStatusDetails" case modelPackageVersion = "ModelPackageVersion" case samplePayloadUrl = "SamplePayloadUrl" + case securityConfig = "SecurityConfig" case skipModelValidation = "SkipModelValidation" case sourceAlgorithmSpecification = "SourceAlgorithmSpecification" case sourceUri = "SourceUri" @@ -28095,6 +28714,46 @@ extension SageMaker { } } + public struct ModelPackageModelCard: AWSEncodableShape & AWSDecodableShape { + /// The content of the model card. The content must follow the schema described in Model Package Model Card Schema. + public let modelCardContent: String? + /// The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval. Draft: The model card is a work in progress. PendingReview: The model card is pending review. Approved: The model card is approved. Archived: The model card is archived. No more updates can be made to the model card content. If you try to update the model card content, you will receive the message Model Card is in Archived state. + public let modelCardStatus: ModelCardStatus? + + public init(modelCardContent: String? = nil, modelCardStatus: ModelCardStatus? = nil) { + self.modelCardContent = modelCardContent + self.modelCardStatus = modelCardStatus + } + + public func validate(name: String) throws { + try self.validate(self.modelCardContent, name: "modelCardContent", parent: name, max: 100000) + try self.validate(self.modelCardContent, name: "modelCardContent", parent: name, pattern: ".*") + } + + private enum CodingKeys: String, CodingKey { + case modelCardContent = "ModelCardContent" + case modelCardStatus = "ModelCardStatus" + } + } + + public struct ModelPackageSecurityConfig: AWSEncodableShape & AWSDecodableShape { + /// The KMS Key ID (KMSKeyId) used for encryption of model package information. + public let kmsKeyId: String? 
+ + public init(kmsKeyId: String? = nil) { + self.kmsKeyId = kmsKeyId + } + + public func validate(name: String) throws { + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2048) + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, pattern: "^[a-zA-Z0-9:/_-]*$") + } + + private enum CodingKeys: String, CodingKey { + case kmsKeyId = "KmsKeyId" + } + } + public struct ModelPackageStatusDetails: AWSDecodableShape { /// The status of the scan of the Docker image container for the model package. public let imageScanStatuses: [ModelPackageStatusItem]? @@ -29462,6 +30121,8 @@ extension SageMaker { } public struct OidcConfig: AWSEncodableShape { + /// A string to string map of identifiers specific to the custom identity provider (IdP) being used. + public let authenticationRequestExtraParams: [String: String]? /// The OIDC IdP authorization endpoint used to configure your private workforce. public let authorizationEndpoint: String? /// The OIDC IdP client ID used to configure your private workforce. @@ -29474,23 +30135,34 @@ extension SageMaker { public let jwksUri: String? /// The OIDC IdP logout endpoint used to configure your private workforce. public let logoutEndpoint: String? + /// An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access. + public let scope: String? /// The OIDC IdP token endpoint used to configure your private workforce. public let tokenEndpoint: String? /// The OIDC IdP user information endpoint used to configure your private workforce. public let userInfoEndpoint: String? - public init(authorizationEndpoint: String? = nil, clientId: String? = nil, clientSecret: String? = nil, issuer: String? = nil, jwksUri: String? = nil, logoutEndpoint: String? = nil, tokenEndpoint: String? = nil, userInfoEndpoint: String? = nil) { + public init(authenticationRequestExtraParams: [String: String]? = nil, authorizationEndpoint: String? = nil, clientId: String? = nil, clientSecret: String? = nil, issuer: String? = nil, jwksUri: String? = nil, logoutEndpoint: String? = nil, scope: String? = nil, tokenEndpoint: String? = nil, userInfoEndpoint: String? 
= nil) { + self.authenticationRequestExtraParams = authenticationRequestExtraParams self.authorizationEndpoint = authorizationEndpoint self.clientId = clientId self.clientSecret = clientSecret self.issuer = issuer self.jwksUri = jwksUri self.logoutEndpoint = logoutEndpoint + self.scope = scope self.tokenEndpoint = tokenEndpoint self.userInfoEndpoint = userInfoEndpoint } public func validate(name: String) throws { + try self.authenticationRequestExtraParams?.forEach { + try validate($0.key, name: "authenticationRequestExtraParams.key", parent: name, max: 512) + try validate($0.key, name: "authenticationRequestExtraParams.key", parent: name, pattern: ".*") + try validate($0.value, name: "authenticationRequestExtraParams[\"\($0.key)\"]", parent: name, max: 512) + try validate($0.value, name: "authenticationRequestExtraParams[\"\($0.key)\"]", parent: name, pattern: ".*") + } + try self.validate(self.authenticationRequestExtraParams, name: "authenticationRequestExtraParams", parent: name, max: 10) try self.validate(self.authorizationEndpoint, name: "authorizationEndpoint", parent: name, max: 500) try self.validate(self.authorizationEndpoint, name: "authorizationEndpoint", parent: name, pattern: "^https://\\S+$") try self.validate(self.clientId, name: "clientId", parent: name, max: 1024) @@ -29505,6 +30177,8 @@ extension SageMaker { try self.validate(self.jwksUri, name: "jwksUri", parent: name, pattern: "^https://\\S+$") try self.validate(self.logoutEndpoint, name: "logoutEndpoint", parent: name, max: 500) try self.validate(self.logoutEndpoint, name: "logoutEndpoint", parent: name, pattern: "^https://\\S+$") + try self.validate(self.scope, name: "scope", parent: name, max: 1024) + try self.validate(self.scope, name: "scope", parent: name, pattern: "^[!#-\\[\\]-~]+( [!#-\\[\\]-~]+)*$") try self.validate(self.tokenEndpoint, name: "tokenEndpoint", parent: name, max: 500) try self.validate(self.tokenEndpoint, name: "tokenEndpoint", parent: name, pattern: "^https://\\S+$") try self.validate(self.userInfoEndpoint, name: "userInfoEndpoint", parent: name, max: 500) @@ -29512,18 +30186,22 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case authenticationRequestExtraParams = "AuthenticationRequestExtraParams" case authorizationEndpoint = "AuthorizationEndpoint" case clientId = "ClientId" case clientSecret = "ClientSecret" case issuer = "Issuer" case jwksUri = "JwksUri" case logoutEndpoint = "LogoutEndpoint" + case scope = "Scope" case tokenEndpoint = "TokenEndpoint" case userInfoEndpoint = "UserInfoEndpoint" } } public struct OidcConfigForResponse: AWSDecodableShape { + /// A string to string map of identifiers specific to the custom identity provider (IdP) being used. + public let authenticationRequestExtraParams: [String: String]? /// The OIDC IdP authorization endpoint used to configure your private workforce. public let authorizationEndpoint: String? /// The OIDC IdP client ID used to configure your private workforce. @@ -29534,27 +30212,33 @@ extension SageMaker { public let jwksUri: String? /// The OIDC IdP logout endpoint used to configure your private workforce. public let logoutEndpoint: String? + /// An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access. + public let scope: String? /// The OIDC IdP token endpoint used to configure your private workforce. public let tokenEndpoint: String? /// The OIDC IdP user information endpoint used to configure your private workforce. 
public let userInfoEndpoint: String? - public init(authorizationEndpoint: String? = nil, clientId: String? = nil, issuer: String? = nil, jwksUri: String? = nil, logoutEndpoint: String? = nil, tokenEndpoint: String? = nil, userInfoEndpoint: String? = nil) { + public init(authenticationRequestExtraParams: [String: String]? = nil, authorizationEndpoint: String? = nil, clientId: String? = nil, issuer: String? = nil, jwksUri: String? = nil, logoutEndpoint: String? = nil, scope: String? = nil, tokenEndpoint: String? = nil, userInfoEndpoint: String? = nil) { + self.authenticationRequestExtraParams = authenticationRequestExtraParams self.authorizationEndpoint = authorizationEndpoint self.clientId = clientId self.issuer = issuer self.jwksUri = jwksUri self.logoutEndpoint = logoutEndpoint + self.scope = scope self.tokenEndpoint = tokenEndpoint self.userInfoEndpoint = userInfoEndpoint } private enum CodingKeys: String, CodingKey { + case authenticationRequestExtraParams = "AuthenticationRequestExtraParams" case authorizationEndpoint = "AuthorizationEndpoint" case clientId = "ClientId" case issuer = "Issuer" case jwksUri = "JwksUri" case logoutEndpoint = "LogoutEndpoint" + case scope = "Scope" case tokenEndpoint = "TokenEndpoint" case userInfoEndpoint = "UserInfoEndpoint" } @@ -30801,6 +31485,8 @@ extension SageMaker { public let coreDumpConfig: ProductionVariantCoreDumpConfig? /// You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoint. You can turn on or turn off SSM access for a production variant behind an existing endpoint by creating a new endpoint configuration and calling UpdateEndpoint. public let enableSSMAccess: Bool? + /// Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions. + public let inferenceAmiVersion: ProductionVariantInferenceAmiVersion? /// Number of instances to launch initially. public let initialInstanceCount: Int? /// Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. The traffic to a production variant is determined by the ratio of the VariantWeight to the sum of all VariantWeight values across all ProductionVariants. If unspecified, it defaults to 1.0. @@ -30822,11 +31508,12 @@ extension SageMaker { /// The size, in GB, of the ML storage volume attached to individual inference instance associated with the production variant. Currently only Amazon EBS gp2 storage volumes are supported. public let volumeSizeInGB: Int? - public init(acceleratorType: ProductionVariantAcceleratorType? = nil, containerStartupHealthCheckTimeoutInSeconds: Int? = nil, coreDumpConfig: ProductionVariantCoreDumpConfig? = nil, enableSSMAccess: Bool? = nil, initialInstanceCount: Int? = nil, initialVariantWeight: Float? = nil, instanceType: ProductionVariantInstanceType? = nil, managedInstanceScaling: ProductionVariantManagedInstanceScaling? = nil, modelDataDownloadTimeoutInSeconds: Int? = nil, modelName: String? 
= nil, routingConfig: ProductionVariantRoutingConfig? = nil, serverlessConfig: ProductionVariantServerlessConfig? = nil, variantName: String? = nil, volumeSizeInGB: Int? = nil) { + public init(acceleratorType: ProductionVariantAcceleratorType? = nil, containerStartupHealthCheckTimeoutInSeconds: Int? = nil, coreDumpConfig: ProductionVariantCoreDumpConfig? = nil, enableSSMAccess: Bool? = nil, inferenceAmiVersion: ProductionVariantInferenceAmiVersion? = nil, initialInstanceCount: Int? = nil, initialVariantWeight: Float? = nil, instanceType: ProductionVariantInstanceType? = nil, managedInstanceScaling: ProductionVariantManagedInstanceScaling? = nil, modelDataDownloadTimeoutInSeconds: Int? = nil, modelName: String? = nil, routingConfig: ProductionVariantRoutingConfig? = nil, serverlessConfig: ProductionVariantServerlessConfig? = nil, variantName: String? = nil, volumeSizeInGB: Int? = nil) { self.acceleratorType = acceleratorType self.containerStartupHealthCheckTimeoutInSeconds = containerStartupHealthCheckTimeoutInSeconds self.coreDumpConfig = coreDumpConfig self.enableSSMAccess = enableSSMAccess + self.inferenceAmiVersion = inferenceAmiVersion self.initialInstanceCount = initialInstanceCount self.initialVariantWeight = initialVariantWeight self.instanceType = instanceType @@ -30862,6 +31549,7 @@ extension SageMaker { case containerStartupHealthCheckTimeoutInSeconds = "ContainerStartupHealthCheckTimeoutInSeconds" case coreDumpConfig = "CoreDumpConfig" case enableSSMAccess = "EnableSSMAccess" + case inferenceAmiVersion = "InferenceAmiVersion" case initialInstanceCount = "InitialInstanceCount" case initialVariantWeight = "InitialVariantWeight" case instanceType = "InstanceType" @@ -32672,6 +33360,8 @@ extension SageMaker { public struct S3ModelDataSource: AWSEncodableShape & AWSDecodableShape { /// Specifies how the ML model data is prepared. If you choose Gzip and choose S3Object as the value of S3DataType, S3Uri identifies an object that is a gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object during model deployment. If you choose None and chooose S3Object as the value of S3DataType, S3Uri identifies an object that represents an uncompressed ML model to deploy. If you choose None and choose S3Prefix as the value of S3DataType, S3Uri identifies a key name prefix, under which all objects represents the uncompressed ML model to deploy. If you choose None, then SageMaker will follow rules below when creating model data files under /opt/ml/model directory for use by your inference code: If you choose S3Object as the value of S3DataType, then SageMaker will split the key of the S3 object referenced by S3Uri by slash (/), and use the last part as the filename of the file holding the content of the S3 object. If you choose S3Prefix as the value of S3DataType, then for each S3 object under the key name pefix referenced by S3Uri, SageMaker will trim its key by the prefix, and use the remainder as the path (relative to /opt/ml/model) of the file holding the content of the S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as directory names and the last part as filename of the file holding the content of the S3 object. Do not use any of the following as file names or directory names: An empty or blank string A string which contains null bytes A string longer than 255 bytes A single dot (.) A double dot (..) Ambiguous file names will result in model deployment failure. 
For example, if your uncompressed ML model consists of two S3 objects s3://mybucket/model/weights and s3://mybucket/model/weights/part1 and you specify s3://mybucket/model/ as the value of S3Uri and S3Prefix as the value of S3DataType, then it will result in name clash between /opt/ml/model/weights (a regular file) and /opt/ml/model/weights/ (a directory). Do not organize the model artifacts in S3 console using folders. When you create a folder in S3 console, S3 creates a 0-byte object with a key set to the folder name you provide. They key of the 0-byte object ends with a slash (/) which violates SageMaker restrictions on model artifact file names, leading to model deployment failure. public let compressionType: ModelCompressionType? + /// Configuration information for hub access. + public let hubAccessConfig: InferenceHubAccessConfig? /// Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model. public let modelAccessConfig: ModelAccessConfig? /// Specifies the type of ML model data to deploy. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix as part of the ML model data to deploy. A valid key name prefix identified by S3Uri always ends with a forward slash (/). If you choose S3Object, S3Uri identifies an object that is the ML model data to deploy. @@ -32679,26 +33369,42 @@ extension SageMaker { /// Specifies the S3 path of ML model data to deploy. public let s3Uri: String? - public init(compressionType: ModelCompressionType? = nil, modelAccessConfig: ModelAccessConfig? = nil, s3DataType: S3ModelDataType? = nil, s3Uri: String? = nil) { + public init(compressionType: ModelCompressionType? = nil, hubAccessConfig: InferenceHubAccessConfig? = nil, modelAccessConfig: ModelAccessConfig? = nil, s3DataType: S3ModelDataType? = nil, s3Uri: String? = nil) { self.compressionType = compressionType + self.hubAccessConfig = hubAccessConfig self.modelAccessConfig = modelAccessConfig self.s3DataType = s3DataType self.s3Uri = s3Uri } public func validate(name: String) throws { + try self.hubAccessConfig?.validate(name: "\(name).hubAccessConfig") try self.validate(self.s3Uri, name: "s3Uri", parent: name, max: 1024) try self.validate(self.s3Uri, name: "s3Uri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") } private enum CodingKeys: String, CodingKey { case compressionType = "CompressionType" + case hubAccessConfig = "HubAccessConfig" case modelAccessConfig = "ModelAccessConfig" case s3DataType = "S3DataType" case s3Uri = "S3Uri" } } + public struct S3Presign: AWSEncodableShape & AWSDecodableShape { + /// Use this parameter to specify the allowed request source. Possible sources are either SourceIp or VpcSourceIp. + public let iamPolicyConstraints: IamPolicyConstraints? + + public init(iamPolicyConstraints: IamPolicyConstraints? = nil) { + self.iamPolicyConstraints = iamPolicyConstraints + } + + private enum CodingKeys: String, CodingKey { + case iamPolicyConstraints = "IamPolicyConstraints" + } + } + public struct S3StorageConfig: AWSEncodableShape & AWSDecodableShape { /// The Amazon Web Services Key Management Service (KMS) key ARN of the key used to encrypt any objects written into the OfflineStore S3 location. 
The IAM roleARN that is passed as a parameter to CreateFeatureGroup must have below permissions to the KmsKeyId: "kms:GenerateDataKey" public let kmsKeyId: String? @@ -33223,7 +33929,7 @@ extension SageMaker { } public struct SessionChainingConfig: AWSEncodableShape { - /// Set to True to allow SageMaker to extract session tags from a training job creation role and reuse these tags when assuming the training job execution role. + /// Set to True to allow SageMaker to extract session tags from a training job creation role and reuse these tags when assuming the training job execution role. public let enableSessionTagChaining: Bool? public init(enableSessionTagChaining: Bool? = nil) { @@ -33512,7 +34218,7 @@ extension SageMaker { try self.customFileSystems?.forEach { try $0.validate(name: "\(name).customFileSystems[]") } - try self.validate(self.customFileSystems, name: "customFileSystems", parent: name, max: 1) + try self.validate(self.customFileSystems, name: "customFileSystems", parent: name, max: 5) try self.jupyterLabAppSettings?.validate(name: "\(name).jupyterLabAppSettings") try self.jupyterServerAppSettings?.validate(name: "\(name).jupyterServerAppSettings") try self.kernelGatewayAppSettings?.validate(name: "\(name).kernelGatewayAppSettings") @@ -33676,6 +34382,38 @@ extension SageMaker { } } + public struct StartMlflowTrackingServerRequest: AWSEncodableShape { + /// The name of the tracking server to start. + public let trackingServerName: String? + + public init(trackingServerName: String? = nil) { + self.trackingServerName = trackingServerName + } + + public func validate(name: String) throws { + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + } + + private enum CodingKeys: String, CodingKey { + case trackingServerName = "TrackingServerName" + } + } + + public struct StartMlflowTrackingServerResponse: AWSDecodableShape { + /// The ARN of the started tracking server. + public let trackingServerArn: String? + + public init(trackingServerArn: String? = nil) { + self.trackingServerArn = trackingServerArn + } + + private enum CodingKeys: String, CodingKey { + case trackingServerArn = "TrackingServerArn" + } + } + public struct StartMonitoringScheduleRequest: AWSEncodableShape { /// The name of the schedule to start. public let monitoringScheduleName: String? @@ -33983,6 +34721,38 @@ extension SageMaker { } } + public struct StopMlflowTrackingServerRequest: AWSEncodableShape { + /// The name of the tracking server to stop. + public let trackingServerName: String? + + public init(trackingServerName: String? = nil) { + self.trackingServerName = trackingServerName + } + + public func validate(name: String) throws { + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + } + + private enum CodingKeys: String, CodingKey { + case trackingServerName = "TrackingServerName" + } + } + + public struct StopMlflowTrackingServerResponse: AWSDecodableShape { + /// The ARN of the stopped tracking server. + public let trackingServerArn: String? 
+ + public init(trackingServerArn: String? = nil) { + self.trackingServerArn = trackingServerArn + } + + private enum CodingKeys: String, CodingKey { + case trackingServerArn = "TrackingServerArn" + } + } + public struct StopMonitoringScheduleRequest: AWSEncodableShape { /// The name of the schedule to stop. public let monitoringScheduleName: String? @@ -34582,6 +35352,7 @@ extension SageMaker { } public struct TimeSeriesForecastingJobConfig: AWSEncodableShape & AWSDecodableShape { + public let candidateGenerationConfig: CandidateGenerationConfig? public let completionCriteria: AutoMLJobCompletionCriteria? /// A URL to the Amazon S3 data source containing additional selected features that complement the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig. You can input FeatureAttributeNames (optional) in JSON format as shown below: { "FeatureAttributeNames":["col1", "col2", ...] }. You can also specify the data type of the feature (optional) in the format shown below: { "FeatureDataTypes":{"col1":"numeric", "col2":"categorical" ... } } Autopilot supports the following data types: numeric, categorical, text, and datetime. These column keys must not include any column set in TimeSeriesConfig. public let featureSpecificationS3Uri: String? @@ -34598,7 +35369,8 @@ extension SageMaker { /// The transformations modifying specific attributes of the time-series, such as filling strategies for missing values. public let transformations: TimeSeriesTransformations? - public init(completionCriteria: AutoMLJobCompletionCriteria? = nil, featureSpecificationS3Uri: String? = nil, forecastFrequency: String? = nil, forecastHorizon: Int? = nil, forecastQuantiles: [String]? = nil, holidayConfig: [HolidayConfigAttributes]? = nil, timeSeriesConfig: TimeSeriesConfig? = nil, transformations: TimeSeriesTransformations? = nil) { + public init(candidateGenerationConfig: CandidateGenerationConfig? = nil, completionCriteria: AutoMLJobCompletionCriteria? = nil, featureSpecificationS3Uri: String? = nil, forecastFrequency: String? = nil, forecastHorizon: Int? = nil, forecastQuantiles: [String]? = nil, holidayConfig: [HolidayConfigAttributes]? = nil, timeSeriesConfig: TimeSeriesConfig? = nil, transformations: TimeSeriesTransformations? 
= nil) { + self.candidateGenerationConfig = candidateGenerationConfig self.completionCriteria = completionCriteria self.featureSpecificationS3Uri = featureSpecificationS3Uri self.forecastFrequency = forecastFrequency @@ -34610,6 +35382,7 @@ extension SageMaker { } public func validate(name: String) throws { + try self.candidateGenerationConfig?.validate(name: "\(name).candidateGenerationConfig") try self.completionCriteria?.validate(name: "\(name).completionCriteria") try self.validate(self.featureSpecificationS3Uri, name: "featureSpecificationS3Uri", parent: name, max: 1024) try self.validate(self.featureSpecificationS3Uri, name: "featureSpecificationS3Uri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") @@ -34634,6 +35407,7 @@ extension SageMaker { } private enum CodingKeys: String, CodingKey { + case candidateGenerationConfig = "CandidateGenerationConfig" case completionCriteria = "CompletionCriteria" case featureSpecificationS3Uri = "FeatureSpecificationS3Uri" case forecastFrequency = "ForecastFrequency" @@ -34702,6 +35476,43 @@ extension SageMaker { } } + public struct TrackingServerSummary: AWSDecodableShape { + /// The creation time of a listed tracking server. + public let creationTime: Date? + /// The activity status of a listed tracking server. + public let isActive: IsTrackingServerActive? + /// The last modified time of a listed tracking server. + public let lastModifiedTime: Date? + /// The MLflow version used for a listed tracking server. + public let mlflowVersion: String? + /// The ARN of a listed tracking server. + public let trackingServerArn: String? + /// The name of a listed tracking server. + public let trackingServerName: String? + /// The creation status of a listed tracking server. + public let trackingServerStatus: TrackingServerStatus? + + public init(creationTime: Date? = nil, isActive: IsTrackingServerActive? = nil, lastModifiedTime: Date? = nil, mlflowVersion: String? = nil, trackingServerArn: String? = nil, trackingServerName: String? = nil, trackingServerStatus: TrackingServerStatus? = nil) { + self.creationTime = creationTime + self.isActive = isActive + self.lastModifiedTime = lastModifiedTime + self.mlflowVersion = mlflowVersion + self.trackingServerArn = trackingServerArn + self.trackingServerName = trackingServerName + self.trackingServerStatus = trackingServerStatus + } + + private enum CodingKeys: String, CodingKey { + case creationTime = "CreationTime" + case isActive = "IsActive" + case lastModifiedTime = "LastModifiedTime" + case mlflowVersion = "MlflowVersion" + case trackingServerArn = "TrackingServerArn" + case trackingServerName = "TrackingServerName" + case trackingServerStatus = "TrackingServerStatus" + } + } + public struct TrafficPattern: AWSEncodableShape & AWSDecodableShape { /// Defines the phases traffic specification. public let phases: [Phase]? 
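A rough usage sketch for the new MLflow tracking server shapes above (ListMlflowTrackingServersRequest/Response, TrackingServerSummary, StartMlflowTrackingServerRequest), assuming the matching async client methods are generated in SageMaker_api.swift with the usual Soto signature; the tracking server name below is a placeholder, not taken from this patch.

import SotoSageMaker

func auditTrackingServers(sageMaker: SageMaker) async throws {
    // Page through every MLflow tracking server using NextToken pagination.
    var nextToken: String?
    repeat {
        let page = try await sageMaker.listMlflowTrackingServers(
            .init(maxResults: 50, nextToken: nextToken)
        )
        for summary in page.trackingServerSummaries ?? [] {
            let status = summary.trackingServerStatus.map { "\($0)" } ?? "unknown"
            print("\(summary.trackingServerName ?? "?") (MLflow \(summary.mlflowVersion ?? "?")): \(status)")
        }
        nextToken = page.nextToken
    } while nextToken != nil

    // Start a named tracking server and report its ARN (name is a placeholder).
    let started = try await sageMaker.startMlflowTrackingServer(
        .init(trackingServerName: "example-tracking-server")
    )
    print("started:", started.trackingServerArn ?? "")
}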
@@ -36218,7 +37029,7 @@ extension SageMaker { try self.instanceGroups?.forEach { try $0.validate(name: "\(name).instanceGroups[]") } - try self.validate(self.instanceGroups, name: "instanceGroups", parent: name, max: 5) + try self.validate(self.instanceGroups, name: "instanceGroups", parent: name, max: 20) try self.validate(self.instanceGroups, name: "instanceGroups", parent: name, min: 1) } @@ -36759,8 +37570,7 @@ extension SageMaker { try self.validate(self.hubDescription, name: "hubDescription", parent: name, pattern: ".*") try self.validate(self.hubDisplayName, name: "hubDisplayName", parent: name, max: 255) try self.validate(self.hubDisplayName, name: "hubDisplayName", parent: name, pattern: ".*") - try self.validate(self.hubName, name: "hubName", parent: name, max: 63) - try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") + try self.validate(self.hubName, name: "hubName", parent: name, pattern: "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$") try self.hubSearchKeywords?.forEach { try validate($0, name: "hubSearchKeywords[]", parent: name, max: 255) try validate($0, name: "hubSearchKeywords[]", parent: name, pattern: "^[^A-Z]*$") @@ -37089,6 +37899,58 @@ extension SageMaker { } } + public struct UpdateMlflowTrackingServerRequest: AWSEncodableShape { + /// The new S3 URI for the general purpose bucket to use as the artifact store for the MLflow Tracking Server. + public let artifactStoreUri: String? + /// Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False + public let automaticModelRegistration: Bool? + /// The name of the MLflow Tracking Server to update. + public let trackingServerName: String? + /// The new size for the MLflow Tracking Server. + public let trackingServerSize: TrackingServerSize? + /// The new weekly maintenance window start day and time to update. The maintenance window day and time should be in Coordinated Universal Time (UTC) 24-hour standard time. For example: TUE:03:30. + public let weeklyMaintenanceWindowStart: String? + + public init(artifactStoreUri: String? = nil, automaticModelRegistration: Bool? = nil, trackingServerName: String? = nil, trackingServerSize: TrackingServerSize? = nil, weeklyMaintenanceWindowStart: String? 
= nil) { + self.artifactStoreUri = artifactStoreUri + self.automaticModelRegistration = automaticModelRegistration + self.trackingServerName = trackingServerName + self.trackingServerSize = trackingServerSize + self.weeklyMaintenanceWindowStart = weeklyMaintenanceWindowStart + } + + public func validate(name: String) throws { + try self.validate(self.artifactStoreUri, name: "artifactStoreUri", parent: name, max: 1024) + try self.validate(self.artifactStoreUri, name: "artifactStoreUri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, max: 256) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, min: 1) + try self.validate(self.trackingServerName, name: "trackingServerName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$") + try self.validate(self.weeklyMaintenanceWindowStart, name: "weeklyMaintenanceWindowStart", parent: name, max: 9) + try self.validate(self.weeklyMaintenanceWindowStart, name: "weeklyMaintenanceWindowStart", parent: name, pattern: "^(Mon|Tue|Wed|Thu|Fri|Sat|Sun):([01]\\d|2[0-3]):([0-5]\\d)$") + } + + private enum CodingKeys: String, CodingKey { + case artifactStoreUri = "ArtifactStoreUri" + case automaticModelRegistration = "AutomaticModelRegistration" + case trackingServerName = "TrackingServerName" + case trackingServerSize = "TrackingServerSize" + case weeklyMaintenanceWindowStart = "WeeklyMaintenanceWindowStart" + } + } + + public struct UpdateMlflowTrackingServerResponse: AWSDecodableShape { + /// The ARN of the updated MLflow Tracking Server. + public let trackingServerArn: String? + + public init(trackingServerArn: String? = nil) { + self.trackingServerArn = trackingServerArn + } + + private enum CodingKeys: String, CodingKey { + case trackingServerArn = "TrackingServerArn" + } + } + public struct UpdateModelCardRequest: AWSEncodableShape { /// The updated model card content. Content must be in model card JSON schema and provided as a string. When updating model card content, be sure to include the full content and not just updated content. public let content: String? @@ -37144,18 +38006,21 @@ extension SageMaker { public let inferenceSpecification: InferenceSpecification? /// The approval status of the model. public let modelApprovalStatus: ModelApprovalStatus? + /// The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version. + public let modelCard: ModelPackageModelCard? /// The Amazon Resource Name (ARN) of the model package. public let modelPackageArn: String? /// The URI of the source for the model package. public let sourceUri: String? - public init(additionalInferenceSpecificationsToAdd: [AdditionalInferenceSpecificationDefinition]? = nil, approvalDescription: String? = nil, customerMetadataProperties: [String: String]? = nil, customerMetadataPropertiesToRemove: [String]? = nil, inferenceSpecification: InferenceSpecification? = nil, modelApprovalStatus: ModelApprovalStatus? 
= nil, modelPackageArn: String? = nil, sourceUri: String? = nil) { + public init(additionalInferenceSpecificationsToAdd: [AdditionalInferenceSpecificationDefinition]? = nil, approvalDescription: String? = nil, customerMetadataProperties: [String: String]? = nil, customerMetadataPropertiesToRemove: [String]? = nil, inferenceSpecification: InferenceSpecification? = nil, modelApprovalStatus: ModelApprovalStatus? = nil, modelCard: ModelPackageModelCard? = nil, modelPackageArn: String? = nil, sourceUri: String? = nil) { self.additionalInferenceSpecificationsToAdd = additionalInferenceSpecificationsToAdd self.approvalDescription = approvalDescription self.customerMetadataProperties = customerMetadataProperties self.customerMetadataPropertiesToRemove = customerMetadataPropertiesToRemove self.inferenceSpecification = inferenceSpecification self.modelApprovalStatus = modelApprovalStatus + self.modelCard = modelCard self.modelPackageArn = modelPackageArn self.sourceUri = sourceUri } @@ -37184,6 +38049,7 @@ extension SageMaker { try validate($0, name: "customerMetadataPropertiesToRemove[]", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,128}$") } try self.inferenceSpecification?.validate(name: "\(name).inferenceSpecification") + try self.modelCard?.validate(name: "\(name).modelCard") try self.validate(self.modelPackageArn, name: "modelPackageArn", parent: name, max: 2048) try self.validate(self.modelPackageArn, name: "modelPackageArn", parent: name, min: 1) try self.validate(self.modelPackageArn, name: "modelPackageArn", parent: name, pattern: "^arn:aws(-cn|-us-gov)?:sagemaker:[a-z0-9\\-]{9,16}:[0-9]{12}:model-package/[\\S]{1,2048}$") @@ -37198,6 +38064,7 @@ extension SageMaker { case customerMetadataPropertiesToRemove = "CustomerMetadataPropertiesToRemove" case inferenceSpecification = "InferenceSpecification" case modelApprovalStatus = "ModelApprovalStatus" + case modelCard = "ModelCard" case modelPackageArn = "ModelPackageArn" case sourceUri = "SourceUri" } @@ -37949,13 +38816,16 @@ extension SageMaker { public let memberDefinitions: [MemberDefinition]? /// Configures SNS topic notifications for available or expiring work items public let notificationConfiguration: NotificationConfiguration? + /// Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using a Amazon S3 presigned URL. + public let workerAccessConfiguration: WorkerAccessConfiguration? /// The name of the work team to update. public let workteamName: String? - public init(description: String? = nil, memberDefinitions: [MemberDefinition]? = nil, notificationConfiguration: NotificationConfiguration? = nil, workteamName: String? = nil) { + public init(description: String? = nil, memberDefinitions: [MemberDefinition]? = nil, notificationConfiguration: NotificationConfiguration? = nil, workerAccessConfiguration: WorkerAccessConfiguration? = nil, workteamName: String? 
= nil) { self.description = description self.memberDefinitions = memberDefinitions self.notificationConfiguration = notificationConfiguration + self.workerAccessConfiguration = workerAccessConfiguration self.workteamName = workteamName } @@ -37978,6 +38848,7 @@ extension SageMaker { case description = "Description" case memberDefinitions = "MemberDefinitions" case notificationConfiguration = "NotificationConfiguration" + case workerAccessConfiguration = "WorkerAccessConfiguration" case workteamName = "WorkteamName" } } @@ -38108,7 +38979,7 @@ extension SageMaker { try self.customFileSystemConfigs?.forEach { try $0.validate(name: "\(name).customFileSystemConfigs[]") } - try self.validate(self.customFileSystemConfigs, name: "customFileSystemConfigs", parent: name, max: 2) + try self.validate(self.customFileSystemConfigs, name: "customFileSystemConfigs", parent: name, max: 10) try self.customPosixUserConfig?.validate(name: "\(name).customPosixUserConfig") try self.validate(self.defaultLandingUri, name: "defaultLandingUri", parent: name, max: 1023) try self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) @@ -38278,6 +39149,19 @@ extension SageMaker { } } + public struct WorkerAccessConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Defines any Amazon S3 resource constraints. + public let s3Presign: S3Presign? + + public init(s3Presign: S3Presign? = nil) { + self.s3Presign = s3Presign + } + + private enum CodingKeys: String, CodingKey { + case s3Presign = "S3Presign" + } + } + public struct Workforce: AWSDecodableShape { /// The configuration of an Amazon Cognito workforce. A single Cognito workforce is created using and corresponds to a single Amazon Cognito user pool. public let cognitoConfig: CognitoConfig? @@ -38433,6 +39317,8 @@ extension SageMaker { public let productListingIds: [String]? /// The URI of the labeling job's user interface. Workers open this URI to start labeling your data objects. public let subDomain: String? + /// Describes any access constraints that have been defined for Amazon S3 resources. + public let workerAccessConfiguration: WorkerAccessConfiguration? /// The Amazon Resource Name (ARN) of the workforce. public let workforceArn: String? /// The Amazon Resource Name (ARN) that identifies the work team. @@ -38440,7 +39326,7 @@ extension SageMaker { /// The name of the work team. public let workteamName: String? - public init(createDate: Date? = nil, description: String? = nil, lastUpdatedDate: Date? = nil, memberDefinitions: [MemberDefinition]? = nil, notificationConfiguration: NotificationConfiguration? = nil, productListingIds: [String]? = nil, subDomain: String? = nil, workforceArn: String? = nil, workteamArn: String? = nil, workteamName: String? = nil) { + public init(createDate: Date? = nil, description: String? = nil, lastUpdatedDate: Date? = nil, memberDefinitions: [MemberDefinition]? = nil, notificationConfiguration: NotificationConfiguration? = nil, productListingIds: [String]? = nil, subDomain: String? = nil, workerAccessConfiguration: WorkerAccessConfiguration? = nil, workforceArn: String? = nil, workteamArn: String? = nil, workteamName: String? 
= nil) { self.createDate = createDate self.description = description self.lastUpdatedDate = lastUpdatedDate @@ -38448,6 +39334,7 @@ extension SageMaker { self.notificationConfiguration = notificationConfiguration self.productListingIds = productListingIds self.subDomain = subDomain + self.workerAccessConfiguration = workerAccessConfiguration self.workforceArn = workforceArn self.workteamArn = workteamArn self.workteamName = workteamName @@ -38461,12 +39348,30 @@ extension SageMaker { case notificationConfiguration = "NotificationConfiguration" case productListingIds = "ProductListingIds" case subDomain = "SubDomain" + case workerAccessConfiguration = "WorkerAccessConfiguration" case workforceArn = "WorkforceArn" case workteamArn = "WorkteamArn" case workteamName = "WorkteamName" } } + public struct ClusterInstanceStorageConfig: AWSEncodableShape & AWSDecodableShape { + /// Defines the configuration for attaching additional Amazon Elastic Block Store (EBS) volumes to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to /opt/sagemaker. + public let ebsVolumeConfig: ClusterEbsVolumeConfig? + + public init(ebsVolumeConfig: ClusterEbsVolumeConfig? = nil) { + self.ebsVolumeConfig = ebsVolumeConfig + } + + public func validate(name: String) throws { + try self.ebsVolumeConfig?.validate(name: "\(name).ebsVolumeConfig") + } + + private enum CodingKeys: String, CodingKey { + case ebsVolumeConfig = "EbsVolumeConfig" + } + } + public struct CollectionConfig: AWSEncodableShape & AWSDecodableShape { /// Configuration for your vector collection type. Dimension: The number of elements in your vector. public let vectorConfig: VectorConfig? diff --git a/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift b/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift index 354651c414..8b9a820834 100644 --- a/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift +++ b/Sources/Soto/Services/SecretsManager/SecretsManager_api.swift @@ -160,7 +160,7 @@ public struct SecretsManager: AWSService { } /// Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret. For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services. - /// For information about creating a secret in the console, see Create a secret. To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it. For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret. If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. 
If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key. + /// For information about creating a secret in the console, see Create a secret. To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it. For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret. If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key. @Sendable public func createSecret(_ input: CreateSecretRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSecretResponse { return try await self.client.execute( @@ -290,7 +290,7 @@ public struct SecretsManager: AWSService { ) } - /// Creates a new version with a new encrypted secret value and attaches it to the secret. 
The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. + /// Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. 
Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. @Sendable public func putSecretValue(_ input: PutSecretValueRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutSecretValueResponse { return try await self.client.execute( diff --git a/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift b/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift index ed8fa72cc6..12306bd336 100644 --- a/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift +++ b/Sources/Soto/Services/SecretsManager/SecretsManager_shapes.swift @@ -188,9 +188,9 @@ extension SecretsManager { public let kmsKeyId: String? /// The name of the new secret. The secret name can contain ASCII letters, numbers, and the following characters: /_+=.@- Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters after the secret name at the end of the ARN. public let name: String - /// The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretString or SecretBinary must have a value, but not both. This parameter is not available in the Secrets Manager console. + /// The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretString or SecretBinary must have a value, but not both. This parameter is not available in the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretBinary: AWSBase64Data? - /// The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretString or SecretBinary must have a value, but not both. If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse. + /// The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretString or SecretBinary must have a value, but not both. If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. 
If you create your own log entries, you must also avoid logging the information in this field. public let secretString: String? /// A list of tags to attach to the secret. Each tag is a key and value pair of strings in a JSON text string, for example: [{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}] Secrets Manager tag key names are case sensitive. A tag with the key "ABC" is a different tag from one with key "abc". If you check tags in permissions policies as part of your security strategy, then adding or removing a tag can change permissions. If the completion of this operation would result in you losing your permissions for this secret, then Secrets Manager blocks the operation and returns an Access Denied error. For more information, see Control access to secrets using tags and Limit access to identities with tags that match secrets' tags. For information about how to format a JSON parameter for the various command line tool environments, see Using JSON for Parameters. If your command-line tool or SDK requires quotation marks around the parameter, you should use single quotes to avoid confusion with the double quotes required in the JSON text. For tag quotas and naming restrictions, see Service quotas for Tagging in the Amazon Web Services General Reference guide. public let tags: [Tag]? @@ -392,7 +392,7 @@ extension SecretsManager { public let primaryRegion: String? /// A list of the replicas of this secret and their status: Failed, which indicates that the replica was not created. InProgress, which indicates that Secrets Manager is in the process of creating the replica. InSync, which indicates that the replica was created. public let replicationStatus: [ReplicationStatusType]? - /// Specifies whether automatic rotation is turned on for this secret. To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret. + /// Specifies whether automatic rotation is turned on for this secret. If the secret has never been configured for rotation, Secrets Manager returns null. To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret. public let rotationEnabled: Bool? /// The ARN of the Lambda function that Secrets Manager invokes to rotate the secret. public let rotationLambdaARN: String? @@ -572,7 +572,7 @@ extension SecretsManager { } public struct GetSecretValueRequest: AWSEncodableShape { - /// The ARN or name of the secret to retrieve. For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN. + /// The ARN or name of the secret to retrieve. To retrieve a secret from another account, you must use an ARN. For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN. public let secretId: String /// The unique identifier of the version of the secret to retrieve. If you include both this parameter and VersionStage, the two parameters must refer to the same secret version. If you don't specify either a VersionStage or VersionId, then Secrets Manager returns the AWSCURRENT version. This value is typically a UUID-type value with 32 hexadecimal digits. public let versionId: String? @@ -608,9 +608,9 @@ extension SecretsManager { public let createdDate: Date? /// The friendly name of the secret. public let name: String? - /// The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. 
When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded. If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead. + /// The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded. If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretBinary: AWSBase64Data? - /// The decrypted secret value, if the secret value was originally provided as a string or through the Secrets Manager console. If this secret was created by using the console, then Secrets Manager stores the information as a JSON structure of key/value pairs. + /// The decrypted secret value, if the secret value was originally provided as a string or through the Secrets Manager console. If this secret was created by using the console, then Secrets Manager stores the information as a JSON structure of key/value pairs. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretString: String? /// The unique identifier of this version of the secret. public let versionId: String? @@ -802,17 +802,20 @@ extension SecretsManager { public struct PutSecretValueRequest: AWSEncodableShape { /// A unique identifier for the new version of the secret. If you use the Amazon Web Services CLI or one of the Amazon Web Services SDKs to call this operation, then you can leave this parameter empty. The CLI or SDK generates a random UUID for you and includes it as the value for this parameter in the request. If you generate a raw HTTP request to the Secrets Manager service endpoint, then you must generate a ClientRequestToken and include it in the request. This value helps ensure idempotency. Secrets Manager uses this value to prevent the accidental creation of duplicate versions if there are failures and retries during a rotation. We recommend that you generate a UUID-type value to ensure uniqueness of your versions within the specified secret. If the ClientRequestToken value isn't already associated with a version of the secret then a new version of the secret is created. If a version with this value already exists and that version's SecretString or SecretBinary values are the same as those in the request then the request is ignored. The operation is idempotent. If a version with this value already exists and the version of the SecretString and SecretBinary values are different from those in the request, then the request fails because you can't modify a secret version. You can only create new versions to store new secret values. This value becomes the VersionId of the new version. public let clientRequestToken: String? 
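// Editorial note, not part of the generated patch: the request shape listed here gains a new
// RotationToken member (see the properties and validation that follow). A minimal, hypothetical
// usage sketch of the updated call, assuming a SecretsManager service object constructed
// elsewhere and a secret named "MyTestSecret" (both made up for illustration); the initializer
// and the putSecretValue signature match the diff above, and the module name is assumed.
import SotoSecretsManager

func putNewSecretVersion(using secretsManager: SecretsManager) async throws {
    let request = SecretsManager.PutSecretValueRequest(
        // clientRequestToken is omitted; it defaults to a generated idempotency token.
        // rotationToken is intended for Lambda rotation functions doing cross-account rotation;
        // the value below is hypothetical (36-256 chars of [a-zA-Z0-9-], per the validation below).
        rotationToken: "EXAMPLE0-1234-5678-9abc-def012345678",
        secretId: "MyTestSecret",
        secretString: #"{"username":"admin","password":"EXAMPLE-PASSWORD"}"#,
        versionStages: ["AWSPENDING"]
    )
    _ = try await secretsManager.putSecretValue(request)
}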
- /// The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. You must include SecretBinary or SecretString, but not both. You can't access this value from the Secrets Manager console. + /// A unique identifier that indicates the source of the request. For cross-account rotation (when you rotate a secret in one account by using a Lambda rotation function in another account) and the Lambda rotation function assumes an IAM role to call Secrets Manager, Secrets Manager validates the identity with the rotation token. For more information, see How rotation works. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. + public let rotationToken: String? + /// The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. You must include SecretBinary or SecretString, but not both. You can't access this value from the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretBinary: AWSBase64Data? /// The ARN or name of the secret to add a new version to. For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN. If the secret doesn't already exist, use CreateSecret instead. public let secretId: String - /// The text to encrypt and store in the new version of the secret. You must include SecretBinary or SecretString, but not both. We recommend you create the secret string as JSON key/value pairs, as shown in the example. + /// The text to encrypt and store in the new version of the secret. You must include SecretBinary or SecretString, but not both. We recommend you create the secret string as JSON key/value pairs, as shown in the example. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretString: String? /// A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process. If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify AWSCURRENT, and it is already attached to another version, then Secrets Manager also moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. public let versionStages: [String]? - public init(clientRequestToken: String? = PutSecretValueRequest.idempotencyToken(), secretBinary: AWSBase64Data? = nil, secretId: String, secretString: String? = nil, versionStages: [String]? = nil) { + public init(clientRequestToken: String? 
= PutSecretValueRequest.idempotencyToken(), rotationToken: String? = nil, secretBinary: AWSBase64Data? = nil, secretId: String, secretString: String? = nil, versionStages: [String]? = nil) { self.clientRequestToken = clientRequestToken + self.rotationToken = rotationToken self.secretBinary = secretBinary self.secretId = secretId self.secretString = secretString @@ -822,6 +825,9 @@ extension SecretsManager { public func validate(name: String) throws { try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, max: 64) try self.validate(self.clientRequestToken, name: "clientRequestToken", parent: name, min: 32) + try self.validate(self.rotationToken, name: "rotationToken", parent: name, max: 256) + try self.validate(self.rotationToken, name: "rotationToken", parent: name, min: 36) + try self.validate(self.rotationToken, name: "rotationToken", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") try self.validate(self.secretBinary, name: "secretBinary", parent: name, max: 65536) try self.validate(self.secretBinary, name: "secretBinary", parent: name, min: 1) try self.validate(self.secretId, name: "secretId", parent: name, max: 2048) @@ -838,6 +844,7 @@ extension SecretsManager { private enum CodingKeys: String, CodingKey { case clientRequestToken = "ClientRequestToken" + case rotationToken = "RotationToken" case secretBinary = "SecretBinary" case secretId = "SecretId" case secretString = "SecretString" @@ -1396,11 +1403,11 @@ extension SecretsManager { public let description: String? /// The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more information about versions and staging labels, see Concepts: Version. A key alias is always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. If the secret is in a different account, then you must use a customer managed key and provide the ARN of that KMS key in this field. The user making the call must have permissions to both the secret and the KMS key in their respective accounts. public let kmsKeyId: String? - /// The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. You can't access this parameter in the Secrets Manager console. + /// The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. 
You can't access this parameter in the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretBinary: AWSBase64Data? /// The ARN or name of the secret. For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN. public let secretId: String - /// The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretBinary or SecretString must have a value, but not both. + /// The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretBinary or SecretString must have a value, but not both. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field. public let secretString: String? public init(clientRequestToken: String? = UpdateSecretRequest.idempotencyToken(), description: String? = nil, kmsKeyId: String? = nil, secretBinary: AWSBase64Data? = nil, secretId: String, secretString: String? = nil) { @@ -1512,7 +1519,7 @@ extension SecretsManager { public struct ValidateResourcePolicyRequest: AWSEncodableShape { /// A JSON-formatted string that contains an Amazon Web Services resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For example policies, see Permissions policy examples. public let resourcePolicy: String - /// This field is reserved for internal use. + /// The ARN or name of the secret with the resource-based policy you want to validate. public let secretId: String? public init(resourcePolicy: String, secretId: String? = nil) { diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift index 2c5c3ccf4d..d4b1e69fe3 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift @@ -252,6 +252,7 @@ extension SecurityHub { public enum TargetType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case account = "ACCOUNT" case organizationalUnit = "ORGANIZATIONAL_UNIT" + case root = "ROOT" public var description: String { return self.rawValue } } @@ -699,7 +700,7 @@ extension SecurityHub { public struct ActionLocalPortDetails: AWSEncodableShape & AWSDecodableShape { /// The number of the port. public let port: Int? - /// The port name of the local connection. + /// The port name of the local connection. Length Constraints: 128. public let portName: String? public init(port: Int? = nil, portName: String? = nil) { @@ -756,7 +757,7 @@ extension SecurityHub { public struct ActionRemotePortDetails: AWSEncodableShape & AWSDecodableShape { /// The number of the port. public let port: Int? - /// The port name of the remote connection. + /// The port name of the remote connection. Length Constraints: 128. public let portName: String? public init(port: Int? = nil, portName: String? = nil) { @@ -1727,7 +1728,7 @@ extension SecurityHub { public struct AwsApiCallAction: AWSEncodableShape & AWSDecodableShape { /// Identifies the resources that were affected by the API call. 
public let affectedResources: [String: String]? - /// The name of the API method that was issued. + /// The name of the API method that was issued. Length Constraints: 128. public let api: String? /// Indicates whether the API call originated from a remote IP address (remoteip) or from a DNS domain (domain). public let callerType: String? @@ -1743,7 +1744,7 @@ extension SecurityHub { public let lastSeen: String? /// Provided if CallerType is remoteIp. Provides information about the remote IP address that the API call originated from. public let remoteIpDetails: ActionRemoteIpDetails? - /// The name of the Amazon Web Services service that the API method belongs to. + /// The name of the Amazon Web Services service that the API method belongs to. Length Constraints: 128. public let serviceName: String? public init(affectedResources: [String: String]? = nil, api: String? = nil, callerType: String? = nil, domainDetails: AwsApiCallActionDomainDetails? = nil, firstSeen: String? = nil, lastSeen: String? = nil, remoteIpDetails: ActionRemoteIpDetails? = nil, serviceName: String? = nil) { @@ -1784,7 +1785,7 @@ extension SecurityHub { } public struct AwsApiCallActionDomainDetails: AWSEncodableShape & AWSDecodableShape { - /// The name of the DNS domain that issued the API call. + /// The name of the DNS domain that issued the API call. Length Constraints: 128. public let domain: String? public init(domain: String? = nil) { @@ -17496,11 +17497,11 @@ extension SecurityHub { public struct AwsSecurityFinding: AWSEncodableShape & AWSDecodableShape { /// Provides details about an action that affects or that was taken on a resource. public let action: Action? - /// The Amazon Web Services account ID that a finding is generated in. + /// The Amazon Web Services account ID that a finding is generated in. Length Constraints: 12. public let awsAccountId: String? - /// The name of the Amazon Web Services account from which a finding was generated. + /// The name of the Amazon Web Services account from which a finding was generated. Length Constraints: Minimum length of 1. Maximum length of 50. public let awsAccountName: String? - /// The name of the company for the product that generated the finding. Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration. When you use the Security Hub console or API to filter findings by company name, you use this attribute. + /// The name of the company for the product that generated the finding. Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration. When you use the Security Hub console or API to filter findings by company name, you use this attribute. Length Constraints: Minimum length of 1. Maximum length of 128. public let companyName: String? /// This data type is exclusive to findings that are generated as the result of a check run against a specific rule in a supported security standard, such as CIS Amazon Web Services Foundations. Contains security standard-related finding details. public let compliance: Compliance? @@ -17512,7 +17513,7 @@ extension SecurityHub { public let createdAt: String? /// The level of importance assigned to the resources associated with the finding. 
A score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources. public let criticality: Int? - /// A finding's description. In this release, Description is a required property. + /// A finding's description. Description is a required property. Length Constraints: Minimum length of 1. Maximum length of 1024. public let description: String? /// In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update their own values for confidence, criticality, related findings, severity, and types. public let findingProviderFields: FindingProviderFields? @@ -17525,15 +17526,15 @@ extension SecurityHub { /// vulnerabilities in Lambda function code based on internal detectors developed /// in collaboration with Amazon CodeGuru. Security Hub receives those findings. public let generatorDetails: GeneratorDetails? - /// The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc. + /// The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, or something else. Length Constraints: Minimum length of 1. Maximum length of 512. public let generatorId: String? - /// The security findings provider-specific identifier for a finding. + /// The security findings provider-specific identifier for a finding. Length Constraints: Minimum length of 1. Maximum length of 512. public let id: String? /// Indicates when the security findings provider most recently observed the potential security issue that a finding captured. This field accepts only the specified formats. Timestamps /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) public let lastObservedAt: String? - /// A list of malware related to a finding. + /// A list of malware related to a finding. Array Members: Maximum number of 5 items. public let malware: [Malware]? /// The details of network-related information about a finding. public let network: Network? @@ -17545,47 +17546,47 @@ extension SecurityHub { public let patchSummary: PatchSummary? /// The details of process-related information about a finding. public let process: ProcessDetails? - /// A imestamp that indicates when Security Hub received a finding and begins to process it. This field accepts only the specified formats. Timestamps + /// A timestamp that indicates when Security Hub received a finding and begins to process it. This field accepts only the specified formats. Timestamps /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited /// to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) public let processedAt: String? - /// The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration. + /// The ARN generated by Security Hub that uniquely identifies a product that generates findings. This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for a custom integration. Length Constraints: Minimum length of 12. Maximum length of 2048. public let productArn: String? /// A data type where security findings providers can include additional solution-specific details that aren't part of the defined AwsSecurityFinding format. Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 2048 characters. public let productFields: [String: String]? - /// The name of the product that generated the finding. Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration. When you use the Security Hub console or API to filter findings by product name, you use this attribute. + /// The name of the product that generated the finding. Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration. When you use the Security Hub console or API to filter findings by product name, you use this attribute. Length Constraints: Minimum length of 1. Maximum length of 128. public let productName: String? /// The record state of a finding. public let recordState: RecordState? - /// The Region from which the finding was generated. Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings. + /// The Region from which the finding was generated. Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings. Length Constraints: Minimum length of 1. Maximum length of 16. public let region: String? - /// A list of related findings. + /// A list of related findings. Array Members: Minimum number of 1 item. Maximum number of 10 items. public let relatedFindings: [RelatedFinding]? /// A data type that describes the remediation options for a finding. public let remediation: Remediation? - /// A set of resource data types that describe the resources that the finding refers to. + /// A set of resource data types that describe the resources that the finding refers to. Array Members: Minimum number of 1 item. Maximum number of 32 items. public let resources: [Resource]? /// Indicates whether the finding is a sample finding. public let sample: Bool? - /// The schema version that a finding is formatted for. + /// The schema version that a finding is formatted for. The value is 2018-10-08. public let schemaVersion: String? 
/// A finding's severity. public let severity: Severity? /// A URL that links to a page about the current finding in the security findings provider's solution. public let sourceUrl: String? - /// Threat intelligence details related to a finding. + /// Threat intelligence details related to a finding. Array Members: Minimum number of 1 item. Maximum number of 5 items. public let threatIntelIndicators: [ThreatIntelIndicator]? - /// Details about the threat detected in a security finding and the file paths that were affected by the threat. + /// Details about the threat detected in a security finding and the file paths that were affected by the threat. Array Members: Minimum number of 1 item. Maximum number of 32 items. public let threats: [Threat]? - /// A finding's title. In this release, Title is a required property. + /// A finding's title. Title is a required property. Length Constraints: Minimum length of 1. Maximum length of 256. public let title: String? - /// One or more finding types in the format of namespace/category/classifier that classify a finding. Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications + /// One or more finding types in the format of namespace/category/classifier that classify a finding. Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications Array Members: Maximum number of 50 items. public let types: [String]? /// Indicates when the security findings provider last updated the finding record. This field accepts only the specified formats. Timestamps /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) public let updatedAt: String? - /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. + /// A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding. Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 1024 characters. public let userDefinedFields: [String: String]? /// Indicates the veracity of a finding. public let verificationState: VerificationState? @@ -20706,13 +20707,13 @@ extension SecurityHub { public struct Compliance: AWSEncodableShape & AWSDecodableShape { /// The enabled security standards in which a security control is currently enabled. public let associatedStandards: [AssociatedStandard]? - /// For a control, the industry or regulatory framework requirements that are related to the control. The check for that control is aligned with these requirements. + /// For a control, the industry or regulatory framework requirements that are related to the control. The check for that control is aligned with these requirements. Array Members: Maximum number of 32 items. public let relatedRequirements: [String]? /// The unique identifier of a control across standards. 
Values for this field typically consist of an Amazon Web Service and a number, such as APIGateway.5. public let securityControlId: String? /// An object that includes security control parameter names and values. public let securityControlParameters: [SecurityControlParameter]? - /// The result of a standards check. The valid values for Status are as follows. PASSED - Standards check passed for all evaluated resources. WARNING - Some information is missing or this check is not supported for your configuration. FAILED - Standards check failed for at least one evaluated resource. NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding. + /// The result of a standards check. The valid values for Status are as follows. PASSED - Standards check passed for all evaluated resources. WARNING - Some information is missing or this check is not supported for your configuration. FAILED - Standards check failed for at least one evaluated resource. NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE for a Security Hub control, Security Hub automatically archives the finding after 3 days. public let status: ComplianceStatus? /// For findings generated from controls, a list of reasons behind the value of Status. For the list of status reason codes and their meanings, see Standards-related information in the ASFF in the Security Hub User Guide. public let statusReasons: [StatusReason]? @@ -21960,9 +21961,9 @@ extension SecurityHub { public struct DnsRequestAction: AWSEncodableShape & AWSDecodableShape { /// Indicates whether the DNS request was blocked. public let blocked: Bool? - /// The DNS domain that is associated with the DNS request. + /// The DNS domain that is associated with the DNS request. Length Constraints: 128. public let domain: String? - /// The protocol that was used for the DNS request. + /// The protocol that was used for the DNS request. Length Constraints: Minimum length of 1. Maximum length of 64. public let `protocol`: String? public init(blocked: Bool? = nil, domain: String? = nil, protocol: String? = nil) { @@ -22131,16 +22132,16 @@ extension SecurityHub { public struct FilePaths: AWSEncodableShape & AWSDecodableShape { /// The name of the infected or suspicious file corresponding to the hash. - /// + /// Length Constraints: Minimum of 1 length. Maximum of 128 length. public let fileName: String? /// Path to the infected or suspicious file on the resource it was detected on. - /// + /// Length Constraints: Minimum of 1 length. Maximum of 128 length. public let filePath: String? /// The hash value for the infected or suspicious file. - /// + /// Length Constraints: Minimum of 1 length. Maximum of 128 length. public let hash: String? /// The Amazon Resource Name (ARN) of the resource on which the threat was detected. - /// + /// Length Constraints: Minimum of 1 length. Maximum of 128 length. public let resourceId: String? public init(fileName: String? = nil, filePath: String? = nil, hash: String? = nil, resourceId: String? = nil) { @@ -22297,7 +22298,7 @@ extension SecurityHub { public struct FindingProviderSeverity: AWSEncodableShape & AWSDecodableShape { /// The severity label assigned to the finding by the finding provider. 
public let label: SeverityLabel? - /// The finding provider's original value for the severity. + /// The finding provider's original value for the severity. Length Constraints: Minimum length of 1. Maximum length of 64. public let original: String? public init(label: SeverityLabel? = nil, original: String? = nil) { @@ -22425,7 +22426,7 @@ extension SecurityHub { public struct GeneratorDetails: AWSEncodableShape & AWSDecodableShape { /// The description of the detector used to identify the code vulnerability. public let description: String? - /// An array of tags used to identify the detector associated with the finding. + /// An array of tags used to identify the detector associated with the finding. Array Members: Minimum number of 0 items. Maximum number of 10 items. public let labels: [String]? /// The name of the detector used to identify the code vulnerability. public let name: String? @@ -23783,9 +23784,9 @@ extension SecurityHub { } public struct Malware: AWSEncodableShape & AWSDecodableShape { - /// The name of the malware that was observed. + /// The name of the malware that was observed. Length Constraints: Minimum of 1. Maximum of 64. public let name: String? - /// The file system path of the malware that was observed. + /// The file system path of the malware that was observed. Length Constraints: Minimum of 1. Maximum of 512. public let path: String? /// The state of the malware that was observed. public let state: MalwareState? @@ -23889,7 +23890,7 @@ extension SecurityHub { } public struct Network: AWSEncodableShape & AWSDecodableShape { - /// The destination domain of network-related information about a finding. + /// The destination domain of network-related information about a finding. Length Constraints: Minimum of 1. Maximum of 128. public let destinationDomain: String? /// The destination IPv4 address of network-related information about a finding. public let destinationIpV4: String? @@ -23901,9 +23902,9 @@ extension SecurityHub { public let direction: NetworkDirection? /// The range of open ports that is present on the network. public let openPortRange: PortRange? - /// The protocol of network-related information about a finding. + /// The protocol of network-related information about a finding. Length Constraints: Minimum of 1. Maximum of 16. public let `protocol`: String? - /// The source domain of network-related information about a finding. + /// The source domain of network-related information about a finding. Length Constraints: Minimum of 1. Maximum of 128. public let sourceDomain: String? /// The source IPv4 address of network-related information about a finding. public let sourceIpV4: String? @@ -23963,7 +23964,7 @@ extension SecurityHub { public let connectionDirection: String? /// Information about the port on the EC2 instance. public let localPortDetails: ActionLocalPortDetails? - /// The protocol used to make the network connection request. + /// The protocol used to make the network connection request. Length Constraints: Minimum length of 1. Maximum length of 64. public let `protocol`: String? /// Information about the remote IP address that issued the network connection request. public let remoteIpDetails: ActionRemoteIpDetails? @@ -24000,7 +24001,7 @@ extension SecurityHub { public struct NetworkHeader: AWSEncodableShape & AWSDecodableShape { /// Information about the destination of the component. public let destination: NetworkPathComponentDetails? - /// The protocol used for the component. + /// The protocol used for the component. 
Length Constraints: Minimum of 1. Maximum of 16. public let `protocol`: String? /// Information about the origin of the component. public let source: NetworkPathComponentDetails? @@ -24025,9 +24026,9 @@ extension SecurityHub { } public struct NetworkPathComponent: AWSEncodableShape & AWSDecodableShape { - /// The identifier of a component in the network path. + /// The identifier of a component in the network path. Length Constraints: Minimum of 1. Maximum of 32. public let componentId: String? - /// The type of component. + /// The type of component. Length Constraints: Minimum of 1. Maximum of 32. public let componentType: String? /// Information about the component that comes after the current component in the network path. public let egress: NetworkHeader? @@ -24080,7 +24081,7 @@ extension SecurityHub { } public struct Note: AWSEncodableShape & AWSDecodableShape { - /// The text of a note. + /// The text of a note. Length Constraints: Minimum of 1. Maximum of 512. public let text: String? /// A timestamp that indicates when the note was updated. This field accepts only the specified formats. Timestamps /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited @@ -24282,21 +24283,21 @@ extension SecurityHub { } public struct PatchSummary: AWSEncodableShape & AWSDecodableShape { - /// The number of patches from the compliance standard that failed to install. + /// The number of patches from the compliance standard that failed to install. The value can be an integer from 0 to 100000. public let failedCount: Int? - /// The identifier of the compliance standard that was used to determine the patch compliance status. + /// The identifier of the compliance standard that was used to determine the patch compliance status. Length Constraints: Minimum length of 1. Maximum length of 256. public let id: String? - /// The number of patches from the compliance standard that were installed successfully. + /// The number of patches from the compliance standard that were installed successfully. The value can be an integer from 0 to 100000. public let installedCount: Int? - /// The number of installed patches that are not part of the compliance standard. + /// The number of installed patches that are not part of the compliance standard. The value can be an integer from 0 to 100000. public let installedOtherCount: Int? - /// The number of patches that were applied, but that require the instance to be rebooted in order to be marked as installed. + /// The number of patches that were applied, but that require the instance to be rebooted in order to be marked as installed. The value can be an integer from 0 to 100000. public let installedPendingReboot: Int? - /// The number of patches that are installed but are also on a list of patches that the customer rejected. + /// The number of patches that are installed but are also on a list of patches that the customer rejected. The value can be an integer from 0 to 100000. public let installedRejectedCount: Int? - /// The number of patches that are part of the compliance standard but are not installed. The count includes patches that failed to install. + /// The number of patches that are part of the compliance standard but are not installed. The count includes patches that failed to install. The value can be an integer from 0 to 100000. public let missingCount: Int? - /// The type of patch operation performed. For Patch Manager, the values are SCAN and INSTALL. + /// The type of patch operation performed. 
For Patch Manager, the values are SCAN and INSTALL. Length Constraints: Minimum length of 1. Maximum length of 256. public let operation: String? /// Indicates when the operation completed. This field accepts only the specified formats. Timestamps /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited @@ -24306,7 +24307,7 @@ extension SecurityHub { /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) public let operationStartTime: String? - /// The reboot option specified for the instance. + /// The reboot option specified for the instance. Length Constraints: Minimum length of 1. Maximum length of 256. public let rebootOption: String? public init(failedCount: Int? = nil, id: String? = nil, installedCount: Int? = nil, installedOtherCount: Int? = nil, installedPendingReboot: Int? = nil, installedRejectedCount: Int? = nil, missingCount: Int? = nil, operation: String? = nil, operationEndTime: String? = nil, operationStartTime: String? = nil, rebootOption: String? = nil) { @@ -24435,11 +24436,11 @@ extension SecurityHub { /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) public let launchedAt: String? - /// The name of the process. + /// The name of the process. Length Constraints: Minimum of 1. Maximum of 64. public let name: String? /// The parent process ID. This field accepts positive integers between O and 2147483647. public let parentPid: Int? - /// The path to the process executable. + /// The path to the process executable. Length Constraints: Minimum of 1. Maximum of 512. public let path: String? /// The process ID. public let pid: Int? @@ -24558,7 +24559,7 @@ extension SecurityHub { } public struct Recommendation: AWSEncodableShape & AWSDecodableShape { - /// Describes the recommended steps to take to remediate an issue identified in a finding. + /// Describes the recommended steps to take to remediate an issue identified in a finding. Length Constraints: Minimum of 1 length. Maximum of 512 length. public let text: String? /// A URL to a page or site that contains information about how to remediate a finding. public let url: String? @@ -24652,13 +24653,13 @@ extension SecurityHub { public let id: String? /// The canonical Amazon Web Services partition name that the Region is assigned to. public let partition: Partition? - /// The canonical Amazon Web Services external Region name where this resource is located. + /// The canonical Amazon Web Services external Region name where this resource is located. 
Length Constraints: Minimum length of 1. Maximum length of 16. public let region: String? /// Identifies the role of the resource in the finding. A resource is either the actor or target of the finding activity, public let resourceRole: String? - /// A list of Amazon Web Services tags associated with a resource at the time the finding was processed. + /// A list of Amazon Web Services tags associated with a resource at the time the finding was processed. Tags must follow Amazon Web Services tag naming limits and requirements. public let tags: [String: String]? - /// The type of the resource that details are provided for. If possible, set Type to one of the supported resource types. For example, if the resource is an EC2 instance, then set Type to AwsEc2Instance. If the resource does not match any of the provided types, then set Type to Other. + /// The type of the resource that details are provided for. If possible, set Type to one of the supported resource types. For example, if the resource is an EC2 instance, then set Type to AwsEc2Instance. If the resource does not match any of the provided types, then set Type to Other. Length Constraints: Minimum length of 1. Maximum length of 256. public let type: String? public init(applicationArn: String? = nil, applicationName: String? = nil, dataClassification: DataClassificationDetails? = nil, details: ResourceDetails? = nil, id: String? = nil, partition: Partition? = nil, region: String? = nil, resourceRole: String? = nil, tags: [String: String]? = nil, type: String? = nil) { @@ -26058,9 +26059,9 @@ extension SecurityHub { public struct Severity: AWSEncodableShape & AWSDecodableShape { /// The severity value of the finding. The allowed values are the following. INFORMATIONAL - No issue was found. LOW - The issue does not require action on its own. MEDIUM - The issue must be addressed but not urgently. HIGH - The issue must be addressed as a priority. CRITICAL - The issue must be remediated immediately to avoid it escalating. If you provide Normalized and do not provide Label, then Label is set automatically as follows. 0 - INFORMATIONAL 1–39 - LOW 40–69 - MEDIUM 70–89 - HIGH 90–100 - CRITICAL public let label: SeverityLabel? - /// Deprecated. The normalized severity of a finding. Instead of providing Normalized, provide Label. If you provide Label and do not provide Normalized, then Normalized is set automatically as follows. INFORMATIONAL - 0 LOW - 1 MEDIUM - 40 HIGH - 70 CRITICAL - 90 + /// Deprecated. The normalized severity of a finding. Instead of providing Normalized, provide Label. The value of Normalized can be an integer between 0 and 100. If you provide Label and do not provide Normalized, then Normalized is set automatically as follows. INFORMATIONAL - 0 LOW - 1 MEDIUM - 40 HIGH - 70 CRITICAL - 90 public let normalized: Int? - /// The native severity from the finding product that generated the finding. + /// The native severity from the finding product that generated the finding. Length Constraints: Minimum length of 1. Maximum length of 64. public let original: String? /// Deprecated. This attribute isn't included in findings. Instead of providing Product, provide Original. The native severity as defined by the Amazon Web Services service or integrated partner product that generated the finding. public let product: Double? @@ -26779,16 +26780,16 @@ extension SecurityHub { public struct Threat: AWSEncodableShape & AWSDecodableShape { /// Provides information about the file paths that were affected by the threat. 
- /// + /// Array Members: Minimum number of 1 item. Maximum number of 5 items. public let filePaths: [FilePaths]? /// This total number of items in which the threat has been detected. /// public let itemCount: Int? /// The name of the threat. - /// + /// Length Constraints: Minimum of 1 length. Maximum of 128 length. public let name: String? /// The severity of the threat. - /// + /// Length Constraints: Minimum of 1 length. Maximum of 128 length. public let severity: String? public init(filePaths: [FilePaths]? = nil, itemCount: Int? = nil, name: String? = nil, severity: String? = nil) { @@ -26821,13 +26822,13 @@ extension SecurityHub { /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited /// to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples: YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z) YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z) YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59) YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759) YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59) public let lastObservedAt: String? - /// The source of the threat intelligence indicator. + /// The source of the threat intelligence indicator. Length Constraints: Minimum of 1 length. Maximum of 64 length. public let source: String? /// The URL to the page or site where you can get more information about the threat intelligence indicator. public let sourceUrl: String? /// The type of threat intelligence indicator. public let type: ThreatIntelIndicatorType? - /// The value of a threat intelligence indicator. + /// The value of a threat intelligence indicator. Length Constraints: Minimum of 1 length. Maximum of 512 length. public let value: String? public init(category: ThreatIntelIndicatorCategory? = nil, lastObservedAt: String? = nil, source: String? = nil, sourceUrl: String? = nil, type: ThreatIntelIndicatorType? = nil, value: String? 
= nil) { diff --git a/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift b/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift index 5f6629321e..fa288a62c6 100644 --- a/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift +++ b/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift @@ -76,6 +76,8 @@ public struct SecurityLake: AWSService { [.fips]: .init(endpoints: [ "us-east-1": "securitylake-fips.us-east-1.amazonaws.com", "us-east-2": "securitylake-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "securitylake.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "securitylake.us-gov-west-1.amazonaws.com", "us-west-1": "securitylake-fips.us-west-1.amazonaws.com", "us-west-2": "securitylake-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift b/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift index 987d2938c2..8032b18cd1 100644 --- a/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift +++ b/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift @@ -204,7 +204,7 @@ extension SecurityLake { try validate($0, name: "accounts[]", parent: name, pattern: "^[0-9]{12}$") } try self.regions.forEach { - try validate($0, name: "regions[]", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try validate($0, name: "regions[]", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") } try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, pattern: "^(latest|[0-9]\\.[0-9])$") } @@ -296,7 +296,7 @@ extension SecurityLake { } try self.validate(self.sourceName, name: "sourceName", parent: name, max: 64) try self.validate(self.sourceName, name: "sourceName", parent: name, min: 1) - try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[\\\\\\w\\-_:/.]*$") + try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[\\w\\-\\_\\:\\.]*$") try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, max: 32) try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, min: 1) try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, pattern: "^[A-Za-z0-9\\-\\.\\_]*$") @@ -471,7 +471,7 @@ extension SecurityLake { public let sources: [LogSourceResource] /// The description for your subscriber account in Security Lake. public let subscriberDescription: String? - /// The AWS identity used to access your data. + /// The Amazon Web Services identity used to access your data. public let subscriberIdentity: AwsIdentity /// The name of your Security Lake subscriber account. 
public let subscriberName: String @@ -539,13 +539,13 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, max: 1011) try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, min: 1) - try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, pattern: "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") try self.validate(self.databaseArn, name: "databaseArn", parent: name, max: 1011) try self.validate(self.databaseArn, name: "databaseArn", parent: name, min: 1) - try self.validate(self.databaseArn, name: "databaseArn", parent: name, pattern: "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.databaseArn, name: "databaseArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") try self.validate(self.tableArn, name: "tableArn", parent: name, max: 1011) try self.validate(self.tableArn, name: "tableArn", parent: name, min: 1) - try self.validate(self.tableArn, name: "tableArn", parent: name, pattern: "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.tableArn, name: "tableArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") } private enum CodingKeys: String, CodingKey { @@ -639,7 +639,7 @@ extension SecurityLake { try self.provider?.validate(name: "\(name).provider") try self.validate(self.sourceName, name: "sourceName", parent: name, max: 64) try self.validate(self.sourceName, name: "sourceName", parent: name, min: 1) - try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[\\\\\\w\\-_:/.]*$") + try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[\\w\\-\\_\\:\\.]*$") try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, max: 32) try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, min: 1) try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, pattern: "^[A-Za-z0-9\\-\\.\\_]*$") @@ -665,7 +665,7 @@ extension SecurityLake { } public func validate(name: String) throws { - try self.validate(self.region, name: "region", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try self.validate(self.region, name: "region", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") try self.sources.forEach { try $0.validate(name: "\(name).sources[]") } @@ -696,7 +696,7 @@ extension SecurityLake { } public func validate(name: String) throws { - try self.validate(self.region, name: "region", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try self.validate(self.region, name: "region", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") try 
self.replicationConfiguration?.validate(name: "\(name).replicationConfiguration") } @@ -806,7 +806,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.regions?.forEach { - try validate($0, name: "regions[]", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try validate($0, name: "regions[]", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") } try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") } @@ -993,7 +993,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.sourceName, name: "sourceName", parent: name, max: 64) try self.validate(self.sourceName, name: "sourceName", parent: name, min: 1) - try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[\\\\\\w\\-_:/.]*$") + try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[\\w\\-\\_\\:\\.]*$") try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, max: 32) try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, min: 1) try self.validate(self.sourceVersion, name: "sourceVersion", parent: name, pattern: "^[A-Za-z0-9\\-\\.\\_]*$") @@ -1048,7 +1048,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.regions.forEach { - try validate($0, name: "regions[]", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try validate($0, name: "regions[]", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") } } @@ -1300,7 +1300,7 @@ extension SecurityLake { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) try self.regions?.forEach { - try validate($0, name: "regions[]", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try validate($0, name: "regions[]", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") } } @@ -1344,7 +1344,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.regions?.forEach { - try validate($0, name: "regions[]", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try validate($0, name: "regions[]", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") } } @@ -1394,7 +1394,7 @@ extension SecurityLake { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) try self.regions?.forEach { - try validate($0, name: "regions[]", parent: name, pattern: "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") + try validate($0, name: "regions[]", parent: name, pattern: "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$") } try self.sources?.forEach { try $0.validate(name: "\(name).sources[]") @@ 
-1488,7 +1488,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") } private enum CodingKeys: CodingKey {} @@ -1558,7 +1558,7 @@ extension SecurityLake { public let accessTypes: [AccessType]? /// The date and time when the subscriber was created. public let createdAt: Date? - /// The Amazon Resource Name (ARN) which uniquely defines the AWS RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share. This field is available only for Lake Formation subscribers created after March 8, 2023. + /// The Amazon Resource Name (ARN) which uniquely defines the Amazon Web Services RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share. This field is available only for Lake Formation subscribers created after March 8, 2023. public let resourceShareArn: String? /// The name of the resource share. public let resourceShareName: String? @@ -1576,7 +1576,7 @@ extension SecurityLake { public let subscriberEndpoint: String? /// The subscriber ID of the Amazon Security Lake subscriber account. public let subscriberId: String - /// The AWS identity used to access your data. + /// The Amazon Web Services identity used to access your data. public let subscriberIdentity: AwsIdentity /// The name of your Amazon Security Lake subscriber account. 
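As a quick cross-check of the widened SecurityLake validation hunks above (a minimal sketch, not part of the patch; the ARN and Region values below are invented GovCloud examples), the new patterns now accept the aws-us-gov partition and us-gov Regions:

import Foundation

// Patterns copied verbatim from the updated validate(name:) calls above.
let arnPattern = "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$"
let regionPattern = "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$"

// Invented GovCloud values that the previous patterns rejected.
let arn = "arn:aws-us-gov:securitylake:us-gov-east-1:123456789012:data-lake/default"
let region = "us-gov-east-1"

// range(of:options:) returns a non-nil range when the regular expression matches.
print(arn.range(of: arnPattern, options: .regularExpression) != nil)       // true
print(region.range(of: regionPattern, options: .regularExpression) != nil) // true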
public let subscriberName: String @@ -1666,7 +1666,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") try self.tags.forEach { try $0.validate(name: "\(name).tags[]") } @@ -1703,7 +1703,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) diff --git a/Sources/Soto/Services/SsmSap/SsmSap_api.swift b/Sources/Soto/Services/SsmSap/SsmSap_api.swift index a7138164a1..dd8e2b257c 100644 --- a/Sources/Soto/Services/SsmSap/SsmSap_api.swift +++ b/Sources/Soto/Services/SsmSap/SsmSap_api.swift @@ -214,6 +214,19 @@ public struct SsmSap: AWSService { ) } + /// Returns a list of operations events. Available parameters include OperationID, as well as optional parameters MaxResults, NextToken, and Filters. + @Sendable + public func listOperationEvents(_ input: ListOperationEventsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListOperationEventsOutput { + return try await self.client.execute( + operation: "ListOperationEvents", + path: "/list-operation-events", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the operations performed by AWS Systems Manager for SAP. @Sendable public func listOperations(_ input: ListOperationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListOperationsOutput { @@ -266,6 +279,19 @@ public struct SsmSap: AWSService { ) } + /// Request is an operation which starts an application. Parameter ApplicationId is required. + @Sendable + public func startApplication(_ input: StartApplicationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartApplicationOutput { + return try await self.client.execute( + operation: "StartApplication", + path: "/start-application", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Refreshes a registered application. @Sendable public func startApplicationRefresh(_ input: StartApplicationRefreshInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartApplicationRefreshOutput { @@ -279,6 +305,19 @@ public struct SsmSap: AWSService { ) } + /// Request is an operation to stop an application. Parameter ApplicationId is required. 
Parameters StopConnectedEntity and IncludeEc2InstanceShutdown are optional. + @Sendable + public func stopApplication(_ input: StopApplicationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StopApplicationOutput { + return try await self.client.execute( + operation: "StopApplication", + path: "/stop-application", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates tag for a resource by specifying the ARN. @Sendable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -389,6 +428,25 @@ extension SsmSap { ) } + /// Returns a list of operations events. Available parameters include OperationID, as well as optional parameters MaxResults, NextToken, and Filters. + /// Return PaginatorSequence for operation. + /// + /// - Parameters: + /// - input: Input for request + /// - logger: Logger used for logging + public func listOperationEventsPaginator( + _ input: ListOperationEventsInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListOperationEventsInput, ListOperationEventsOutput> { + return .init( + input: input, + command: self.listOperationEvents, + inputKey: \ListOperationEventsInput.nextToken, + outputKey: \ListOperationEventsOutput.nextToken, + logger: logger + ) + } + /// Lists the operations performed by AWS Systems Manager for SAP. /// Return PaginatorSequence for operation. /// @@ -440,6 +498,17 @@ extension SsmSap.ListDatabasesInput: AWSPaginateToken { } } +extension SsmSap.ListOperationEventsInput: AWSPaginateToken { + public func usingPaginationToken(_ token: String) -> SsmSap.ListOperationEventsInput { + return .init( + filters: self.filters, + maxResults: self.maxResults, + nextToken: token, + operationId: self.operationId + ) + } +} + extension SsmSap.ListOperationsInput: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> SsmSap.ListOperationsInput { return .init( diff --git a/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift b/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift index 3a190e319c..edcd5469b2 100644 --- a/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift +++ b/Sources/Soto/Services/SsmSap/SsmSap_shapes.swift @@ -98,6 +98,11 @@ extension SsmSap { public var description: String { return self.rawValue } } + public enum ConnectedEntityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dbms = "DBMS" + public var description: String { return self.rawValue } + } + public enum CredentialType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case admin = "ADMIN" public var description: String { return self.rawValue } @@ -140,6 +145,13 @@ extension SsmSap { public var description: String { return self.rawValue } } + public enum OperationEventStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + public var description: String { return self.rawValue } + } + public enum OperationMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case deltaDatashipping = "DELTA_DATASHIPPING" case logreplay = "LOGREPLAY" @@ -1044,6 +1056,60 @@ extension SsmSap { } } + public struct ListOperationEventsInput: AWSEncodableShape { + /// Optionally specify filters to narrow the returned operation event items. Valid filter names include status, resourceID, and resourceType. The valid operator for all three filters is Equals.
+ public let filters: [Filter]? + /// The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value. If you do not specify a value for MaxResults, the request returns 50 items per page by default. + public let maxResults: Int? + /// The token to use to retrieve the next page of results. This value is null when there are no more results to return. + public let nextToken: String? + /// The ID of the operation. + public let operationId: String + + public init(filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, operationId: String) { + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.operationId = operationId + } + + public func validate(name: String) throws { + try self.filters?.forEach { + try $0.validate(name: "\(name).filters[]") + } + try self.validate(self.filters, name: "filters", parent: name, max: 10) + try self.validate(self.filters, name: "filters", parent: name, min: 1) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^.{16,1024}$") + try self.validate(self.operationId, name: "operationId", parent: name, pattern: "^[{]?[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}[}]?$") + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case operationId = "OperationId" + } + } + + public struct ListOperationEventsOutput: AWSDecodableShape { + /// The token to use to retrieve the next page of results. This value is null when there are no more results to return. + public let nextToken: String? + /// A returned list of operation events that meet the filter criteria. + public let operationEvents: [OperationEvent]? + + public init(nextToken: String? = nil, operationEvents: [OperationEvent]? = nil) { + self.nextToken = nextToken + self.operationEvents = operationEvents + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case operationEvents = "OperationEvents" + } + } + public struct ListOperationsInput: AWSEncodableShape { /// The ID of the application. public let applicationId: String @@ -1184,6 +1250,35 @@ extension SsmSap { } } + public struct OperationEvent: AWSDecodableShape { + /// A description of the operation event. For example, "Stop the EC2 instance i-abcdefgh987654321". + public let description: String? + /// The resource involved in the operations event. Contains ResourceArn ARN and ResourceType. + public let resource: Resource? + /// The status of the operation event. The possible statuses are: IN_PROGRESS, COMPLETED, and FAILED. + public let status: OperationEventStatus? + /// The status message relating to a specific operation event. + public let statusMessage: String? + /// The timestamp of the specified operation event. + public let timestamp: Date? + + public init(description: String? = nil, resource: Resource? = nil, status: OperationEventStatus? = nil, statusMessage: String? = nil, timestamp: Date? 
= nil) { + self.description = description + self.resource = resource + self.status = status + self.statusMessage = statusMessage + self.timestamp = timestamp + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case resource = "Resource" + case status = "Status" + case statusMessage = "StatusMessage" + case timestamp = "Timestamp" + } + } + public struct PutResourcePermissionInput: AWSEncodableShape { public let actionType: PermissionActionType public let resourceArn: String @@ -1327,6 +1422,53 @@ extension SsmSap { } } + public struct Resource: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the source resource. Example of ResourceArn: "arn:aws:ec2:us-east-1:111111111111:instance/i-abcdefgh987654321" + public let resourceArn: String? + /// The resource type. Example of ResourceType: "AWS::SystemsManagerSAP::Component" or "AWS::EC2::Instance". + public let resourceType: String? + + public init(resourceArn: String? = nil, resourceType: String? = nil) { + self.resourceArn = resourceArn + self.resourceType = resourceType + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + case resourceType = "ResourceType" + } + } + + public struct StartApplicationInput: AWSEncodableShape { + /// The ID of the application. + public let applicationId: String + + public init(applicationId: String) { + self.applicationId = applicationId + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[\\w\\d]{1,50}$") + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + } + } + + public struct StartApplicationOutput: AWSDecodableShape { + /// The ID of the operation. + public let operationId: String? + + public init(operationId: String? = nil) { + self.operationId = operationId + } + + private enum CodingKeys: String, CodingKey { + case operationId = "OperationId" + } + } + public struct StartApplicationRefreshInput: AWSEncodableShape { /// The ID of the application. public let applicationId: String @@ -1357,6 +1499,44 @@ extension SsmSap { } } + public struct StopApplicationInput: AWSEncodableShape { + /// The ID of the application. + public let applicationId: String + /// Boolean. If included and if set to True, the StopApplication operation will shut down the associated Amazon EC2 instance in addition to the application. + public let includeEc2InstanceShutdown: Bool? + /// Specify the ConnectedEntityType. Accepted type is DBMS. If this parameter is included, the connected DBMS (Database Management System) will be stopped. + public let stopConnectedEntity: ConnectedEntityType? + + public init(applicationId: String, includeEc2InstanceShutdown: Bool? = nil, stopConnectedEntity: ConnectedEntityType? = nil) { + self.applicationId = applicationId + self.includeEc2InstanceShutdown = includeEc2InstanceShutdown + self.stopConnectedEntity = stopConnectedEntity + } + + public func validate(name: String) throws { + try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[\\w\\d]{1,50}$") + } + + private enum CodingKeys: String, CodingKey { + case applicationId = "ApplicationId" + case includeEc2InstanceShutdown = "IncludeEc2InstanceShutdown" + case stopConnectedEntity = "StopConnectedEntity" + } + } + + public struct StopApplicationOutput: AWSDecodableShape { + /// The ID of the operation. + public let operationId: String? + + public init(operationId: String? 
= nil) { + self.operationId = operationId + } + + private enum CodingKeys: String, CodingKey { + case operationId = "OperationId" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the resource. public let resourceArn: String diff --git a/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift b/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift index 87947d2fb1..df1c81b0e2 100644 --- a/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift +++ b/Sources/Soto/Services/StorageGateway/StorageGateway_api.swift @@ -77,6 +77,7 @@ public struct StorageGateway: AWSService { static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ "ca-central-1": "storagegateway-fips.ca-central-1.amazonaws.com", + "ca-west-1": "storagegateway-fips.ca-west-1.amazonaws.com", "us-east-1": "storagegateway-fips.us-east-1.amazonaws.com", "us-east-2": "storagegateway-fips.us-east-2.amazonaws.com", "us-gov-east-1": "storagegateway-fips.us-gov-east-1.amazonaws.com", @@ -570,7 +571,7 @@ public struct StorageGateway: AWSService { ) } - /// Returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone. + /// Returns your gateway's maintenance window schedule information, with values for monthly or weekly cadence, specific day and time to begin maintenance, and which types of updates to apply. Time values returned are for the gateway's time zone. @Sendable public func describeMaintenanceStartTime(_ input: DescribeMaintenanceStartTimeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeMaintenanceStartTimeOutput { return try await self.client.execute( @@ -1129,7 +1130,7 @@ public struct StorageGateway: AWSService { ) } - /// Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request. For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN. + /// Updates a gateway's metadata, which includes the gateway's name, time zone, and metadata cache size. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request. For gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN. @Sendable public func updateGatewayInformation(_ input: UpdateGatewayInformationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGatewayInformationOutput { return try await self.client.execute( @@ -1155,7 +1156,7 @@ public struct StorageGateway: AWSService { ) } - /// Updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone. + /// Updates a gateway's maintenance window schedule, with settings for monthly or weekly cadence, specific day and time to begin maintenance, and which types of updates to apply. Time configuration uses the gateway's time zone. You can pass values for a complete maintenance schedule, or update policy, or both. Previous values will persist for whichever setting you choose not to modify. 
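Stepping back to the SsmSap additions above: a minimal usage sketch for the new StartApplication, StopApplication, and ListOperationEvents operations (not part of the patch; `awsClient`, the module name `SotoSsmSap`, the Region, and the application ID are assumptions, everything else follows the signatures added above):

import SotoSsmSap  // module name assumed from Soto's usual Soto<Service> products

func cycleSapApplication(awsClient: AWSClient) async throws {
    let ssmSap = SsmSap(client: awsClient, region: .useast1)

    // Stop the application, also stopping the connected DBMS and shutting down the EC2 instance.
    let stop = try await ssmSap.stopApplication(
        SsmSap.StopApplicationInput(
            applicationId: "myApplication",
            includeEc2InstanceShutdown: true,
            stopConnectedEntity: .dbms
        )
    )

    // Follow the stop operation's events page by page with the new paginator.
    if let operationId = stop.operationId {
        let events = ssmSap.listOperationEventsPaginator(SsmSap.ListOperationEventsInput(operationId: operationId))
        for try await page in events {
            for event in page.operationEvents ?? [] {
                print(event.status?.rawValue ?? "UNKNOWN", event.description ?? "")
            }
        }
    }

    // Start the application again; only the application ID is required.
    _ = try await ssmSap.startApplication(SsmSap.StartApplicationInput(applicationId: "myApplication"))
}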
If an incomplete or invalid maintenance schedule is passed, the entire request will be rejected with an error and no changes will occur. A complete maintenance schedule must include values for both MinuteOfHour and HourOfDay, and either DayOfMonth or DayOfWeek. We recommend keeping maintenance updates turned on, except in specific use cases where the brief disruptions caused by updating the gateway could critically impact your deployment. @Sendable public func updateMaintenanceStartTime(_ input: UpdateMaintenanceStartTimeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateMaintenanceStartTimeOutput { return try await self.client.execute( @@ -1220,7 +1221,7 @@ public struct StorageGateway: AWSService { ) } - /// Updates the SMB security strategy on a file gateway. This action is only supported in file gateways. This API is called Security level in the User Guide. A higher security level can affect performance of the gateway. + /// Updates the SMB security strategy level for an Amazon S3 file gateway. This action is only supported for Amazon S3 file gateways. For information about configuring this setting using the Amazon Web Services console, see Setting a security level for your gateway in the Amazon S3 File Gateway User Guide. A higher security strategy level can affect performance of the gateway. @Sendable public func updateSMBSecurityStrategy(_ input: UpdateSMBSecurityStrategyInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSMBSecurityStrategyOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift b/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift index 53e7d50a46..0d3258ea48 100644 --- a/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift +++ b/Sources/Soto/Services/StorageGateway/StorageGateway_shapes.swift @@ -37,6 +37,12 @@ extension StorageGateway { public var description: String { return self.rawValue } } + public enum AutomaticUpdatePolicy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allVersions = "ALL_VERSIONS" + case emergencyVersionsOnly = "EMERGENCY_VERSIONS_ONLY" + public var description: String { return self.rawValue } + } + public enum AvailabilityMonitorTestStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case complete = "COMPLETE" case failed = "FAILED" @@ -100,6 +106,7 @@ extension StorageGateway { public enum SMBSecurityStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case clientSpecified = "ClientSpecified" case mandatoryEncryption = "MandatoryEncryption" + case mandatoryEncryptionNoAes128 = "MandatoryEncryptionNoAes128" case mandatorySigning = "MandatorySigning" public var description: String { return self.rawValue } } @@ -119,9 +126,9 @@ extension StorageGateway { public let gatewayName: String /// A value that indicates the Amazon Web Services Region where you want to store your data. The gateway Amazon Web Services Region specified must be the same Amazon Web Services Region as the Amazon Web Services Region in your Host header in the request. For more information about available Amazon Web Services Regions and endpoints for Storage Gateway, see Storage Gateway endpoints and quotas in the Amazon Web Services General Reference. Valid Values: See Storage Gateway endpoints and quotas in the Amazon Web Services General Reference. 
public let gatewayRegion: String - /// A value that indicates the time zone you want to set for the gateway. The time zone is of the format "GMT-hr:mm" or "GMT+hr:mm". For example, GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. + /// A value that indicates the time zone you want to set for the gateway. The time zone is of the format "GMT", "GMT-hr:mm", or "GMT+hr:mm". For example, GMT indicates Greenwich Mean Time without any offset. GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. public let gatewayTimezone: String - /// A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED. Valid Values: STORED | CACHED | VTL | VTL_SNOW | FILE_S3 | FILE_FSX_SMB + /// A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED. Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB public let gatewayType: String? /// The value that indicates the type of medium changer to use for tape gateway. This field is optional. Valid Values: STK-L700 | AWS-Gateway-VTL | IBM-03584L32-0402 public let mediumChangerType: String? @@ -2230,7 +2237,7 @@ extension StorageGateway { public let gatewayTimezone: String? /// The type of the gateway. public let gatewayType: String? - /// The type of hardware or software platform on which the gateway is running. + /// The type of hardware or software platform on which the gateway is running. Tape Gateway is no longer available on Snow Family devices. public let hostEnvironment: HostEnvironment? /// A unique identifier for the specific instance of the host platform running the gateway. This value is only available for certain host environments, and its format depends on the host environment type. public let hostEnvironmentId: String? @@ -2318,7 +2325,7 @@ extension StorageGateway { } public struct DescribeMaintenanceStartTimeOutput: AWSDecodableShape { - /// The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month. + /// The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month. It is not possible to set the maintenance schedule to start on days 29 through 31. public let dayOfMonth: Int? /// An ordinal number between 0 and 6 that represents the day of the week, where 0 represents Sunday and 6 represents Saturday. The day of week is in the time zone of the gateway. public let dayOfWeek: Int? @@ -2327,15 +2334,18 @@ extension StorageGateway { public let hourOfDay: Int? /// The minute component of the maintenance start time represented as mm, where mm is the minute (0 to 59). The minute of the hour is in the time zone of the gateway. public let minuteOfHour: Int? + /// A set of variables indicating the software update preferences for the gateway. Includes AutomaticUpdatePolicy field with the following inputs: ALL_VERSIONS - Enables regular gateway maintenance updates. 
EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates. + public let softwareUpdatePreferences: SoftwareUpdatePreferences? /// A value that indicates the time zone that is set for the gateway. The start time and day of week specified should be in the time zone of the gateway. public let timezone: String? - public init(dayOfMonth: Int? = nil, dayOfWeek: Int? = nil, gatewayARN: String? = nil, hourOfDay: Int? = nil, minuteOfHour: Int? = nil, timezone: String? = nil) { + public init(dayOfMonth: Int? = nil, dayOfWeek: Int? = nil, gatewayARN: String? = nil, hourOfDay: Int? = nil, minuteOfHour: Int? = nil, softwareUpdatePreferences: SoftwareUpdatePreferences? = nil, timezone: String? = nil) { self.dayOfMonth = dayOfMonth self.dayOfWeek = dayOfWeek self.gatewayARN = gatewayARN self.hourOfDay = hourOfDay self.minuteOfHour = minuteOfHour + self.softwareUpdatePreferences = softwareUpdatePreferences self.timezone = timezone } @@ -2345,6 +2355,7 @@ extension StorageGateway { case gatewayARN = "GatewayARN" case hourOfDay = "HourOfDay" case minuteOfHour = "MinuteOfHour" + case softwareUpdatePreferences = "SoftwareUpdatePreferences" case timezone = "Timezone" } } @@ -2448,7 +2459,7 @@ extension StorageGateway { public let smbGuestPasswordSet: Bool? /// A list of Active Directory users and groups that have special permissions for SMB file shares on the gateway. public let smbLocalGroups: SMBLocalGroups? - /// The type of security strategy that was specified for file gateway. ClientSpecified: If you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Only supported for S3 File Gateways. MandatorySigning: If you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. MandatoryEncryption: If you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer. + /// The type of security strategy that was specified for file gateway. ClientSpecified: If you choose this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Supported only for S3 File Gateway. MandatorySigning: If you choose this option, File Gateway only allows connections from SMBv2 or SMBv3 clients that have signing turned on. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008, or later. MandatoryEncryption: If you choose this option, File Gateway only allows connections from SMBv3 clients that have encryption turned on. Both 256-bit and 128-bit algorithms are allowed. This option is recommended for environments that handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or later. MandatoryEncryptionNoAes128: If you choose this option, File Gateway only allows connections from SMBv3 clients that use 256-bit AES encryption algorithms. 128-bit algorithms are not allowed. This option is recommended for environments that handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or later. 
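The new MandatoryEncryptionNoAes128 strategy described here can be applied through the existing UpdateSMBSecurityStrategy call. A minimal sketch (not part of the patch; `awsClient`, the module name, the Region, and the gateway ARN are placeholders):

import SotoStorageGateway  // module name assumed from Soto's usual Soto<Service> products

func restrictSmbToAes256(awsClient: AWSClient) async throws {
    let storageGateway = StorageGateway(client: awsClient, region: .useast1)

    // Only SMBv3 clients using 256-bit AES will be allowed to connect after this call.
    _ = try await storageGateway.updateSMBSecurityStrategy(
        StorageGateway.UpdateSMBSecurityStrategyInput(
            gatewayARN: "arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B",
            smbSecurityStrategy: .mandatoryEncryptionNoAes128
        )
    )
}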
public let smbSecurityStrategy: SMBSecurityStrategy? public init(activeDirectoryStatus: ActiveDirectoryStatus? = nil, domainName: String? = nil, fileSharesVisible: Bool? = nil, gatewayARN: String? = nil, smbGuestPasswordSet: Bool? = nil, smbLocalGroups: SMBLocalGroups? = nil, smbSecurityStrategy: SMBSecurityStrategy? = nil) { @@ -3152,7 +3163,7 @@ extension StorageGateway { public let gatewayOperationalState: String? /// The type of the gateway. public let gatewayType: String? - /// The type of hardware or software platform on which the gateway is running. + /// The type of hardware or software platform on which the gateway is running. Tape Gateway is no longer available on Snow Family devices. public let hostEnvironment: HostEnvironment? /// A unique identifier for the specific instance of the host platform running the gateway. This value is only available for certain host environments, and its format depends on the host environment type. public let hostEnvironmentId: String? @@ -3944,7 +3955,7 @@ extension StorageGateway { public struct RefreshCacheInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the file share you want to refresh. public let fileShareARN: String - /// A comma-separated list of the paths of folders to refresh in the cache. The default is ["/"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access to is refreshed. + /// A comma-separated list of the paths of folders to refresh in the cache. The default is ["/"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that the file share has access to is refreshed. Do not include / when specifying folder names. For example, you would specify samplefolder rather than samplefolder/. public let folderList: [String]? /// A value that specifies whether to recursively refresh folders in the cache. The refresh includes folders that were in the cache the last time the gateway listed the folder's contents. If this value set to true, each folder that is listed in FolderList is recursively updated. Otherwise, subfolders listed in FolderList are not refreshed. Only objects that are in folders listed directly under FolderList are found and used for the update. The default is true. Valid Values: true | false public let recursive: Bool? @@ -4371,6 +4382,19 @@ extension StorageGateway { } } + public struct SoftwareUpdatePreferences: AWSEncodableShape & AWSDecodableShape { + /// Indicates the automatic update policy for a gateway. ALL_VERSIONS - Enables regular gateway maintenance updates. EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates. + public let automaticUpdatePolicy: AutomaticUpdatePolicy? + + public init(automaticUpdatePolicy: AutomaticUpdatePolicy? = nil) { + self.automaticUpdatePolicy = automaticUpdatePolicy + } + + private enum CodingKeys: String, CodingKey { + case automaticUpdatePolicy = "AutomaticUpdatePolicy" + } + } + public struct StartAvailabilityMonitorTestInput: AWSEncodableShape { public let gatewayARN: String @@ -4932,7 +4956,7 @@ extension StorageGateway { /// The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway. For more information, see What is Amazon CloudWatch Logs? public let cloudWatchLogGroupARN: String? public let gatewayARN: String - /// Specifies the size of the gateway's metadata cache. 
+ /// Specifies the size of the gateway's metadata cache. This setting impacts gateway performance and hardware recommendations. For more information, see Performance guidance for gateways with multiple file shares in the Amazon S3 File Gateway User Guide. public let gatewayCapacity: GatewayCapacity? public let gatewayName: String? /// A value that indicates the time zone of the gateway. @@ -5012,22 +5036,25 @@ extension StorageGateway { } public struct UpdateMaintenanceStartTimeInput: AWSEncodableShape { - /// The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month and 28 represents the last day of the month. + /// The day of the month component of the maintenance start time represented as an ordinal number from 1 to 28, where 1 represents the first day of the month. It is not possible to set the maintenance schedule to start on days 29 through 31. public let dayOfMonth: Int? - /// The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday. + /// The day of the week component of the maintenance start time week represented as an ordinal number from 0 to 6, where 0 represents Sunday and 6 represents Saturday. public let dayOfWeek: Int? public let gatewayARN: String /// The hour component of the maintenance start time represented as hh, where hh is the hour (00 to 23). The hour of the day is in the time zone of the gateway. - public let hourOfDay: Int + public let hourOfDay: Int? /// The minute component of the maintenance start time represented as mm, where mm is the minute (00 to 59). The minute of the hour is in the time zone of the gateway. - public let minuteOfHour: Int + public let minuteOfHour: Int? + /// A set of variables indicating the software update preferences for the gateway. Includes AutomaticUpdatePolicy field with the following inputs: ALL_VERSIONS - Enables regular gateway maintenance updates. EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance updates. + public let softwareUpdatePreferences: SoftwareUpdatePreferences? - public init(dayOfMonth: Int? = nil, dayOfWeek: Int? = nil, gatewayARN: String, hourOfDay: Int, minuteOfHour: Int) { + public init(dayOfMonth: Int? = nil, dayOfWeek: Int? = nil, gatewayARN: String, hourOfDay: Int? = nil, minuteOfHour: Int? = nil, softwareUpdatePreferences: SoftwareUpdatePreferences? = nil) { self.dayOfMonth = dayOfMonth self.dayOfWeek = dayOfWeek self.gatewayARN = gatewayARN self.hourOfDay = hourOfDay self.minuteOfHour = minuteOfHour + self.softwareUpdatePreferences = softwareUpdatePreferences } public func validate(name: String) throws { @@ -5049,6 +5076,7 @@ extension StorageGateway { case gatewayARN = "GatewayARN" case hourOfDay = "HourOfDay" case minuteOfHour = "MinuteOfHour" + case softwareUpdatePreferences = "SoftwareUpdatePreferences" } } @@ -5368,7 +5396,7 @@ extension StorageGateway { public struct UpdateSMBSecurityStrategyInput: AWSEncodableShape { public let gatewayARN: String - /// Specifies the type of security strategy. ClientSpecified: if you use this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Supported only in S3 File Gateway. MandatorySigning: if you use this option, file gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. 
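Similarly, with HourOfDay and MinuteOfHour now optional on UpdateMaintenanceStartTimeInput, the update policy can be changed on its own via the new SoftwareUpdatePreferences field. A minimal sketch (not part of the patch; `awsClient`, the module name, the Region, and the gateway ARN are placeholders):

import SotoStorageGateway  // module name assumed from Soto's usual Soto<Service> products

func limitGatewayToEmergencyUpdates(awsClient: AWSClient) async throws {
    let storageGateway = StorageGateway(client: awsClient, region: .useast1)

    // Per the updated operation documentation, passing only the update policy leaves the
    // previously configured maintenance schedule untouched.
    _ = try await storageGateway.updateMaintenanceStartTime(
        StorageGateway.UpdateMaintenanceStartTimeInput(
            gatewayARN: "arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B",
            softwareUpdatePreferences: StorageGateway.SoftwareUpdatePreferences(automaticUpdatePolicy: .emergencyVersionsOnly)
        )
    )
}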
This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. MandatoryEncryption: if you use this option, file gateway only allows connections from SMBv3 clients that have encryption enabled. This option is highly recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer. + /// Specifies the type of security strategy. ClientSpecified: If you choose this option, requests are established based on what is negotiated by the client. This option is recommended when you want to maximize compatibility across different clients in your environment. Supported only for S3 File Gateway. MandatorySigning: If you choose this option, File Gateway only allows connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. MandatoryEncryption: If you choose this option, File Gateway only allows connections from SMBv3 clients that have encryption enabled. This option is recommended for environments that handle sensitive data. This option works with SMB clients on Microsoft Windows 8, Windows Server 2012 or newer. MandatoryEncryptionNoAes128: If you choose this option, File Gateway only allows connections from SMBv3 clients that use 256-bit AES encryption algorithms. 128-bit algorithms are not allowed. This option is recommended for environments that handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or later. public let smbSecurityStrategy: SMBSecurityStrategy public init(gatewayARN: String, smbSecurityStrategy: SMBSecurityStrategy) { diff --git a/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift b/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift new file mode 100644 index 0000000000..20e6a2b71f --- /dev/null +++ b/Sources/Soto/Services/TaxSettings/TaxSettings_api.swift @@ -0,0 +1,208 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2023 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +@_exported import SotoCore + +/// Service object for interacting with AWS TaxSettings service. +/// +/// You can use the tax setting API to programmatically set, modify, and delete the tax registration number (TRN), associated business legal name, and address (Collectively referred to as "TRN information"). You can also programmatically view TRN information and tax addresses ("Tax profiles"). You can use this API to automate your TRN information settings instead of manually using the console. Service Endpoint https://tax.us-east-1.amazonaws.com +public struct TaxSettings: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the TaxSettings client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. 
This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "TaxSettings", + serviceIdentifier: "tax", + serviceProtocol: .restjson, + apiVersion: "2018-05-10", + endpoint: endpoint, + errorType: TaxSettingsErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Deletes tax registration for multiple accounts in batch. This can be used to delete tax registrations for up to five accounts in one batch. This API operation can't be used to delete your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost Management console instead. + @Sendable + public func batchDeleteTaxRegistration(_ input: BatchDeleteTaxRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDeleteTaxRegistrationResponse { + return try await self.client.execute( + operation: "BatchDeleteTaxRegistration", + path: "/BatchDeleteTaxRegistration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Adds or updates tax registration for multiple accounts in batch. This can be used to add or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information. 
By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual. + @Sendable + public func batchPutTaxRegistration(_ input: BatchPutTaxRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchPutTaxRegistrationResponse { + return try await self.client.execute( + operation: "BatchPutTaxRegistration", + path: "/BatchPutTaxRegistration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Deletes tax registration for a single account. This API operation can't be used to delete your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost Management console instead. 
+ @Sendable + public func deleteTaxRegistration(_ input: DeleteTaxRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteTaxRegistrationResponse { + return try await self.client.execute( + operation: "DeleteTaxRegistration", + path: "/DeleteTaxRegistration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves tax registration for a single account. + @Sendable + public func getTaxRegistration(_ input: GetTaxRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTaxRegistrationResponse { + return try await self.client.execute( + operation: "GetTaxRegistration", + path: "/GetTaxRegistration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Downloads your tax documents to the Amazon S3 bucket that you specify in your request. + @Sendable + public func getTaxRegistrationDocument(_ input: GetTaxRegistrationDocumentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTaxRegistrationDocumentResponse { + return try await self.client.execute( + operation: "GetTaxRegistrationDocument", + path: "/GetTaxRegistrationDocument", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Retrieves the tax registration of accounts listed in a consolidated billing family. This can be used to retrieve up to 100 accounts' tax registrations in one call (default 50). + @Sendable + public func listTaxRegistrations(_ input: ListTaxRegistrationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTaxRegistrationsResponse { + return try await self.client.execute( + operation: "ListTaxRegistrations", + path: "/ListTaxRegistrations", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + + /// Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information. By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. 
Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual. + @Sendable + public func putTaxRegistration(_ input: PutTaxRegistrationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutTaxRegistrationResponse { + return try await self.client.execute( + operation: "PutTaxRegistration", + path: "/PutTaxRegistration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } +} + +extension TaxSettings { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: TaxSettings, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension TaxSettings { + /// Retrieves the tax registration of accounts listed in a consolidated billing family. This can be used to retrieve up to 100 accounts' tax registrations in one call (default 50). 
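A brief usage sketch for the listTaxRegistrations paginator defined just below (not part of the generated file; `awsClient` and the module name are assumptions, and only request members visible in this patch are used):

import SotoTaxSettings  // module name assumed from Soto's usual Soto<Service> products

func countTaxRegistrationPages(awsClient: AWSClient) async throws -> Int {
    let taxSettings = TaxSettings(client: awsClient)

    var pages = 0
    // Walks every page of the consolidated billing family's registrations.
    let request = TaxSettings.ListTaxRegistrationsRequest(maxResults: 50, nextToken: nil)
    for try await _ in taxSettings.listTaxRegistrationsPaginator(request) {
        pages += 1
    }
    return pages
}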
+    /// Return PaginatorSequence for operation.
+    ///
+    /// - Parameters:
+    ///   - input: Input for request
+    ///   - logger: Logger used for logging
+    public func listTaxRegistrationsPaginator(
+        _ input: ListTaxRegistrationsRequest,
+        logger: Logger = AWSClient.loggingDisabled
+    ) -> AWSClient.PaginatorSequence<ListTaxRegistrationsRequest, ListTaxRegistrationsResponse> {
+        return .init(
+            input: input,
+            command: self.listTaxRegistrations,
+            inputKey: \ListTaxRegistrationsRequest.nextToken,
+            outputKey: \ListTaxRegistrationsResponse.nextToken,
+            logger: logger
+        )
+    }
+}
+
+extension TaxSettings.ListTaxRegistrationsRequest: AWSPaginateToken {
+    public func usingPaginationToken(_ token: String) -> TaxSettings.ListTaxRegistrationsRequest {
+        return .init(
+            maxResults: self.maxResults,
+            nextToken: token
+        )
+    }
+}
diff --git a/Sources/Soto/Services/TaxSettings/TaxSettings_shapes.swift b/Sources/Soto/Services/TaxSettings/TaxSettings_shapes.swift
new file mode 100644
index 0000000000..98c95e0dad
--- /dev/null
+++ b/Sources/Soto/Services/TaxSettings/TaxSettings_shapes.swift
@@ -0,0 +1,1343 @@
+//===----------------------------------------------------------------------===//
+//
+// This source file is part of the Soto for AWS open source project
+//
+// Copyright (c) 2017-2023 the Soto project authors
+// Licensed under Apache License v2.0
+//
+// See LICENSE.txt for license information
+// See CONTRIBUTORS.txt for the list of Soto project authors
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+//===----------------------------------------------------------------------===//
+
+// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator.
+// DO NOT EDIT.
+
+#if os(Linux) && compiler(<5.10)
+// swift-corelibs-foundation hasn't been updated with Sendable conformances
+@preconcurrency import Foundation
+#else
+import Foundation
+#endif
+@_spi(SotoInternal) import SotoCore
+
+extension TaxSettings {
+    // MARK: Enums
+
+    public enum AddressRoleType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case billingAddress = "BillingAddress"
+        case contactAddress = "ContactAddress"
+        case taxAddress = "TaxAddress"
+        public var description: String { return self.rawValue }
+    }
+
+    public enum Industries: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case banks = "Banks"
+        case circulatingOrg = "CirculatingOrg"
+        case developmentAgencies = "DevelopmentAgencies"
+        case insurance = "Insurance"
+        case pensionAndBenefitFunds = "PensionAndBenefitFunds"
+        case professionalOrg = "ProfessionalOrg"
+        public var description: String { return self.rawValue }
+    }
+
+    public enum IsraelCustomerType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case business = "Business"
+        case individual = "Individual"
+        public var description: String { return self.rawValue }
+    }
+
+    public enum IsraelDealerType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case authorized = "Authorized"
+        case nonAuthorized = "Non-authorized"
+        public var description: String { return self.rawValue }
+    }
+
+    public enum MalaysiaServiceTaxCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable {
+        case consultancy = "Consultancy"
+        case digitalSvcElectronicMedium = "Digital Service And Electronic Medium"
+        case itServices = "IT Services"
+        case trainingOrCoaching = "Training Or Coaching"
+        public var description: String { return self.rawValue }
+    }
+
+    public enum PersonType: String, CustomStringConvertible, Codable, Sendable,
CodingKeyRepresentable { + case business = "Business" + case legalPerson = "Legal Person" + case physicalPerson = "Physical Person" + public var description: String { return self.rawValue } + } + + public enum RegistrationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case intraEu = "Intra-EU" + case local = "Local" + public var description: String { return self.rawValue } + } + + public enum SaudiArabiaTaxRegistrationNumberType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case commercialRegistrationNumber = "CommercialRegistrationNumber" + case taxIdentificationNumber = "TaxIdentificationNumber" + case taxRegistrationNumber = "TaxRegistrationNumber" + public var description: String { return self.rawValue } + } + + public enum Sector: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case business = "Business" + case individual = "Individual" + case publicInstitutions = "Government" + public var description: String { return self.rawValue } + } + + public enum TaxRegistrationNumberType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case localRegistrationNumber = "LocalRegistrationNumber" + case taxRegistrationNumber = "TaxRegistrationNumber" + public var description: String { return self.rawValue } + } + + public enum TaxRegistrationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case deleted = "Deleted" + case pending = "Pending" + case rejected = "Rejected" + case verified = "Verified" + public var description: String { return self.rawValue } + } + + public enum TaxRegistrationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case cnpj = "CNPJ" + case cpf = "CPF" + case gst = "GST" + case sst = "SST" + case vat = "VAT" + public var description: String { return self.rawValue } + } + + public enum UkraineTrnType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case business = "Business" + case individual = "Individual" + public var description: String { return self.rawValue } + } + + // MARK: Shapes + + public struct AccountDetails: AWSDecodableShape { + /// List of unique account identifiers. + public let accountId: String? + /// The meta data information associated with the account. + public let accountMetaData: AccountMetaData? + /// Tax inheritance information associated with the account. + public let taxInheritanceDetails: TaxInheritanceDetails? + /// Your TRN information. Instead of having full legal address, here TRN information will have jurisdiction details (for example, country code and state/region/province if applicable). + public let taxRegistration: TaxRegistrationWithJurisdiction? + + public init(accountId: String? = nil, accountMetaData: AccountMetaData? = nil, taxInheritanceDetails: TaxInheritanceDetails? = nil, taxRegistration: TaxRegistrationWithJurisdiction? = nil) { + self.accountId = accountId + self.accountMetaData = accountMetaData + self.taxInheritanceDetails = taxInheritanceDetails + self.taxRegistration = taxRegistration + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case accountMetaData = "accountMetaData" + case taxInheritanceDetails = "taxInheritanceDetails" + case taxRegistration = "taxRegistration" + } + } + + public struct AccountMetaData: AWSDecodableShape { + /// The Amazon Web Services accounts name. + public let accountName: String? + public let address: Address? 
+ /// Address roles associated with the account containing country code information. + public let addressRoleMap: [AddressRoleType: Jurisdiction]? + /// The type of address associated with the legal profile. + public let addressType: AddressRoleType? + /// Seller information associated with the account. + public let seller: String? + + public init(accountName: String? = nil, address: Address? = nil, addressRoleMap: [AddressRoleType: Jurisdiction]? = nil, addressType: AddressRoleType? = nil, seller: String? = nil) { + self.accountName = accountName + self.address = address + self.addressRoleMap = addressRoleMap + self.addressType = addressType + self.seller = seller + } + + private enum CodingKeys: String, CodingKey { + case accountName = "accountName" + case address = "address" + case addressRoleMap = "addressRoleMap" + case addressType = "addressType" + case seller = "seller" + } + } + + public struct AdditionalInfoRequest: AWSEncodableShape { + /// Additional tax information associated with your TRN in Canada. + public let canadaAdditionalInfo: CanadaAdditionalInfo? + /// Additional tax information to specify for a TRN in Estonia. + public let estoniaAdditionalInfo: EstoniaAdditionalInfo? + /// Additional tax information to specify for a TRN in Georgia. + public let georgiaAdditionalInfo: GeorgiaAdditionalInfo? + /// Additional tax information to specify for a TRN in Israel. + public let israelAdditionalInfo: IsraelAdditionalInfo? + /// Additional tax information to specify for a TRN in Italy. + public let italyAdditionalInfo: ItalyAdditionalInfo? + /// Additional tax information to specify for a TRN in Kenya. + public let kenyaAdditionalInfo: KenyaAdditionalInfo? + /// Additional tax information to specify for a TRN in Malaysia. + public let malaysiaAdditionalInfo: MalaysiaAdditionalInfo? + /// Additional tax information associated with your TRN in Poland. + public let polandAdditionalInfo: PolandAdditionalInfo? + /// Additional tax information to specify for a TRN in Romania. + public let romaniaAdditionalInfo: RomaniaAdditionalInfo? + /// Additional tax information associated with your TRN in Saudi Arabia. + public let saudiArabiaAdditionalInfo: SaudiArabiaAdditionalInfo? + /// Additional tax information to specify for a TRN in South Korea. + public let southKoreaAdditionalInfo: SouthKoreaAdditionalInfo? + /// Additional tax information to specify for a TRN in Spain. + public let spainAdditionalInfo: SpainAdditionalInfo? + /// Additional tax information to specify for a TRN in Turkey. + public let turkeyAdditionalInfo: TurkeyAdditionalInfo? + /// Additional tax information associated with your TRN in Ukraine. + public let ukraineAdditionalInfo: UkraineAdditionalInfo? + + public init(canadaAdditionalInfo: CanadaAdditionalInfo? = nil, estoniaAdditionalInfo: EstoniaAdditionalInfo? = nil, georgiaAdditionalInfo: GeorgiaAdditionalInfo? = nil, israelAdditionalInfo: IsraelAdditionalInfo? = nil, italyAdditionalInfo: ItalyAdditionalInfo? = nil, kenyaAdditionalInfo: KenyaAdditionalInfo? = nil, malaysiaAdditionalInfo: MalaysiaAdditionalInfo? = nil, polandAdditionalInfo: PolandAdditionalInfo? = nil, romaniaAdditionalInfo: RomaniaAdditionalInfo? = nil, saudiArabiaAdditionalInfo: SaudiArabiaAdditionalInfo? = nil, southKoreaAdditionalInfo: SouthKoreaAdditionalInfo? = nil, spainAdditionalInfo: SpainAdditionalInfo? = nil, turkeyAdditionalInfo: TurkeyAdditionalInfo? = nil, ukraineAdditionalInfo: UkraineAdditionalInfo? 
= nil) { + self.canadaAdditionalInfo = canadaAdditionalInfo + self.estoniaAdditionalInfo = estoniaAdditionalInfo + self.georgiaAdditionalInfo = georgiaAdditionalInfo + self.israelAdditionalInfo = israelAdditionalInfo + self.italyAdditionalInfo = italyAdditionalInfo + self.kenyaAdditionalInfo = kenyaAdditionalInfo + self.malaysiaAdditionalInfo = malaysiaAdditionalInfo + self.polandAdditionalInfo = polandAdditionalInfo + self.romaniaAdditionalInfo = romaniaAdditionalInfo + self.saudiArabiaAdditionalInfo = saudiArabiaAdditionalInfo + self.southKoreaAdditionalInfo = southKoreaAdditionalInfo + self.spainAdditionalInfo = spainAdditionalInfo + self.turkeyAdditionalInfo = turkeyAdditionalInfo + self.ukraineAdditionalInfo = ukraineAdditionalInfo + } + + public func validate(name: String) throws { + try self.canadaAdditionalInfo?.validate(name: "\(name).canadaAdditionalInfo") + try self.estoniaAdditionalInfo?.validate(name: "\(name).estoniaAdditionalInfo") + try self.italyAdditionalInfo?.validate(name: "\(name).italyAdditionalInfo") + try self.malaysiaAdditionalInfo?.validate(name: "\(name).malaysiaAdditionalInfo") + try self.polandAdditionalInfo?.validate(name: "\(name).polandAdditionalInfo") + try self.southKoreaAdditionalInfo?.validate(name: "\(name).southKoreaAdditionalInfo") + try self.turkeyAdditionalInfo?.validate(name: "\(name).turkeyAdditionalInfo") + } + + private enum CodingKeys: String, CodingKey { + case canadaAdditionalInfo = "canadaAdditionalInfo" + case estoniaAdditionalInfo = "estoniaAdditionalInfo" + case georgiaAdditionalInfo = "georgiaAdditionalInfo" + case israelAdditionalInfo = "israelAdditionalInfo" + case italyAdditionalInfo = "italyAdditionalInfo" + case kenyaAdditionalInfo = "kenyaAdditionalInfo" + case malaysiaAdditionalInfo = "malaysiaAdditionalInfo" + case polandAdditionalInfo = "polandAdditionalInfo" + case romaniaAdditionalInfo = "romaniaAdditionalInfo" + case saudiArabiaAdditionalInfo = "saudiArabiaAdditionalInfo" + case southKoreaAdditionalInfo = "southKoreaAdditionalInfo" + case spainAdditionalInfo = "spainAdditionalInfo" + case turkeyAdditionalInfo = "turkeyAdditionalInfo" + case ukraineAdditionalInfo = "ukraineAdditionalInfo" + } + } + + public struct AdditionalInfoResponse: AWSDecodableShape { + /// Additional tax information associated with your TRN in Brazil. The Tax Settings API returns this information in your response when any additional information is present with your TRN in Brazil. + public let brazilAdditionalInfo: BrazilAdditionalInfo? + /// Additional tax information associated with your TRN in Canada. + public let canadaAdditionalInfo: CanadaAdditionalInfo? + /// Additional tax information associated with your TRN in Estonia. + public let estoniaAdditionalInfo: EstoniaAdditionalInfo? + /// Additional tax information associated with your TRN in Georgia. + public let georgiaAdditionalInfo: GeorgiaAdditionalInfo? + /// Additional tax information in India. + public let indiaAdditionalInfo: IndiaAdditionalInfo? + /// Additional tax information associated with your TRN in Israel. + public let israelAdditionalInfo: IsraelAdditionalInfo? + /// Additional tax information associated with your TRN in Italy. + public let italyAdditionalInfo: ItalyAdditionalInfo? + /// Additional tax information associated with your TRN in Kenya. + public let kenyaAdditionalInfo: KenyaAdditionalInfo? + /// Additional tax information associated with your TRN in Malaysia. + public let malaysiaAdditionalInfo: MalaysiaAdditionalInfo? 
+ /// Additional tax information associated with your TRN in Poland. + public let polandAdditionalInfo: PolandAdditionalInfo? + /// Additional tax information to specify for a TRN in Romania. + public let romaniaAdditionalInfo: RomaniaAdditionalInfo? + /// Additional tax information associated with your TRN in Saudi Arabia. + public let saudiArabiaAdditionalInfo: SaudiArabiaAdditionalInfo? + /// Additional tax information associated with your TRN in South Korea. + public let southKoreaAdditionalInfo: SouthKoreaAdditionalInfo? + /// Additional tax information associated with your TRN in Spain. + public let spainAdditionalInfo: SpainAdditionalInfo? + /// Additional tax information associated with your TRN in Turkey. + public let turkeyAdditionalInfo: TurkeyAdditionalInfo? + /// Additional tax information associated with your TRN in Ukraine. + public let ukraineAdditionalInfo: UkraineAdditionalInfo? + + public init(brazilAdditionalInfo: BrazilAdditionalInfo? = nil, canadaAdditionalInfo: CanadaAdditionalInfo? = nil, estoniaAdditionalInfo: EstoniaAdditionalInfo? = nil, georgiaAdditionalInfo: GeorgiaAdditionalInfo? = nil, indiaAdditionalInfo: IndiaAdditionalInfo? = nil, israelAdditionalInfo: IsraelAdditionalInfo? = nil, italyAdditionalInfo: ItalyAdditionalInfo? = nil, kenyaAdditionalInfo: KenyaAdditionalInfo? = nil, malaysiaAdditionalInfo: MalaysiaAdditionalInfo? = nil, polandAdditionalInfo: PolandAdditionalInfo? = nil, romaniaAdditionalInfo: RomaniaAdditionalInfo? = nil, saudiArabiaAdditionalInfo: SaudiArabiaAdditionalInfo? = nil, southKoreaAdditionalInfo: SouthKoreaAdditionalInfo? = nil, spainAdditionalInfo: SpainAdditionalInfo? = nil, turkeyAdditionalInfo: TurkeyAdditionalInfo? = nil, ukraineAdditionalInfo: UkraineAdditionalInfo? = nil) { + self.brazilAdditionalInfo = brazilAdditionalInfo + self.canadaAdditionalInfo = canadaAdditionalInfo + self.estoniaAdditionalInfo = estoniaAdditionalInfo + self.georgiaAdditionalInfo = georgiaAdditionalInfo + self.indiaAdditionalInfo = indiaAdditionalInfo + self.israelAdditionalInfo = israelAdditionalInfo + self.italyAdditionalInfo = italyAdditionalInfo + self.kenyaAdditionalInfo = kenyaAdditionalInfo + self.malaysiaAdditionalInfo = malaysiaAdditionalInfo + self.polandAdditionalInfo = polandAdditionalInfo + self.romaniaAdditionalInfo = romaniaAdditionalInfo + self.saudiArabiaAdditionalInfo = saudiArabiaAdditionalInfo + self.southKoreaAdditionalInfo = southKoreaAdditionalInfo + self.spainAdditionalInfo = spainAdditionalInfo + self.turkeyAdditionalInfo = turkeyAdditionalInfo + self.ukraineAdditionalInfo = ukraineAdditionalInfo + } + + private enum CodingKeys: String, CodingKey { + case brazilAdditionalInfo = "brazilAdditionalInfo" + case canadaAdditionalInfo = "canadaAdditionalInfo" + case estoniaAdditionalInfo = "estoniaAdditionalInfo" + case georgiaAdditionalInfo = "georgiaAdditionalInfo" + case indiaAdditionalInfo = "indiaAdditionalInfo" + case israelAdditionalInfo = "israelAdditionalInfo" + case italyAdditionalInfo = "italyAdditionalInfo" + case kenyaAdditionalInfo = "kenyaAdditionalInfo" + case malaysiaAdditionalInfo = "malaysiaAdditionalInfo" + case polandAdditionalInfo = "polandAdditionalInfo" + case romaniaAdditionalInfo = "romaniaAdditionalInfo" + case saudiArabiaAdditionalInfo = "saudiArabiaAdditionalInfo" + case southKoreaAdditionalInfo = "southKoreaAdditionalInfo" + case spainAdditionalInfo = "spainAdditionalInfo" + case turkeyAdditionalInfo = "turkeyAdditionalInfo" + case ukraineAdditionalInfo = "ukraineAdditionalInfo" + } + } + + public struct 
Address: AWSEncodableShape & AWSDecodableShape { + /// The first line of the address. + public let addressLine1: String + /// The second line of the address, if applicable. + public let addressLine2: String? + /// The third line of the address, if applicable. Currently, the Tax Settings API accepts the addressLine3 parameter only for Saudi Arabia. When you specify a TRN in Saudi Arabia, you must enter the addressLine3 and specify the building number for the address. For example, you might enter 1234. + public let addressLine3: String? + /// The city that the address is in. + public let city: String + /// The country code for the country that the address is in. + public let countryCode: String + /// The district or county the address is located. For addresses in Brazil, this parameter uses the name of the neighborhood. When you set a TRN in Brazil, use districtOrCounty for the neighborhood name. + public let districtOrCounty: String? + /// The postal code associated with the address. + public let postalCode: String + /// The state, region, or province that the address is located. If this is required for tax settings, use the same name as shown on the Tax Settings page. + public let stateOrRegion: String? + + public init(addressLine1: String, addressLine2: String? = nil, addressLine3: String? = nil, city: String, countryCode: String, districtOrCounty: String? = nil, postalCode: String, stateOrRegion: String? = nil) { + self.addressLine1 = addressLine1 + self.addressLine2 = addressLine2 + self.addressLine3 = addressLine3 + self.city = city + self.countryCode = countryCode + self.districtOrCounty = districtOrCounty + self.postalCode = postalCode + self.stateOrRegion = stateOrRegion + } + + public func validate(name: String) throws { + try self.validate(self.addressLine1, name: "addressLine1", parent: name, max: 180) + try self.validate(self.addressLine1, name: "addressLine1", parent: name, min: 1) + try self.validate(self.addressLine1, name: "addressLine1", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.addressLine2, name: "addressLine2", parent: name, max: 60) + try self.validate(self.addressLine2, name: "addressLine2", parent: name, min: 1) + try self.validate(self.addressLine2, name: "addressLine2", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.addressLine3, name: "addressLine3", parent: name, max: 60) + try self.validate(self.addressLine3, name: "addressLine3", parent: name, min: 1) + try self.validate(self.addressLine3, name: "addressLine3", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.city, name: "city", parent: name, max: 50) + try self.validate(self.city, name: "city", parent: name, min: 1) + try self.validate(self.city, name: "city", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.countryCode, name: "countryCode", parent: name, max: 2) + try self.validate(self.countryCode, name: "countryCode", parent: name, min: 2) + try self.validate(self.countryCode, name: "countryCode", parent: name, pattern: "^[a-zA-Z]+$") + try self.validate(self.districtOrCounty, name: "districtOrCounty", parent: name, max: 50) + try self.validate(self.districtOrCounty, name: "districtOrCounty", parent: name, min: 1) + try self.validate(self.districtOrCounty, name: "districtOrCounty", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.postalCode, name: "postalCode", parent: name, max: 20) + try self.validate(self.postalCode, name: "postalCode", parent: name, min: 1) + try 
self.validate(self.postalCode, name: "postalCode", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.stateOrRegion, name: "stateOrRegion", parent: name, max: 50) + try self.validate(self.stateOrRegion, name: "stateOrRegion", parent: name, min: 1) + try self.validate(self.stateOrRegion, name: "stateOrRegion", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + } + + private enum CodingKeys: String, CodingKey { + case addressLine1 = "addressLine1" + case addressLine2 = "addressLine2" + case addressLine3 = "addressLine3" + case city = "city" + case countryCode = "countryCode" + case districtOrCounty = "districtOrCounty" + case postalCode = "postalCode" + case stateOrRegion = "stateOrRegion" + } + } + + public struct BatchDeleteTaxRegistrationError: AWSDecodableShape { + /// The unique account identifier for the account whose tax registration couldn't be deleted during the BatchDeleteTaxRegistration operation. + public let accountId: String + /// The error code for an individual failure in BatchDeleteTaxRegistration operation. + public let code: String? + /// The error message for an individual failure in the BatchDeleteTaxRegistration operation. + public let message: String + + public init(accountId: String, code: String? = nil, message: String) { + self.accountId = accountId + self.code = code + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case code = "code" + case message = "message" + } + } + + public struct BatchDeleteTaxRegistrationRequest: AWSEncodableShape { + /// List of unique account identifiers. + public let accountIds: [String] + + public init(accountIds: [String]) { + self.accountIds = accountIds + } + + public func validate(name: String) throws { + try self.accountIds.forEach { + try validate($0, name: "accountIds[]", parent: name, max: 12) + try validate($0, name: "accountIds[]", parent: name, min: 12) + try validate($0, name: "accountIds[]", parent: name, pattern: "^\\d+$") + } + try self.validate(self.accountIds, name: "accountIds", parent: name, max: 5) + try self.validate(self.accountIds, name: "accountIds", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case accountIds = "accountIds" + } + } + + public struct BatchDeleteTaxRegistrationResponse: AWSDecodableShape { + /// The list of errors for the accounts the TRN information could not be deleted for. + public let errors: [BatchDeleteTaxRegistrationError] + + public init(errors: [BatchDeleteTaxRegistrationError]) { + self.errors = errors + } + + private enum CodingKeys: String, CodingKey { + case errors = "errors" + } + } + + public struct BatchPutTaxRegistrationError: AWSDecodableShape { + /// The unique account identifier for the account that the tax registration couldn't be added, or updated during the BatchPutTaxRegistration operation. + public let accountId: String + /// The error code for an individual failure in the BatchPutTaxRegistration operation. + public let code: String? + /// The error message for an individual failure in the BatchPutTaxRegistration operation. + public let message: String + + public init(accountId: String, code: String? = nil, message: String) { + self.accountId = accountId + self.code = code + self.message = message + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case code = "code" + case message = "message" + } + } + + public struct BatchPutTaxRegistrationRequest: AWSEncodableShape { + /// List of unique account identifiers. 
+ public let accountIds: [String] + /// Your TRN information that will be stored to the accounts mentioned in putEntries. + public let taxRegistrationEntry: TaxRegistrationEntry + + public init(accountIds: [String], taxRegistrationEntry: TaxRegistrationEntry) { + self.accountIds = accountIds + self.taxRegistrationEntry = taxRegistrationEntry + } + + public func validate(name: String) throws { + try self.accountIds.forEach { + try validate($0, name: "accountIds[]", parent: name, max: 12) + try validate($0, name: "accountIds[]", parent: name, min: 12) + try validate($0, name: "accountIds[]", parent: name, pattern: "^\\d+$") + } + try self.validate(self.accountIds, name: "accountIds", parent: name, max: 5) + try self.validate(self.accountIds, name: "accountIds", parent: name, min: 1) + try self.taxRegistrationEntry.validate(name: "\(name).taxRegistrationEntry") + } + + private enum CodingKeys: String, CodingKey { + case accountIds = "accountIds" + case taxRegistrationEntry = "taxRegistrationEntry" + } + } + + public struct BatchPutTaxRegistrationResponse: AWSDecodableShape { + /// List of errors for the accounts the TRN information could not be added or updated to. + public let errors: [BatchPutTaxRegistrationError] + /// The status of your TRN stored in the system after processing. Based on the validation occurring on the TRN, the status can be Verified, Pending or Rejected. + public let status: TaxRegistrationStatus? + + public init(errors: [BatchPutTaxRegistrationError], status: TaxRegistrationStatus? = nil) { + self.errors = errors + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case errors = "errors" + case status = "status" + } + } + + public struct BrazilAdditionalInfo: AWSDecodableShape { + /// The Cadastro de Contribuintes Mobiliários (CCM) code for your TRN in Brazil. This only applies for a CNPJ tax type for the São Paulo municipality. + public let ccmCode: String? + /// Legal nature of business, based on your TRN in Brazil. This only applies for a CNPJ tax type. + public let legalNatureCode: String? + + public init(ccmCode: String? = nil, legalNatureCode: String? = nil) { + self.ccmCode = ccmCode + self.legalNatureCode = legalNatureCode + } + + private enum CodingKeys: String, CodingKey { + case ccmCode = "ccmCode" + case legalNatureCode = "legalNatureCode" + } + } + + public struct CanadaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The Quebec Sales Tax ID number. Leave blank if you do not have a Quebec Sales Tax ID number. + public let canadaQuebecSalesTaxNumber: String? + /// Manitoba Retail Sales Tax ID number. Customers purchasing Amazon Web Services for resale in Manitoba must provide a valid Retail Sales Tax ID number for Manitoba. Leave this blank if you do not have a Retail Sales Tax ID number in Manitoba or are not purchasing Amazon Web Services for resale. + public let canadaRetailSalesTaxNumber: String? + /// The value for this parameter must be true if the provincialSalesTaxId value is provided for a TRN in British Columbia, Saskatchewan, or Manitoba provinces. To claim a provincial sales tax (PST) and retail sales tax (RST) reseller exemption, you must confirm that purchases from this account were made for resale. Otherwise, remove the PST or RST number from the provincialSalesTaxId parameter from your request. + public let isResellerAccount: Bool? + /// The provincial sales tax ID for your TRN in Canada. 
This parameter can represent the following: Provincial sales tax ID number for British Columbia and Saskatchewan provinces Manitoba retail sales tax ID number for Manitoba province Quebec sales tax ID number for Quebec province The Tax Setting API only accepts this parameter if the TRN is specified for the previous provinces. For other provinces, the Tax Settings API doesn't accept this parameter. + public let provincialSalesTaxId: String? + + public init(canadaQuebecSalesTaxNumber: String? = nil, canadaRetailSalesTaxNumber: String? = nil, isResellerAccount: Bool? = nil, provincialSalesTaxId: String? = nil) { + self.canadaQuebecSalesTaxNumber = canadaQuebecSalesTaxNumber + self.canadaRetailSalesTaxNumber = canadaRetailSalesTaxNumber + self.isResellerAccount = isResellerAccount + self.provincialSalesTaxId = provincialSalesTaxId + } + + public func validate(name: String) throws { + try self.validate(self.canadaQuebecSalesTaxNumber, name: "canadaQuebecSalesTaxNumber", parent: name, pattern: "^([0-9]{10})(TQ[0-9]{4})?$") + try self.validate(self.canadaRetailSalesTaxNumber, name: "canadaRetailSalesTaxNumber", parent: name, pattern: "^([0-9]{6}-[0-9]{1})$") + try self.validate(self.provincialSalesTaxId, name: "provincialSalesTaxId", parent: name, max: 16) + try self.validate(self.provincialSalesTaxId, name: "provincialSalesTaxId", parent: name, min: 7) + try self.validate(self.provincialSalesTaxId, name: "provincialSalesTaxId", parent: name, pattern: "^([0-9A-Z/-]+)$") + } + + private enum CodingKeys: String, CodingKey { + case canadaQuebecSalesTaxNumber = "canadaQuebecSalesTaxNumber" + case canadaRetailSalesTaxNumber = "canadaRetailSalesTaxNumber" + case isResellerAccount = "isResellerAccount" + case provincialSalesTaxId = "provincialSalesTaxId" + } + } + + public struct DeleteTaxRegistrationRequest: AWSEncodableShape { + /// Unique account identifier for the TRN information that needs to be deleted. If this isn't passed, the account ID corresponding to the credentials of the API caller will be used for this parameter. + public let accountId: String? + + public init(accountId: String? = nil) { + self.accountId = accountId + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, max: 12) + try self.validate(self.accountId, name: "accountId", parent: name, min: 12) + try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d+$") + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + } + } + + public struct DeleteTaxRegistrationResponse: AWSDecodableShape { + public init() {} + } + + public struct DestinationS3Location: AWSEncodableShape { + /// The name of your Amazon S3 bucket that you specify to download your tax documents to. + public let bucket: String + /// The Amazon S3 object prefix that you specify for your tax document file. + public let prefix: String? + + public init(bucket: String, prefix: String? 
= nil) { + self.bucket = bucket + self.prefix = prefix + } + + public func validate(name: String) throws { + try self.validate(self.bucket, name: "bucket", parent: name, max: 63) + try self.validate(self.bucket, name: "bucket", parent: name, min: 3) + try self.validate(self.bucket, name: "bucket", parent: name, pattern: "^(?=^.{3,63}$)(?!^(\\d+\\.)+\\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])\\.)*([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])$)$") + try self.validate(self.prefix, name: "prefix", parent: name, max: 512) + try self.validate(self.prefix, name: "prefix", parent: name, pattern: "^.*\\S.*$") + } + + private enum CodingKeys: String, CodingKey { + case bucket = "bucket" + case prefix = "prefix" + } + } + + public struct EstoniaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// Registry commercial code (RCC) for your TRN in Estonia. This value is an eight-numeric string, such as 12345678. + public let registryCommercialCode: String + + public init(registryCommercialCode: String) { + self.registryCommercialCode = registryCommercialCode + } + + public func validate(name: String) throws { + try self.validate(self.registryCommercialCode, name: "registryCommercialCode", parent: name, max: 8) + try self.validate(self.registryCommercialCode, name: "registryCommercialCode", parent: name, min: 8) + try self.validate(self.registryCommercialCode, name: "registryCommercialCode", parent: name, pattern: "^\\d+$") + } + + private enum CodingKeys: String, CodingKey { + case registryCommercialCode = "registryCommercialCode" + } + } + + public struct GeorgiaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The legal person or physical person assigned to this TRN in Georgia. + public let personType: PersonType + + public init(personType: PersonType) { + self.personType = personType + } + + private enum CodingKeys: String, CodingKey { + case personType = "personType" + } + } + + public struct GetTaxRegistrationDocumentRequest: AWSEncodableShape { + /// The Amazon S3 bucket that you specify to download your tax documents to. + public let destinationS3Location: DestinationS3Location + /// The metadata for your tax document. + public let taxDocumentMetadata: TaxDocumentMetadata + + public init(destinationS3Location: DestinationS3Location, taxDocumentMetadata: TaxDocumentMetadata) { + self.destinationS3Location = destinationS3Location + self.taxDocumentMetadata = taxDocumentMetadata + } + + public func validate(name: String) throws { + try self.destinationS3Location.validate(name: "\(name).destinationS3Location") + try self.taxDocumentMetadata.validate(name: "\(name).taxDocumentMetadata") + } + + private enum CodingKeys: String, CodingKey { + case destinationS3Location = "destinationS3Location" + case taxDocumentMetadata = "taxDocumentMetadata" + } + } + + public struct GetTaxRegistrationDocumentResponse: AWSDecodableShape { + /// The file path of the Amazon S3 bucket where you want to download your tax document to. + public let destinationFilePath: String? + + public init(destinationFilePath: String? = nil) { + self.destinationFilePath = destinationFilePath + } + + private enum CodingKeys: String, CodingKey { + case destinationFilePath = "destinationFilePath" + } + } + + public struct GetTaxRegistrationRequest: AWSEncodableShape { + /// Your unique account identifier. + public let accountId: String? + + public init(accountId: String? 
= nil) {
+            self.accountId = accountId
+        }
+
+        public func validate(name: String) throws {
+            try self.validate(self.accountId, name: "accountId", parent: name, max: 12)
+            try self.validate(self.accountId, name: "accountId", parent: name, min: 12)
+            try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d+$")
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case accountId = "accountId"
+        }
+    }
+
+    public struct GetTaxRegistrationResponse: AWSDecodableShape {
+        /// TRN information of the account mentioned in the request.
+        public let taxRegistration: TaxRegistration?
+
+        public init(taxRegistration: TaxRegistration? = nil) {
+            self.taxRegistration = taxRegistration
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case taxRegistration = "taxRegistration"
+        }
+    }
+
+    public struct IndiaAdditionalInfo: AWSDecodableShape {
+        /// India pan information associated with the account.
+        public let pan: String?
+
+        public init(pan: String? = nil) {
+            self.pan = pan
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case pan = "pan"
+        }
+    }
+
+    public struct IsraelAdditionalInfo: AWSEncodableShape & AWSDecodableShape {
+        /// Customer type for your TRN in Israel. The value can be Business or Individual. Use Business for entities such as not-for-profit and financial institutions.
+        public let customerType: IsraelCustomerType
+        /// Dealer type for your TRN in Israel. If you're not a local authorized dealer with an Israeli VAT ID, specify your tax identification number so that Amazon Web Services can send you a compliant tax invoice.
+        public let dealerType: IsraelDealerType
+
+        public init(customerType: IsraelCustomerType, dealerType: IsraelDealerType) {
+            self.customerType = customerType
+            self.dealerType = dealerType
+        }
+
+        private enum CodingKeys: String, CodingKey {
+            case customerType = "customerType"
+            case dealerType = "dealerType"
+        }
+    }
+
+    public struct ItalyAdditionalInfo: AWSEncodableShape & AWSDecodableShape {
+        /// The tender procedure identification code.
+        public let cigNumber: String?
+        /// Additional tax information to specify for a TRN in Italy. This is managed by the Interministerial Committee for Economic Planning (CIPE) which characterizes every public investment project (Individual Project Code).
+        public let cupNumber: String?
+        /// Additional tax information to specify for a TRN in Italy. Use CodiceDestinatario to receive your invoices via web service (API) or FTP.
+        public let sdiAccountId: String?
+        /// List of service tax codes for your TRN in Italy. You can use your customer tax code as part of a VAT Group.
+        public let taxCode: String?
+
+        public init(cigNumber: String? = nil, cupNumber: String? = nil, sdiAccountId: String? = nil, taxCode: String?
= nil) { + self.cigNumber = cigNumber + self.cupNumber = cupNumber + self.sdiAccountId = sdiAccountId + self.taxCode = taxCode + } + + public func validate(name: String) throws { + try self.validate(self.cigNumber, name: "cigNumber", parent: name, pattern: "^([0-9A-Z]{1,15})$") + try self.validate(self.cupNumber, name: "cupNumber", parent: name, pattern: "^([0-9A-Z]{1,15})$") + try self.validate(self.sdiAccountId, name: "sdiAccountId", parent: name, pattern: "^[0-9A-Z]{6,7}$") + try self.validate(self.taxCode, name: "taxCode", parent: name, pattern: "^([0-9]{11}|[A-Z]{6}[0-9]{2}[A-Z][0-9]{2}[A-Z][0-9]{3}[A-Z])$") + } + + private enum CodingKeys: String, CodingKey { + case cigNumber = "cigNumber" + case cupNumber = "cupNumber" + case sdiAccountId = "sdiAccountId" + case taxCode = "taxCode" + } + } + + public struct Jurisdiction: AWSDecodableShape { + /// The country code of the jurisdiction. + public let countryCode: String + /// The state, region, or province associated with the country of the jurisdiction, if applicable. + public let stateOrRegion: String? + + public init(countryCode: String, stateOrRegion: String? = nil) { + self.countryCode = countryCode + self.stateOrRegion = stateOrRegion + } + + private enum CodingKeys: String, CodingKey { + case countryCode = "countryCode" + case stateOrRegion = "stateOrRegion" + } + } + + public struct KenyaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The legal person or physical person assigned to this TRN in Kenya. + public let personType: PersonType + + public init(personType: PersonType) { + self.personType = personType + } + + private enum CodingKeys: String, CodingKey { + case personType = "personType" + } + } + + public struct ListTaxRegistrationsRequest: AWSEncodableShape { + /// Number of accountDetails results you want in one response. + public let maxResults: Int? + /// The token to retrieve the next set of results. + public let nextToken: String? + + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2000) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[-A-Za-z0-9_+\\=\\/]+$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "maxResults" + case nextToken = "nextToken" + } + } + + public struct ListTaxRegistrationsResponse: AWSDecodableShape { + /// The list of account details. This contains account Ids and TRN Information for each of the linked accounts. + public let accountDetails: [AccountDetails] + /// The token to retrieve the next set of results. + public let nextToken: String? + + public init(accountDetails: [AccountDetails], nextToken: String? = nil) { + self.accountDetails = accountDetails + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case accountDetails = "accountDetails" + case nextToken = "nextToken" + } + } + + public struct MalaysiaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// List of service tax codes for your TRN in Malaysia. 
+ public let serviceTaxCodes: [MalaysiaServiceTaxCode] + + public init(serviceTaxCodes: [MalaysiaServiceTaxCode]) { + self.serviceTaxCodes = serviceTaxCodes + } + + public func validate(name: String) throws { + try self.validate(self.serviceTaxCodes, name: "serviceTaxCodes", parent: name, max: 4) + try self.validate(self.serviceTaxCodes, name: "serviceTaxCodes", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case serviceTaxCodes = "serviceTaxCodes" + } + } + + public struct PolandAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The individual tax registration number (NIP). Individual NIP is valid for other taxes excluding VAT purposes. + public let individualRegistrationNumber: String? + /// True if your business is a member of a VAT group with a NIP active for VAT purposes. Otherwise, this is false. + public let isGroupVatEnabled: Bool? + + public init(individualRegistrationNumber: String? = nil, isGroupVatEnabled: Bool? = nil) { + self.individualRegistrationNumber = individualRegistrationNumber + self.isGroupVatEnabled = isGroupVatEnabled + } + + public func validate(name: String) throws { + try self.validate(self.individualRegistrationNumber, name: "individualRegistrationNumber", parent: name, pattern: "^([0-9]{10})$") + } + + private enum CodingKeys: String, CodingKey { + case individualRegistrationNumber = "individualRegistrationNumber" + case isGroupVatEnabled = "isGroupVatEnabled" + } + } + + public struct PutTaxRegistrationRequest: AWSEncodableShape { + /// Your unique account identifier. + public let accountId: String? + /// Your TRN information that will be stored to the account mentioned in accountId. + public let taxRegistrationEntry: TaxRegistrationEntry + + public init(accountId: String? = nil, taxRegistrationEntry: TaxRegistrationEntry) { + self.accountId = accountId + self.taxRegistrationEntry = taxRegistrationEntry + } + + public func validate(name: String) throws { + try self.validate(self.accountId, name: "accountId", parent: name, max: 12) + try self.validate(self.accountId, name: "accountId", parent: name, min: 12) + try self.validate(self.accountId, name: "accountId", parent: name, pattern: "^\\d+$") + try self.taxRegistrationEntry.validate(name: "\(name).taxRegistrationEntry") + } + + private enum CodingKeys: String, CodingKey { + case accountId = "accountId" + case taxRegistrationEntry = "taxRegistrationEntry" + } + } + + public struct PutTaxRegistrationResponse: AWSDecodableShape { + /// The status of your TRN stored in the system after processing. Based on the validation occurring on the TRN, the status can be Verified, Pending or Rejected. + public let status: TaxRegistrationStatus? + + public init(status: TaxRegistrationStatus? = nil) { + self.status = status + } + + private enum CodingKeys: String, CodingKey { + case status = "status" + } + } + + public struct RomaniaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The tax registration number type. The value can be TaxRegistrationNumber or LocalRegistrationNumber. + public let taxRegistrationNumberType: TaxRegistrationNumberType + + public init(taxRegistrationNumberType: TaxRegistrationNumberType) { + self.taxRegistrationNumberType = taxRegistrationNumberType + } + + private enum CodingKeys: String, CodingKey { + case taxRegistrationNumberType = "taxRegistrationNumberType" + } + } + + public struct SaudiArabiaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The tax registration number type. 
+ public let taxRegistrationNumberType: SaudiArabiaTaxRegistrationNumberType? + + public init(taxRegistrationNumberType: SaudiArabiaTaxRegistrationNumberType? = nil) { + self.taxRegistrationNumberType = taxRegistrationNumberType + } + + private enum CodingKeys: String, CodingKey { + case taxRegistrationNumberType = "taxRegistrationNumberType" + } + } + + public struct SourceS3Location: AWSEncodableShape { + /// The name of your Amazon S3 bucket that your tax document is located. + public let bucket: String + /// The object key of your tax document object in Amazon S3. + public let key: String + + public init(bucket: String, key: String) { + self.bucket = bucket + self.key = key + } + + public func validate(name: String) throws { + try self.validate(self.bucket, name: "bucket", parent: name, max: 63) + try self.validate(self.bucket, name: "bucket", parent: name, min: 3) + try self.validate(self.bucket, name: "bucket", parent: name, pattern: "^(?=^.{3,63}$)(?!^(\\d+\\.)+\\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])\\.)*([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])$)$") + try self.validate(self.key, name: "key", parent: name, max: 1024) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.key, name: "key", parent: name, pattern: "^.*\\S.*$") + } + + private enum CodingKeys: String, CodingKey { + case bucket = "bucket" + case key = "key" + } + } + + public struct SouthKoreaAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The business legal name based on the most recently uploaded tax registration certificate. + public let businessRepresentativeName: String + /// Item of business based on the most recently uploaded tax registration certificate. + public let itemOfBusiness: String + /// Line of business based on the most recently uploaded tax registration certificate. + public let lineOfBusiness: String + + public init(businessRepresentativeName: String, itemOfBusiness: String, lineOfBusiness: String) { + self.businessRepresentativeName = businessRepresentativeName + self.itemOfBusiness = itemOfBusiness + self.lineOfBusiness = lineOfBusiness + } + + public func validate(name: String) throws { + try self.validate(self.businessRepresentativeName, name: "businessRepresentativeName", parent: name, max: 200) + try self.validate(self.businessRepresentativeName, name: "businessRepresentativeName", parent: name, min: 1) + try self.validate(self.businessRepresentativeName, name: "businessRepresentativeName", parent: name, pattern: "^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$") + try self.validate(self.itemOfBusiness, name: "itemOfBusiness", parent: name, max: 100) + try self.validate(self.itemOfBusiness, name: "itemOfBusiness", parent: name, min: 1) + try self.validate(self.itemOfBusiness, name: "itemOfBusiness", parent: name, pattern: "^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$") + try self.validate(self.lineOfBusiness, name: "lineOfBusiness", parent: name, max: 100) + try self.validate(self.lineOfBusiness, name: "lineOfBusiness", parent: name, min: 1) + try self.validate(self.lineOfBusiness, name: "lineOfBusiness", parent: name, pattern: "^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case businessRepresentativeName = "businessRepresentativeName" + case itemOfBusiness = "itemOfBusiness" + case lineOfBusiness = "lineOfBusiness" + } + } + + public struct SpainAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The registration type in Spain. 
+ public let registrationType: RegistrationType + + public init(registrationType: RegistrationType) { + self.registrationType = registrationType + } + + private enum CodingKeys: String, CodingKey { + case registrationType = "registrationType" + } + } + + public struct TaxDocumentMetadata: AWSEncodableShape & AWSDecodableShape { + /// The tax document access token, which contains information that the Tax Settings API uses to locate the tax document. If you update your tax registration, the existing taxDocumentAccessToken won't be valid. To get the latest token, call the GetTaxRegistration or ListTaxRegistrations API operation. This token is valid for 24 hours. + public let taxDocumentAccessToken: String + /// The name of your tax document. + public let taxDocumentName: String + + public init(taxDocumentAccessToken: String, taxDocumentName: String) { + self.taxDocumentAccessToken = taxDocumentAccessToken + self.taxDocumentName = taxDocumentName + } + + public func validate(name: String) throws { + try self.validate(self.taxDocumentAccessToken, name: "taxDocumentAccessToken", parent: name, pattern: "^[\\s\\S]*$") + try self.validate(self.taxDocumentName, name: "taxDocumentName", parent: name, pattern: "^[\\s\\S]*$") + } + + private enum CodingKeys: String, CodingKey { + case taxDocumentAccessToken = "taxDocumentAccessToken" + case taxDocumentName = "taxDocumentName" + } + } + + public struct TaxInheritanceDetails: AWSDecodableShape { + /// Tax inheritance reason information associated with the account. + public let inheritanceObtainedReason: String? + /// Tax inheritance parent account information associated with the account. + public let parentEntityId: String? + + public init(inheritanceObtainedReason: String? = nil, parentEntityId: String? = nil) { + self.inheritanceObtainedReason = inheritanceObtainedReason + self.parentEntityId = parentEntityId + } + + private enum CodingKeys: String, CodingKey { + case inheritanceObtainedReason = "inheritanceObtainedReason" + case parentEntityId = "parentEntityId" + } + } + + public struct TaxRegistration: AWSDecodableShape { + /// Additional tax information associated with your TRN. + public let additionalTaxInformation: AdditionalInfoResponse? + /// The email address to receive VAT invoices. + public let certifiedEmailId: String? + /// The legal address associated with your TRN registration. + public let legalAddress: Address + /// The legal name associated with your TRN registration. + public let legalName: String + /// Your tax registration unique identifier. + public let registrationId: String + /// Type of your tax registration. This can be either VAT or GST. + public let registrationType: TaxRegistrationType + /// The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G), specify Government. Note that certain values may not applicable for the request country. Please refer to country specific information in API document. + public let sector: Sector? + /// The status of your TRN. This can be either Verified, Pending, Deleted, or Rejected. + public let status: TaxRegistrationStatus + /// The metadata for your tax document. + public let taxDocumentMetadatas: [TaxDocumentMetadata]? + + public init(additionalTaxInformation: AdditionalInfoResponse? = nil, certifiedEmailId: String? = nil, legalAddress: Address, legalName: String, registrationId: String, registrationType: TaxRegistrationType, sector: Sector? 
= nil, status: TaxRegistrationStatus, taxDocumentMetadatas: [TaxDocumentMetadata]? = nil) { + self.additionalTaxInformation = additionalTaxInformation + self.certifiedEmailId = certifiedEmailId + self.legalAddress = legalAddress + self.legalName = legalName + self.registrationId = registrationId + self.registrationType = registrationType + self.sector = sector + self.status = status + self.taxDocumentMetadatas = taxDocumentMetadatas + } + + private enum CodingKeys: String, CodingKey { + case additionalTaxInformation = "additionalTaxInformation" + case certifiedEmailId = "certifiedEmailId" + case legalAddress = "legalAddress" + case legalName = "legalName" + case registrationId = "registrationId" + case registrationType = "registrationType" + case sector = "sector" + case status = "status" + case taxDocumentMetadatas = "taxDocumentMetadatas" + } + } + + public struct TaxRegistrationDocument: AWSEncodableShape { + /// The Amazon S3 location where your tax registration document is stored. + public let s3Location: SourceS3Location + + public init(s3Location: SourceS3Location) { + self.s3Location = s3Location + } + + public func validate(name: String) throws { + try self.s3Location.validate(name: "\(name).s3Location") + } + + private enum CodingKeys: String, CodingKey { + case s3Location = "s3Location" + } + } + + public struct TaxRegistrationEntry: AWSEncodableShape { + /// Additional tax information associated with your TRN. You only need to specify this parameter if Amazon Web Services collects any additional information for your country within AdditionalInfoRequest. + public let additionalTaxInformation: AdditionalInfoRequest? + /// The email address to receive VAT invoices. + public let certifiedEmailId: String? + /// The legal address associated with your TRN. If you're setting a TRN in Brazil for the CNPJ tax type, you don't need to specify the legal address. For TRNs in other countries and for CPF tax types Brazil, you must specify the legal address. + public let legalAddress: Address? + /// The legal name associated with your TRN. If you're setting a TRN in Brazil, you don't need to specify the legal name. For TRNs in other countries, you must specify the legal name. + public let legalName: String? + /// Your tax registration unique identifier. + public let registrationId: String + /// Your tax registration type. This can be either VAT or GST. + public let registrationType: TaxRegistrationType + /// The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G), specify Government.Note that certain values may not applicable for the request country. Please refer to country specific information in API document. + public let sector: Sector? + /// Additional details needed to verify your TRN information in Brazil. You only need to specify this parameter when you set a TRN in Brazil that is the CPF tax type. Don't specify this parameter to set a TRN in Brazil of the CNPJ tax type or to set a TRN for another country. + public let verificationDetails: VerificationDetails? + + public init(additionalTaxInformation: AdditionalInfoRequest? = nil, certifiedEmailId: String? = nil, legalAddress: Address? = nil, legalName: String? = nil, registrationId: String, registrationType: TaxRegistrationType, sector: Sector? = nil, verificationDetails: VerificationDetails? 
= nil) { + self.additionalTaxInformation = additionalTaxInformation + self.certifiedEmailId = certifiedEmailId + self.legalAddress = legalAddress + self.legalName = legalName + self.registrationId = registrationId + self.registrationType = registrationType + self.sector = sector + self.verificationDetails = verificationDetails + } + + public func validate(name: String) throws { + try self.additionalTaxInformation?.validate(name: "\(name).additionalTaxInformation") + try self.validate(self.certifiedEmailId, name: "certifiedEmailId", parent: name, pattern: "^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,20}$") + try self.legalAddress?.validate(name: "\(name).legalAddress") + try self.validate(self.legalName, name: "legalName", parent: name, max: 200) + try self.validate(self.legalName, name: "legalName", parent: name, min: 1) + try self.validate(self.legalName, name: "legalName", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.validate(self.registrationId, name: "registrationId", parent: name, max: 20) + try self.validate(self.registrationId, name: "registrationId", parent: name, min: 1) + try self.validate(self.registrationId, name: "registrationId", parent: name, pattern: "^(?!\\s*$)[\\s\\S]+$") + try self.verificationDetails?.validate(name: "\(name).verificationDetails") + } + + private enum CodingKeys: String, CodingKey { + case additionalTaxInformation = "additionalTaxInformation" + case certifiedEmailId = "certifiedEmailId" + case legalAddress = "legalAddress" + case legalName = "legalName" + case registrationId = "registrationId" + case registrationType = "registrationType" + case sector = "sector" + case verificationDetails = "verificationDetails" + } + } + + public struct TaxRegistrationWithJurisdiction: AWSDecodableShape { + /// Additional tax information associated with your TRN. + public let additionalTaxInformation: AdditionalInfoResponse? + /// The email address to receive VAT invoices. + public let certifiedEmailId: String? + /// The jurisdiction associated with your TRN information. + public let jurisdiction: Jurisdiction + /// The legal name associated with your TRN information. + public let legalName: String + /// Your tax registration unique identifier. + public let registrationId: String + /// The type of your tax registration. This can be either VAT or GST. + public let registrationType: TaxRegistrationType + /// The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G), specify Government.Note that certain values may not applicable for the request country. Please refer to country specific information in API document. + public let sector: Sector? + /// The status of your TRN. This can be either Verified, Pending, Deleted, or Rejected. + public let status: TaxRegistrationStatus + /// The metadata for your tax document. + public let taxDocumentMetadatas: [TaxDocumentMetadata]? + + public init(additionalTaxInformation: AdditionalInfoResponse? = nil, certifiedEmailId: String? = nil, jurisdiction: Jurisdiction, legalName: String, registrationId: String, registrationType: TaxRegistrationType, sector: Sector? = nil, status: TaxRegistrationStatus, taxDocumentMetadatas: [TaxDocumentMetadata]? 
= nil) { + self.additionalTaxInformation = additionalTaxInformation + self.certifiedEmailId = certifiedEmailId + self.jurisdiction = jurisdiction + self.legalName = legalName + self.registrationId = registrationId + self.registrationType = registrationType + self.sector = sector + self.status = status + self.taxDocumentMetadatas = taxDocumentMetadatas + } + + private enum CodingKeys: String, CodingKey { + case additionalTaxInformation = "additionalTaxInformation" + case certifiedEmailId = "certifiedEmailId" + case jurisdiction = "jurisdiction" + case legalName = "legalName" + case registrationId = "registrationId" + case registrationType = "registrationType" + case sector = "sector" + case status = "status" + case taxDocumentMetadatas = "taxDocumentMetadatas" + } + } + + public struct TurkeyAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The industry information that tells the Tax Settings API if you're subject to additional withholding taxes. This information is required for business-to-business (B2B) customers. This information is conditionally mandatory for B2B customers who are subject to KDV tax. + public let industries: Industries? + /// The Registered Electronic Mail (REM) that is used to send notarized communication. This parameter is optional for business-to-business (B2B) and business-to-government (B2G) customers. It's not required for business-to-consumer (B2C) customers. + public let kepEmailId: String? + /// Secondary tax ID (“harcama birimi VKN’si”). If one isn't provided, we will use your VKN as the secondary ID. + public let secondaryTaxId: String? + /// The tax office where you're registered. You can enter this information as a string. The Tax Settings API will add this information to your invoice. This parameter is required for business-to-business (B2B) and business-to-government customers. It's not required for business-to-consumer (B2C) customers. + public let taxOffice: String? + + public init(industries: Industries? = nil, kepEmailId: String? = nil, secondaryTaxId: String? = nil, taxOffice: String? = nil) { + self.industries = industries + self.kepEmailId = kepEmailId + self.secondaryTaxId = secondaryTaxId + self.taxOffice = taxOffice + } + + public func validate(name: String) throws { + try self.validate(self.kepEmailId, name: "kepEmailId", parent: name, pattern: "^[\\s\\S]*$") + try self.validate(self.secondaryTaxId, name: "secondaryTaxId", parent: name, pattern: "^([0-9]{10})$") + try self.validate(self.taxOffice, name: "taxOffice", parent: name, pattern: "^[\\s\\S]*$") + } + + private enum CodingKeys: String, CodingKey { + case industries = "industries" + case kepEmailId = "kepEmailId" + case secondaryTaxId = "secondaryTaxId" + case taxOffice = "taxOffice" + } + } + + public struct UkraineAdditionalInfo: AWSEncodableShape & AWSDecodableShape { + /// The tax registration type. + public let ukraineTrnType: UkraineTrnType + + public init(ukraineTrnType: UkraineTrnType) { + self.ukraineTrnType = ukraineTrnType + } + + private enum CodingKeys: String, CodingKey { + case ukraineTrnType = "ukraineTrnType" + } + } + + public struct VerificationDetails: AWSEncodableShape { + /// Date of birth to verify your submitted TRN. Use the YYYY-MM-DD format. + public let dateOfBirth: String? + /// The tax registration document, which is required for specific countries such as Bangladesh, Kenya, South Korea and Spain. + public let taxRegistrationDocuments: [TaxRegistrationDocument]? + + public init(dateOfBirth: String? 
= nil, taxRegistrationDocuments: [TaxRegistrationDocument]? = nil) { + self.dateOfBirth = dateOfBirth + self.taxRegistrationDocuments = taxRegistrationDocuments + } + + public func validate(name: String) throws { + try self.validate(self.dateOfBirth, name: "dateOfBirth", parent: name, max: 10) + try self.validate(self.dateOfBirth, name: "dateOfBirth", parent: name, min: 10) + try self.validate(self.dateOfBirth, name: "dateOfBirth", parent: name, pattern: "^(\\d{4}-(0[0-9]|1[0-2])-([0-2][0-9]|3[0-1]))$") + try self.taxRegistrationDocuments?.forEach { + try $0.validate(name: "\(name).taxRegistrationDocuments[]") + } + try self.validate(self.taxRegistrationDocuments, name: "taxRegistrationDocuments", parent: name, max: 5) + try self.validate(self.taxRegistrationDocuments, name: "taxRegistrationDocuments", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dateOfBirth = "dateOfBirth" + case taxRegistrationDocuments = "taxRegistrationDocuments" + } + } +} + +// MARK: - Errors + +/// Error enum for TaxSettings +public struct TaxSettingsErrorType: AWSErrorType { + enum Code: String { + case conflictException = "ConflictException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize TaxSettings + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// The exception when the input is creating conflict with the given state. + public static var conflictException: Self { .init(.conflictException) } + /// The exception thrown when an unexpected error occurs when processing a request. + public static var internalServerException: Self { .init(.internalServerException) } + /// The exception thrown when the input doesn't have a resource associated to it. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The exception when the input doesn't pass validation for at least one of the input parameters. + public static var validationException: Self { .init(.validationException) } +} + +extension TaxSettingsErrorType: Equatable { + public static func == (lhs: TaxSettingsErrorType, rhs: TaxSettingsErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension TaxSettingsErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? 
"")" + } +} diff --git a/Sources/Soto/Services/Transfer/Transfer_api.swift b/Sources/Soto/Services/Transfer/Transfer_api.swift index 49a1ecfae3..e291438d85 100644 --- a/Sources/Soto/Services/Transfer/Transfer_api.swift +++ b/Sources/Soto/Services/Transfer/Transfer_api.swift @@ -76,6 +76,7 @@ public struct Transfer: AWSService { static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ "ca-central-1": "transfer-fips.ca-central-1.amazonaws.com", + "ca-west-1": "transfer-fips.ca-west-1.amazonaws.com", "us-east-1": "transfer-fips.us-east-1.amazonaws.com", "us-east-2": "transfer-fips.us-east-2.amazonaws.com", "us-gov-east-1": "transfer-fips.us-gov-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/Transfer/Transfer_shapes.swift b/Sources/Soto/Services/Transfer/Transfer_shapes.swift index d066dfe451..94392fb816 100644 --- a/Sources/Soto/Services/Transfer/Transfer_shapes.swift +++ b/Sources/Soto/Services/Transfer/Transfer_shapes.swift @@ -784,6 +784,7 @@ extension Transfer { try self.validate(self.serverId, name: "serverId", parent: name, min: 19) try self.validate(self.serverId, name: "serverId", parent: name, pattern: "^s-([0-9a-f]{17})$") try self.validate(self.sshPublicKeyBody, name: "sshPublicKeyBody", parent: name, max: 2048) + try self.validate(self.sshPublicKeyBody, name: "sshPublicKeyBody", parent: name, pattern: "^\\s*(ssh|ecdsa)-[a-z0-9-]+[ \\t]+(([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{1,3})?(={0,3})?)(\\s*|[ \\t]+[\\S \\t]*\\s*)$") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") } @@ -1720,7 +1721,7 @@ extension Transfer { public let tags: [Tag]? /// If a private key has been specified for the certificate, its type is CERTIFICATE_WITH_PRIVATE_KEY. If there is no private key, the type is CERTIFICATE. public let type: CertificateType? - /// Specifies whether this certificate is used for signing or encryption. + /// Specifies how this certificate is used. It can be used in the following ways: SIGNING: For signing AS2 messages ENCRYPTION: For encrypting AS2 messages TLS: For securing AS2 communications sent over HTTPS public let usage: CertificateUsageType? public init(activeDate: Date? = nil, arn: String, certificate: String? = nil, certificateChain: String? = nil, certificateId: String? = nil, description: String? = nil, inactiveDate: Date? = nil, notAfterDate: Date? = nil, notBeforeDate: Date? = nil, serial: String? = nil, status: CertificateStatusType? = nil, tags: [Tag]? = nil, type: CertificateType? = nil, usage: CertificateUsageType? = nil) { @@ -1969,7 +1970,7 @@ extension Transfer { public let as2ServiceManagedEgressIpAddresses: [String]? /// Specifies the ARN of the Amazon Web ServicesCertificate Manager (ACM) certificate. Required when Protocols is set to FTPS. public let certificate: String? - /// Specifies the domain of the storage system that is used for file transfers. + /// Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3. public let domain: Domain? /// The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. 
Your VPC's default security groups are automatically assigned to your endpoint. public let endpointDetails: EndpointDetails? @@ -2363,7 +2364,7 @@ extension Transfer { public let privateKey: String? /// Key-value pairs that can be used to group and search for certificates. public let tags: [Tag]? - /// Specifies whether this certificate is used for signing or encryption. + /// Specifies how this certificate is used. It can be used in the following ways: SIGNING: For signing AS2 messages ENCRYPTION: For encrypting AS2 messages TLS: For securing AS2 communications sent over HTTPS public let usage: CertificateUsageType public init(activeDate: Date? = nil, certificate: String, certificateChain: String? = nil, description: String? = nil, inactiveDate: Date? = nil, privateKey: String? = nil, tags: [Tag]? = nil, usage: CertificateUsageType) { @@ -2497,6 +2498,7 @@ extension Transfer { try self.validate(self.serverId, name: "serverId", parent: name, min: 19) try self.validate(self.serverId, name: "serverId", parent: name, pattern: "^s-([0-9a-f]{17})$") try self.validate(self.sshPublicKeyBody, name: "sshPublicKeyBody", parent: name, max: 2048) + try self.validate(self.sshPublicKeyBody, name: "sshPublicKeyBody", parent: name, pattern: "^\\s*(ssh|ecdsa)-[a-z0-9-]+[ \\t]+(([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{1,3})?(={0,3})?)(\\s*|[ \\t]+[\\S \\t]*\\s*)$") try self.validate(self.userName, name: "userName", parent: name, max: 100) try self.validate(self.userName, name: "userName", parent: name, min: 3) try self.validate(self.userName, name: "userName", parent: name, pattern: "^[\\w][\\w@.-]{2,99}$") @@ -3187,7 +3189,7 @@ extension Transfer { public let status: CertificateStatusType? /// The type for the certificate. If a private key has been specified for the certificate, its type is CERTIFICATE_WITH_PRIVATE_KEY. If there is no private key, the type is CERTIFICATE. public let type: CertificateType? - /// Specifies whether this certificate is used for signing or encryption. + /// Specifies how this certificate is used. It can be used in the following ways: SIGNING: For signing AS2 messages ENCRYPTION: For encrypting AS2 messages TLS: For securing AS2 communications sent over HTTPS public let usage: CertificateUsageType? public init(activeDate: Date? = nil, arn: String? = nil, certificateId: String? = nil, description: String? = nil, inactiveDate: Date? = nil, status: CertificateStatusType? = nil, type: CertificateType? = nil, usage: CertificateUsageType? = nil) { @@ -3320,7 +3322,7 @@ extension Transfer { public struct ListedServer: AWSDecodableShape { /// Specifies the unique Amazon Resource Name (ARN) for a server to be listed. public let arn: String - /// Specifies the domain of the storage system that is used for file transfers. + /// Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3. public let domain: Domain? /// Specifies the type of VPC endpoint that your server is connected to. If your server is connected to a VPC endpoint, your server isn't accessible over the public internet. public let endpointType: EndpointType? 
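A note on the sshPublicKeyBody pattern added above: Soto enforces these shape patterns client-side before a request is sent. The following minimal sketch exercises that check directly; the module name SotoTransfer and the use of ImportSshPublicKeyRequest as the shape carrying serverId, sshPublicKeyBody, and userName are assumptions for illustration and are not part of this patch.

import SotoTransfer

// Sketch only: the pattern added in this patch accepts OpenSSH-style public key
// lines ("ssh-..." or "ecdsa-...") and rejects anything else before a call is made.
let request = Transfer.ImportSshPublicKeyRequest(
    serverId: "s-0123456789abcdef0",       // matches ^s-([0-9a-f]{17})$
    sshPublicKeyBody: "not a public key",  // fails the new sshPublicKeyBody pattern
    userName: "example-user"
)
do {
    try request.validate(name: "ImportSshPublicKeyRequest")
} catch {
    // Soto runs the same validation automatically before executing the operation,
    // so a malformed key is rejected locally rather than by the Transfer service.
    print("client-side validation failed: \(error)")
}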
diff --git a/Sources/Soto/Services/VPCLattice/VPCLattice_api.swift b/Sources/Soto/Services/VPCLattice/VPCLattice_api.swift index 434ff0f9cc..d59b5f96b6 100644 --- a/Sources/Soto/Services/VPCLattice/VPCLattice_api.swift +++ b/Sources/Soto/Services/VPCLattice/VPCLattice_api.swift @@ -73,7 +73,7 @@ public struct VPCLattice: AWSService { // MARK: API Calls - /// Updates the listener rules in a batch. You can use this operation to change the priority of listener rules. This can be useful when bulk updating or swapping rule priority. + /// Updates the listener rules in a batch. You can use this operation to change the priority of listener rules. This can be useful when bulk updating or swapping rule priority. Required permissions: vpc-lattice:UpdateRule For more information, see How Amazon VPC Lattice works with IAM in the Amazon VPC Lattice User Guide. @Sendable public func batchUpdateRule(_ input: BatchUpdateRuleRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchUpdateRuleResponse { return try await self.client.execute( @@ -86,7 +86,7 @@ public struct VPCLattice: AWSService { ) } - /// Enables access logs to be sent to Amazon CloudWatch, Amazon S3, and Amazon Kinesis Data Firehose. The service network owner can use the access logs to audit the services in the network. The service network owner will only see access logs from clients and services that are associated with their service network. Access log entries represent traffic originated from VPCs associated with that network. For more information, see Access logs in the Amazon VPC Lattice User Guide. + /// Enables access logs to be sent to Amazon CloudWatch, Amazon S3, and Amazon Kinesis Data Firehose. The service network owner can use the access logs to audit the services in the network. The service network owner can only see access logs from clients and services that are associated with their service network. Access log entries represent traffic originated from VPCs associated with that network. For more information, see Access logs in the Amazon VPC Lattice User Guide. @Sendable public func createAccessLogSubscription(_ input: CreateAccessLogSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAccessLogSubscriptionResponse { return try await self.client.execute( @@ -151,7 +151,7 @@ public struct VPCLattice: AWSService { ) } - /// Associates a service with a service network. You can't use this operation if the service and service network are already associated or if there is a disassociation or deletion in progress. If the association fails, you can retry the operation by deleting the association and recreating it. You cannot associate a service and service network that are shared with a caller. The caller must own either the service or the service network. As a result of this operation, the association is created in the service network account and the association owner account. + /// Associates a service with a service network. For more information, see Manage service associations in the Amazon VPC Lattice User Guide. You can't use this operation if the service and service network are already associated or if there is a disassociation or deletion in progress. If the association fails, you can retry the operation by deleting the association and recreating it. You cannot associate a service and service network that are shared with a caller. The caller must own either the service or the service network. 
As a result of this operation, the association is created in the service network account and the association owner account. @Sendable public func createServiceNetworkServiceAssociation(_ input: CreateServiceNetworkServiceAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServiceNetworkServiceAssociationResponse { return try await self.client.execute( @@ -164,7 +164,7 @@ public struct VPCLattice: AWSService { ) } - /// Associates a VPC with a service network. When you associate a VPC with the service network, it enables all the resources within that VPC to be clients and communicate with other services in the service network. For more information, see Manage VPC associations in the Amazon VPC Lattice User Guide. You can't use this operation if there is a disassociation in progress. If the association fails, retry by deleting the association and recreating it. As a result of this operation, the association gets created in the service network account and the VPC owner account. Once a security group is added to the VPC association it cannot be removed. You can add or update the security groups being used for the VPC association once a security group is attached. To remove all security groups you must reassociate the VPC. + /// Associates a VPC with a service network. When you associate a VPC with the service network, it enables all the resources within that VPC to be clients and communicate with other services in the service network. For more information, see Manage VPC associations in the Amazon VPC Lattice User Guide. You can't use this operation if there is a disassociation in progress. If the association fails, retry by deleting the association and recreating it. As a result of this operation, the association gets created in the service network account and the VPC owner account. If you add a security group to the service network and VPC association, the association must continue to always have at least one security group. You can add or edit security groups at any time. However, to remove all security groups, you must first delete the association and recreate it without security groups. @Sendable public func createServiceNetworkVpcAssociation(_ input: CreateServiceNetworkVpcAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServiceNetworkVpcAssociationResponse { return try await self.client.execute( @@ -203,7 +203,7 @@ public struct VPCLattice: AWSService { ) } - /// Deletes the specified auth policy. If an auth is set to Amazon Web Services_IAM and the auth policy is deleted, all requests will be denied by default. If you are trying to remove the auth policy completely, you must set the auth_type to NONE. If auth is enabled on the resource, but no auth policy is set, all requests will be denied. + /// Deletes the specified auth policy. If an auth is set to AWS_IAM and the auth policy is deleted, all requests are denied. If you are trying to remove the auth policy completely, you must set the auth type to NONE. If auth is enabled on the resource, but no auth policy is set, all requests are denied. @Sendable public func deleteAuthPolicy(_ input: DeleteAuthPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAuthPolicyResponse { return try await self.client.execute( @@ -281,7 +281,7 @@ public struct VPCLattice: AWSService { ) } - /// Deletes the association between a specified service and the specific service network. This request will fail if an association is still in progress. 
+ /// Deletes the association between a specified service and the specific service network. This operation fails if an association is still in progress. @Sendable public func deleteServiceNetworkServiceAssociation(_ input: DeleteServiceNetworkServiceAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServiceNetworkServiceAssociationResponse { return try await self.client.execute( @@ -372,7 +372,7 @@ public struct VPCLattice: AWSService { ) } - /// Retrieves information about the resource policy. The resource policy is an IAM policy created by AWS RAM on behalf of the resource owner when they share a resource. + /// Retrieves information about the resource policy. The resource policy is an IAM policy created on behalf of the resource owner when they share a resource. @Sendable public func getResourcePolicy(_ input: GetResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetResourcePolicyResponse { return try await self.client.execute( @@ -502,7 +502,7 @@ public struct VPCLattice: AWSService { ) } - /// Lists the associations between the service network and the service. You can filter the list either by service or service network. You must provide either the service network identifier or the service identifier. Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a resource that is shared with another account, the association will include the local account ID as the prefix in the ARN for each account the resource is shared with. + /// Lists the associations between the service network and the service. You can filter the list either by service or service network. You must provide either the service network identifier or the service identifier. Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a resource that is shared with another account, the association includes the local account ID as the prefix in the ARN for each account the resource is shared with. @Sendable public func listServiceNetworkServiceAssociations(_ input: ListServiceNetworkServiceAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListServiceNetworkServiceAssociationsResponse { return try await self.client.execute( @@ -593,7 +593,7 @@ public struct VPCLattice: AWSService { ) } - /// Creates or updates the auth policy. + /// Creates or updates the auth policy. The policy string in JSON must not contain newlines or blank lines. For more information, see Auth policies in the Amazon VPC Lattice User Guide. @Sendable public func putAuthPolicy(_ input: PutAuthPolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutAuthPolicyResponse { return try await self.client.execute( @@ -723,7 +723,7 @@ public struct VPCLattice: AWSService { ) } - /// Updates the service network and VPC association. Once you add a security group, it cannot be removed. + /// Updates the service network and VPC association. If you add a security group to the service network and VPC association, the association must continue to always have at least one security group. You can add or edit security groups at any time. 
However, to remove all security groups, you must first delete the association and recreate it without security groups. @Sendable public func updateServiceNetworkVpcAssociation(_ input: UpdateServiceNetworkVpcAssociationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateServiceNetworkVpcAssociationResponse { return try await self.client.execute( @@ -820,7 +820,7 @@ extension VPCLattice { ) } - /// Lists the associations between the service network and the service. You can filter the list either by service or service network. You must provide either the service network identifier or the service identifier. Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a resource that is shared with another account, the association will include the local account ID as the prefix in the ARN for each account the resource is shared with. + /// Lists the associations between the service network and the service. You can filter the list either by service or service network. You must provide either the service network identifier or the service identifier. Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a service network is associated with a VPC or when a service is associated with a service network. If the association is for a resource that is shared with another account, the association includes the local account ID as the prefix in the ARN for each account the resource is shared with. /// Return PaginatorSequence for operation. /// /// - Parameters: diff --git a/Sources/Soto/Services/VPCLattice/VPCLattice_shapes.swift b/Sources/Soto/Services/VPCLattice/VPCLattice_shapes.swift index b3c7a1537c..cd2b95f6bf 100644 --- a/Sources/Soto/Services/VPCLattice/VPCLattice_shapes.swift +++ b/Sources/Soto/Services/VPCLattice/VPCLattice_shapes.swift @@ -67,6 +67,8 @@ extension VPCLattice { case http = "HTTP" /// Indicates HTTPS protocol case https = "HTTPS" + /// Indicates TLS_PASSTHROUGH protocol + case tlsPassthrough = "TLS_PASSTHROUGH" public var description: String { return self.rawValue } } @@ -121,6 +123,8 @@ extension VPCLattice { case http = "HTTP" /// Indicates HTTPS protocol case https = "HTTPS" + /// Indicates TCP protocol + case tcp = "TCP" public var description: String { return self.rawValue } } @@ -177,11 +181,11 @@ extension VPCLattice { } public enum HeaderMatchType: AWSEncodableShape & AWSDecodableShape, Sendable { - /// Specifies a contains type match. + /// A contains type match. case contains(String) - /// Specifies an exact type match. + /// An exact type match. case exact(String) - /// Specifies a prefix type match. Matches the value with the prefix. + /// A prefix type match. Matches the value with the prefix. case prefix(String) public init(from decoder: Decoder) throws { @@ -294,7 +298,7 @@ extension VPCLattice { } public enum RuleAction: AWSEncodableShape & AWSDecodableShape, Sendable { - /// Describes the rule action that returns a custom HTTP response. + /// The fixed response action. The rule returns a custom HTTP response. case fixedResponse(FixedResponseAction) /// The forward action. Traffic that matches the rule is forwarded to the specified target groups. 
case forward(ForwardAction) @@ -517,13 +521,13 @@ extension VPCLattice { public struct CreateListenerRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. public let clientToken: String? - /// The action for the default rule. Each listener has a default rule. Each rule consists of a priority, one or more actions, and one or more conditions. The default rule is the rule that's used if no other rules match. Each rule must include exactly one of the following types of actions: forward or fixed-response, and it must be the last action to be performed. + /// The action for the default rule. Each listener has a default rule. The default rule is used if no other rules match. public let defaultAction: RuleAction /// The name of the listener. A listener name must be unique within a service. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. public let name: String /// The listener port. You can specify a value from 1 to 65535. For HTTP, the default is 80. For HTTPS, the default is 443. public let port: Int? - /// The listener protocol HTTP or HTTPS. + /// The listener protocol. public let `protocol`: ListenerProtocol /// The ID or Amazon Resource Name (ARN) of the service. public let serviceIdentifier: String @@ -702,7 +706,7 @@ extension VPCLattice { } public struct CreateRuleResponse: AWSDecodableShape { - /// The rule action. Each rule must include exactly one of the following types of actions: forward or fixed-response, and it must be the last action to be performed. + /// The rule action. public let action: RuleAction? /// The Amazon Resource Name (ARN) of the rule. public let arn: String? @@ -853,7 +857,7 @@ extension VPCLattice { public let dnsEntry: DnsEntry? /// The ID of the association. public let id: String? - /// The operation's status. + /// The association status. public let status: ServiceNetworkServiceAssociationStatus? public init(arn: String? = nil, createdBy: String? = nil, customDomainName: String? = nil, dnsEntry: DnsEntry? = nil, id: String? = nil, status: ServiceNetworkServiceAssociationStatus? = nil) { @@ -936,7 +940,7 @@ extension VPCLattice { public let id: String? /// The IDs of the security groups. public let securityGroupIds: [String]? - /// The operation's status. + /// The association status. public let status: ServiceNetworkVpcAssociationStatus? public init(arn: String? = nil, createdBy: String? = nil, id: String? = nil, securityGroupIds: [String]? = nil, status: ServiceNetworkVpcAssociationStatus? = nil) { @@ -1023,7 +1027,7 @@ extension VPCLattice { public let id: String? /// The name of the service. public let name: String? - /// The status. If the status is CREATE_FAILED, you will have to delete and recreate the service. + /// The status. If the status is CREATE_FAILED, you must delete and recreate the service. public let status: ServiceStatus? public init(arn: String? = nil, authType: AuthType? = nil, certificateArn: String? = nil, customDomainName: String? = nil, dnsEntry: DnsEntry? = nil, id: String? = nil, name: String? = nil, status: ServiceStatus? 
= nil) { @@ -1052,7 +1056,7 @@ extension VPCLattice { public struct CreateTargetGroupRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request that completed successfully using the same client token and parameters, the retry succeeds without performing any actions. If the parameters aren't identical, the retry fails. public let clientToken: String? - /// The target group configuration. If type is set to LAMBDA, this parameter doesn't apply. + /// The target group configuration. public let config: TargetGroupConfig? /// The name of the target group. The name must be unique within the account. The valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. public let name: String @@ -1097,13 +1101,13 @@ extension VPCLattice { public struct CreateTargetGroupResponse: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the target group. public let arn: String? - /// The target group configuration. If type is set to LAMBDA, this parameter doesn't apply. + /// The target group configuration. public let config: TargetGroupConfig? /// The ID of the target group. public let id: String? /// The name of the target group. public let name: String? - /// The operation's status. You can retry the operation if the status is CREATE_FAILED. However, if you retry it while the status is CREATE_IN_PROGRESS, there is no change in the status. + /// The status. You can retry the operation if the status is CREATE_FAILED. However, if you retry it while the status is CREATE_IN_PROGRESS, there is no change in the status. public let status: TargetGroupStatus? /// The type of target group. public let type: TargetGroupType? @@ -1338,7 +1342,7 @@ extension VPCLattice { public let arn: String? /// The ID of the association. public let id: String? - /// The operation's status. You can retry the operation if the status is DELETE_FAILED. However, if you retry it when the status is DELETE_IN_PROGRESS, there is no change in the status. + /// The status. You can retry the operation if the status is DELETE_FAILED. However, if you retry it when the status is DELETE_IN_PROGRESS, there is no change in the status. public let status: ServiceNetworkServiceAssociationStatus? public init(arn: String? = nil, id: String? = nil, status: ServiceNetworkServiceAssociationStatus? = nil) { @@ -1382,7 +1386,7 @@ extension VPCLattice { public let arn: String? /// The ID of the association. public let id: String? - /// The status. You can retry the operation if the status is DELETE_FAILED. However, if you retry it when the status is DELETE_IN_PROGRESS, there is no change in the status. + /// The status. You can retry the operation if the status is DELETE_FAILED. However, if you retry it while the status is DELETE_IN_PROGRESS, there is no change in the status. public let status: ServiceNetworkVpcAssociationStatus? public init(arn: String? = nil, id: String? = nil, status: ServiceNetworkVpcAssociationStatus? = nil) { @@ -1575,7 +1579,7 @@ extension VPCLattice { } public struct ForwardAction: AWSEncodableShape & AWSDecodableShape { - /// The target groups. Traffic matching the rule is forwarded to the specified target groups. With forward actions, you can assign a weight that controls the prioritization and selection of each target group. This means that requests are distributed to individual target groups based on their weights. 
For example, if two target groups have the same weight, each target group receives half of the traffic. The default value is 1. This means that if only one target group is provided, there is no need to set the weight; 100% of traffic will go to that target group. + /// The target groups. Traffic matching the rule is forwarded to the specified target groups. With forward actions, you can assign a weight that controls the prioritization and selection of each target group. This means that requests are distributed to individual target groups based on their weights. For example, if two target groups have the same weight, each target group receives half of the traffic. The default value is 1. This means that if only one target group is provided, there is no need to set the weight; 100% of the traffic goes to that target group. public let targetGroups: [WeightedTargetGroup] public init(targetGroups: [WeightedTargetGroup]) { @@ -1586,7 +1590,7 @@ extension VPCLattice { try self.targetGroups.forEach { try $0.validate(name: "\(name).targetGroups[]") } - try self.validate(self.targetGroups, name: "targetGroups", parent: name, max: 2) + try self.validate(self.targetGroups, name: "targetGroups", parent: name, max: 10) try self.validate(self.targetGroups, name: "targetGroups", parent: name, min: 1) } @@ -1689,7 +1693,7 @@ extension VPCLattice { public var lastUpdatedAt: Date? /// The auth policy. public let policy: String? - /// The state of the auth policy. The auth policy is only active when the auth type is set to Amazon Web Services_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the auth type is NONE, then any auth policy you provide will remain inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide. + /// The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the auth type is NONE, then any auth policy that you provide remains inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide. public let state: AuthPolicyState? public init(createdAt: Date? = nil, lastUpdatedAt: Date? = nil, policy: String? = nil, state: AuthPolicyState? = nil) { @@ -1789,7 +1793,7 @@ extension VPCLattice { } public struct GetResourcePolicyRequest: AWSEncodableShape { - /// An IAM policy. + /// The Amazon Resource Name (ARN) of the service network or service. public let resourceArn: String public init(resourceArn: String) { @@ -1812,7 +1816,7 @@ extension VPCLattice { } public struct GetResourcePolicyResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the service network or service. + /// An IAM policy. public let policy: String? public init(policy: String? = nil) { @@ -2314,7 +2318,7 @@ extension VPCLattice { } public struct HeaderMatch: AWSEncodableShape & AWSDecodableShape { - /// Indicates whether the match is case sensitive. Defaults to false. + /// Indicates whether the match is case sensitive. public let caseSensitive: Bool? /// The header match type. public let match: HeaderMatchType @@ -2349,7 +2353,7 @@ extension VPCLattice { public let healthCheckTimeoutSeconds: Int? /// The number of consecutive successful health checks required before considering an unhealthy target healthy. The range is 2–10. The default is 5. 
public let healthyThresholdCount: Int? - /// The codes to use when checking for a successful response from a target. These are called Success codes in the console. + /// The codes to use when checking for a successful response from a target. public let matcher: Matcher? /// The destination for health checks on the targets. If the protocol version is HTTP/1.1 or HTTP/2, specify a valid URI (for example, /path?query). The default path is /. Health checks are not supported if the protocol version is gRPC, however, you can choose HTTP/1.1 or HTTP/2 and specify a valid URI. public let path: String? @@ -2472,7 +2476,7 @@ extension VPCLattice { } public struct ListAccessLogSubscriptionsResponse: AWSDecodableShape { - /// The access log subscriptions. + /// Information about the access log subscriptions. public let items: [AccessLogSubscriptionSummary] /// A pagination token for the next page of results. public let nextToken: String? @@ -2791,7 +2795,7 @@ extension VPCLattice { } public struct ListServicesResponse: AWSDecodableShape { - /// The services. + /// Information about the services. public let items: [ServiceSummary]? /// If there are additional results, a pagination token for the next page of results. public let nextToken: String? @@ -2830,7 +2834,7 @@ extension VPCLattice { } public struct ListTagsForResourceResponse: AWSDecodableShape { - /// The tags. + /// Information about the tags. public let tags: [String: String]? public init(tags: [String: String]? = nil) { @@ -2849,7 +2853,7 @@ extension VPCLattice { public let nextToken: String? /// The target group type. public let targetGroupType: TargetGroupType? - /// The ID or Amazon Resource Name (ARN) of the service. + /// The ID or Amazon Resource Name (ARN) of the VPC. public let vpcIdentifier: String? public init(maxResults: Int? = nil, nextToken: String? = nil, targetGroupType: TargetGroupType? = nil, vpcIdentifier: String? = nil) { @@ -2905,7 +2909,7 @@ extension VPCLattice { public let nextToken: String? /// The ID or Amazon Resource Name (ARN) of the target group. public let targetGroupIdentifier: String - /// The targets to list. + /// The targets. public let targets: [Target]? public init(maxResults: Int? = nil, nextToken: String? = nil, targetGroupIdentifier: String, targets: [Target]? = nil) { @@ -2999,7 +3003,7 @@ extension VPCLattice { } public struct PathMatch: AWSEncodableShape & AWSDecodableShape { - /// Indicates whether the match is case sensitive. Defaults to false. + /// Indicates whether the match is case sensitive. public let caseSensitive: Bool? /// The type of path match. public let match: PathMatchType @@ -3020,7 +3024,7 @@ extension VPCLattice { } public struct PutAuthPolicyRequest: AWSEncodableShape { - /// The auth policy. + /// The auth policy. The policy string in JSON must not contain newlines or blank lines. public let policy: String /// The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. public let resourceIdentifier: String @@ -3050,9 +3054,9 @@ extension VPCLattice { } public struct PutAuthPolicyResponse: AWSDecodableShape { - /// The auth policy. + /// The auth policy. The policy string in JSON must not contain newlines or blank lines. public let policy: String? - /// The state of the auth policy. The auth policy is only active when the auth type is set to Amazon Web Services_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. 
If the Auth type is NONE, then, any auth policy you provide will remain inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide. + /// The state of the auth policy. The auth policy is only active when the auth type is set to AWS_IAM. If you provide a policy, then authentication and authorization decisions are made based on this policy and the client's IAM policy. If the Auth type is NONE, then, any auth policy that you provide remains inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide. public let state: AuthPolicyState? public init(policy: String? = nil, state: AuthPolicyState? = nil) { @@ -3067,7 +3071,7 @@ extension VPCLattice { } public struct PutResourcePolicyRequest: AWSEncodableShape { - /// An IAM policy. + /// An IAM policy. The policy string in JSON must not contain newlines or blank lines. public let policy: String /// The ID or Amazon Resource Name (ARN) of the service network or service for which the policy is created. public let resourceArn: String @@ -3159,14 +3163,14 @@ extension VPCLattice { public var createdAt: Date? /// The ID of the rule. public let id: String? - /// Indicates whether this is the default rule. Listener rules are created when you create a listener. Each listener has a default rule for checking connection requests. + /// Indicates whether this is the default listener rule. public let isDefault: Bool? /// The date and time that the listener rule was last updated, specified in ISO-8601 format. @OptionalCustomCoding public var lastUpdatedAt: Date? /// The name of the rule. public let name: String? - /// The priority of the rule. + /// The priority of the rule. public let priority: Int? public init(arn: String? = nil, createdAt: Date? = nil, id: String? = nil, isDefault: Bool? = nil, lastUpdatedAt: Date? = nil, name: String? = nil, priority: Int? = nil) { @@ -3247,7 +3251,7 @@ extension VPCLattice { } public struct RuleUpdateSuccess: AWSDecodableShape { - /// The action for the default rule. + /// The action for the rule. public let action: RuleAction? /// The Amazon Resource Name (ARN) of the listener. public let arn: String? @@ -3291,9 +3295,9 @@ extension VPCLattice { public var createdAt: Date? /// The account that created the association. public let createdBy: String? - /// The custom domain name of the service. + /// The custom domain name of the service. public let customDomainName: String? - /// DNS information about the service. + /// The DNS information. public let dnsEntry: DnsEntry? /// The ID of the association. public let id: String? @@ -3441,9 +3445,9 @@ extension VPCLattice { /// The date and time that the service was created, specified in ISO-8601 format. @OptionalCustomCoding public var createdAt: Date? - /// The custom domain name of the service. + /// The custom domain name of the service. public let customDomainName: String? - /// DNS information about the service. + /// The DNS information. public let dnsEntry: DnsEntry? /// The ID of the service. public let id: String? @@ -3517,7 +3521,7 @@ extension VPCLattice { } public struct Target: AWSEncodableShape & AWSDecodableShape { - /// The ID of the target. If the target type of the target group is INSTANCE, this is an instance ID. If the target type is IP , this is an IP address. If the target type is LAMBDA, this is the ARN of the Lambda function. If the target type is ALB, this is the ARN of the Application Load Balancer. + /// The ID of the target. 
If the target group type is INSTANCE, this is an instance ID. If the target group type is IP, this is an IP address. If the target group type is LAMBDA, this is the ARN of a Lambda function. If the target group type is ALB, this is the ARN of an Application Load Balancer. public let id: String /// The port on which the target is listening. For HTTP, the default is 80. For HTTPS, the default is 443. public let port: Int? @@ -3543,7 +3547,7 @@ extension VPCLattice { public let failureCode: String? /// The failure message. public let failureMessage: String? - /// The ID of the target. If the target type of the target group is INSTANCE, this is an instance ID. If the target type is IP , this is an IP address. If the target type is LAMBDA, this is the ARN of the Lambda function. If the target type is ALB, this is the ARN of the Application Load Balancer. + /// The ID of the target. If the target group type is INSTANCE, this is an instance ID. If the target group type is IP, this is an IP address. If the target group type is LAMBDA, this is the ARN of a Lambda function. If the target group type is ALB, this is the ARN of an Application Load Balancer. public let id: String? /// The port on which the target is listening. This parameter doesn't apply if the target is a Lambda function. public let port: Int? @@ -3564,19 +3568,19 @@ extension VPCLattice { } public struct TargetGroupConfig: AWSEncodableShape & AWSDecodableShape { - /// The health check configuration. + /// The health check configuration. Not supported if the target group type is LAMBDA or ALB. public let healthCheck: HealthCheckConfig? - /// The type of IP address used for the target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4. + /// The type of IP address used for the target group. Supported only if the target group type is IP. The default is IPV4. public let ipAddressType: IpAddressType? - /// Lambda event structure version + /// The version of the event structure that your Lambda function receives. Supported only if the target group type is LAMBDA. The default is V1. public let lambdaEventStructureVersion: LambdaEventStructureVersion? - /// The port on which the targets are listening. For HTTP, the default is 80. For HTTPS, the default is 443 + /// The port on which the targets are listening. For HTTP, the default is 80. For HTTPS, the default is 443. Not supported if the target group type is LAMBDA. public let port: Int? - /// The protocol to use for routing traffic to the targets. Default is the protocol of a target group. + /// The protocol to use for routing traffic to the targets. The default is the protocol of the target group. Not supported if the target group type is LAMBDA. public let `protocol`: TargetGroupProtocol? - /// The protocol version. Default value is HTTP1. + /// The protocol version. The default is HTTP1. Not supported if the target group type is LAMBDA. public let protocolVersion: TargetGroupProtocolVersion? - /// The ID of the VPC. + /// The ID of the VPC. Not supported if the target group type is LAMBDA. public let vpcIdentifier: String? public init(healthCheck: HealthCheckConfig? = nil, ipAddressType: IpAddressType? = nil, lambdaEventStructureVersion: LambdaEventStructureVersion? = nil, port: Int? = nil, protocol: TargetGroupProtocol? = nil, protocolVersion: TargetGroupProtocolVersion? = nil, vpcIdentifier: String? = nil) { @@ -3617,9 +3621,9 @@ extension VPCLattice { public var createdAt: Date? 
/// The ID of the target group. public let id: String? - /// The type of IP address used for the target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4. + /// The type of IP address used for the target group. The possible values are IPV4 and IPV6. This is an optional parameter. If not specified, the default is IPV4. public let ipAddressType: IpAddressType? - /// Lambda event structure version + /// The version of the event structure that your Lambda function receives. Supported only if the target group type is LAMBDA. public let lambdaEventStructureVersion: LambdaEventStructureVersion? /// The date and time that the target group was last updated, specified in ISO-8601 format. @OptionalCustomCoding @@ -3630,7 +3634,7 @@ extension VPCLattice { public let port: Int? /// The protocol of the target group. public let `protocol`: TargetGroupProtocol? - /// The list of Amazon Resource Names (ARNs) of the service. + /// The Amazon Resource Names (ARNs) of the service. public let serviceArns: [String]? /// The status. public let status: TargetGroupStatus? @@ -3673,13 +3677,13 @@ extension VPCLattice { } public struct TargetSummary: AWSDecodableShape { - /// The ID of the target. If the target type of the target group is INSTANCE, this is an instance ID. If the target type is IP , this is an IP address. If the target type is LAMBDA, this is the ARN of the Lambda function. If the target type is ALB, this is the ARN of the Application Load Balancer. + /// The ID of the target. If the target group type is INSTANCE, this is an instance ID. If the target group type is IP, this is an IP address. If the target group type is LAMBDA, this is the ARN of a Lambda function. If the target type is ALB, this is the ARN of an Application Load Balancer. public let id: String? /// The port on which the target is listening. public let port: Int? /// The code for why the target status is what it is. public let reasonCode: String? - /// The status of the target. Draining: The target is being deregistered. No new connections will be sent to this target while current connections are being drained. Default draining time is 5 minutes. Unavailable: Health checks are unavailable for the target group. Healthy: The target is healthy. Unhealthy: The target is unhealthy. Initial: Initial health checks on the target are being performed. Unused: Target group is not used in a service. + /// The status of the target. DRAINING: The target is being deregistered. No new connections are sent to this target while current connections are being drained. The default draining time is 5 minutes. UNAVAILABLE: Health checks are unavailable for the target group. HEALTHY: The target is healthy. UNHEALTHY: The target is unhealthy. INITIAL: Initial health checks on the target are being performed. UNUSED: Target group is not used in a service. public let status: TargetStatus? public init(id: String? = nil, port: Int? = nil, reasonCode: String? = nil, status: TargetStatus? = nil) { @@ -4020,7 +4024,7 @@ extension VPCLattice { } public struct UpdateServiceNetworkVpcAssociationRequest: AWSEncodableShape { - /// The IDs of the security groups. Once you add a security group, it cannot be removed. + /// The IDs of the security groups. public let securityGroupIds: [String] /// The ID or Amazon Resource Name (ARN) of the association. 
public let serviceNetworkVpcAssociationIdentifier: String @@ -4217,7 +4221,7 @@ extension VPCLattice { public struct WeightedTargetGroup: AWSEncodableShape & AWSDecodableShape { /// The ID or Amazon Resource Name (ARN) of the target group. public let targetGroupIdentifier: String - /// Only required if you specify multiple target groups for a forward action. The "weight" determines how requests are distributed to the target group. For example, if you specify two target groups, each with a weight of 10, each target group receives half the requests. If you specify two target groups, one with a weight of 10 and the other with a weight of 20, the target group with a weight of 20 receives twice as many requests as the other target group. If there's only one target group specified, then the default value is 100. + /// Only required if you specify multiple target groups for a forward action. The weight determines how requests are distributed to the target group. For example, if you specify two target groups, each with a weight of 10, each target group receives half the requests. If you specify two target groups, one with a weight of 10 and the other with a weight of 20, the target group with a weight of 20 receives twice as many requests as the other target group. If there's only one target group specified, then the default value is 100. public let weight: Int? public init(targetGroupIdentifier: String, weight: Int? = nil) { diff --git a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift index 682a3eebfa..7029ba8b58 100644 --- a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift +++ b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift @@ -114,7 +114,7 @@ public struct VerifiedPermissions: AWSService { ) } - /// Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP). After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken operation. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and access tokens, and Verified Permissions can use either or both. Any combination of identity and access tokens results in the same Cedar principal. Verified Permissions automatically translates the information about the identities into the standard Cedar attributes that can be evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain different information, the tokens you choose to use determine which principal attributes are available to access when evaluating Cedar policies. If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire. To reference a user from this identity source in your Cedar policies, use the following syntax. IdentityType::"<CognitoUserPoolIdentifier>|<CognitoClientId> Where IdentityType is the string that you provide to the PrincipalEntityType parameter for this operation. The CognitoUserPoolId and CognitoClientId are defined by the Amazon Cognito user pool. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. 
+ /// Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect (OIDC) identity provider (IdP). After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions derives information about your user and session from token claims. Access tokens provide action context to your policies, and ID tokens provide principal Attributes. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store To reference a user from this identity source in your Cedar policies, refer to the following syntax examples. Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. OpenID Connect (OIDC) provider: Namespace::[Entity type]::[principalIdClaim]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. @Sendable public func createIdentitySource(_ input: CreateIdentitySourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIdentitySourceOutput { return try await self.client.execute( @@ -296,7 +296,7 @@ public struct VerifiedPermissions: AWSService { ) } - /// Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision. At this time, Verified Permissions accepts tokens from only Amazon Cognito. Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature. If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire. + /// Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision. At this time, Verified Permissions accepts tokens from only Amazon Cognito. Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature. 
Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store @Sendable public func isAuthorizedWithToken(_ input: IsAuthorizedWithTokenInput, logger: Logger = AWSClient.loggingDisabled) async throws -> IsAuthorizedWithTokenOutput { return try await self.client.execute( @@ -374,7 +374,7 @@ public struct VerifiedPermissions: AWSService { ) } - /// Updates the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. + /// Updates the specified identity source to use a new identity provider (IdP), or to change the mapping of identities from the IdP to a different principal entity type. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. @Sendable public func updateIdentitySource(_ input: UpdateIdentitySourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateIdentitySourceOutput { return try await self.client.execute( diff --git a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift index a801656baa..159a953207 100644 --- a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift +++ b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift @@ -37,6 +37,12 @@ extension VerifiedPermissions { public var description: String { return self.rawValue } } + public enum PolicyEffect: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case forbid = "Forbid" + case permit = "Permit" + public var description: String { return self.rawValue } + } + public enum PolicyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case `static` = "STATIC" case templateLinked = "TEMPLATE_LINKED" @@ -139,6 +145,99 @@ extension VerifiedPermissions { } } + public enum Configuration: AWSEncodableShape, Sendable { + /// Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool and one or more application client IDs. Example: "configuration":{"cognitoUserPoolConfiguration":{"userPoolArn":"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5","clientIds": ["a1b2c3d4e5f6g7h8i9j0kalbmc"],"groupConfiguration": {"groupEntityType": "MyCorp::Group"}}} + case cognitoUserPoolConfiguration(CognitoUserPoolConfiguration) + /// Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details. 
Example:"configuration":{"openIdConnectConfiguration":{"issuer":"https://auth.example.com","tokenSelection":{"accessTokenOnly":{"audiences":["https://myapp.example.com","https://myapp2.example.com"],"principalIdClaim":"sub"}},"entityIdPrefix":"MyOIDCProvider","groupConfiguration":{"groupClaim":"groups","groupEntityType":"MyCorp::UserGroup"}}} + case openIdConnectConfiguration(OpenIdConnectConfiguration) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .cognitoUserPoolConfiguration(let value): + try container.encode(value, forKey: .cognitoUserPoolConfiguration) + case .openIdConnectConfiguration(let value): + try container.encode(value, forKey: .openIdConnectConfiguration) + } + } + + public func validate(name: String) throws { + switch self { + case .cognitoUserPoolConfiguration(let value): + try value.validate(name: "\(name).cognitoUserPoolConfiguration") + case .openIdConnectConfiguration(let value): + try value.validate(name: "\(name).openIdConnectConfiguration") + } + } + + private enum CodingKeys: String, CodingKey { + case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" + case openIdConnectConfiguration = "openIdConnectConfiguration" + } + } + + public enum ConfigurationDetail: AWSDecodableShape, Sendable { + /// Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool, the policy store entity that you want to assign to user groups, and one or more application client IDs. Example: "configuration":{"cognitoUserPoolConfiguration":{"userPoolArn":"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5","clientIds": ["a1b2c3d4e5f6g7h8i9j0kalbmc"],"groupConfiguration": {"groupEntityType": "MyCorp::Group"}}} + case cognitoUserPoolConfiguration(CognitoUserPoolConfigurationDetail) + /// Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details. 
Example:"configuration":{"openIdConnectConfiguration":{"issuer":"https://auth.example.com","tokenSelection":{"accessTokenOnly":{"audiences":["https://myapp.example.com","https://myapp2.example.com"],"principalIdClaim":"sub"}},"entityIdPrefix":"MyOIDCProvider","groupConfiguration":{"groupClaim":"groups","groupEntityType":"MyCorp::UserGroup"}}} + case openIdConnectConfiguration(OpenIdConnectConfigurationDetail) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .cognitoUserPoolConfiguration: + let value = try container.decode(CognitoUserPoolConfigurationDetail.self, forKey: .cognitoUserPoolConfiguration) + self = .cognitoUserPoolConfiguration(value) + case .openIdConnectConfiguration: + let value = try container.decode(OpenIdConnectConfigurationDetail.self, forKey: .openIdConnectConfiguration) + self = .openIdConnectConfiguration(value) + } + } + + private enum CodingKeys: String, CodingKey { + case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" + case openIdConnectConfiguration = "openIdConnectConfiguration" + } + } + + public enum ConfigurationItem: AWSDecodableShape, Sendable { + /// Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool, the policy store entity that you want to assign to user groups, and one or more application client IDs. Example: "configuration":{"cognitoUserPoolConfiguration":{"userPoolArn":"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5","clientIds": ["a1b2c3d4e5f6g7h8i9j0kalbmc"],"groupConfiguration": {"groupEntityType": "MyCorp::Group"}}} + case cognitoUserPoolConfiguration(CognitoUserPoolConfigurationItem) + /// Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details. 
Example:"configuration":{"openIdConnectConfiguration":{"issuer":"https://auth.example.com","tokenSelection":{"accessTokenOnly":{"audiences":["https://myapp.example.com","https://myapp2.example.com"],"principalIdClaim":"sub"}},"entityIdPrefix":"MyOIDCProvider","groupConfiguration":{"groupClaim":"groups","groupEntityType":"MyCorp::UserGroup"}}} + case openIdConnectConfiguration(OpenIdConnectConfigurationItem) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .cognitoUserPoolConfiguration: + let value = try container.decode(CognitoUserPoolConfigurationItem.self, forKey: .cognitoUserPoolConfiguration) + self = .cognitoUserPoolConfiguration(value) + case .openIdConnectConfiguration: + let value = try container.decode(OpenIdConnectConfigurationItem.self, forKey: .openIdConnectConfiguration) + self = .openIdConnectConfiguration(value) + } + } + + private enum CodingKeys: String, CodingKey { + case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" + case openIdConnectConfiguration = "openIdConnectConfiguration" + } + } + public enum EntityReference: AWSEncodableShape, Sendable { /// The identifier of the entity. It can consist of either an EntityType and EntityId, a principal, or a resource. case identifier(EntityIdentifier) @@ -170,6 +269,99 @@ extension VerifiedPermissions { } } + public enum OpenIdConnectTokenSelection: AWSEncodableShape, Sendable { + /// The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub. + case accessTokenOnly(OpenIdConnectAccessTokenConfiguration) + /// The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub. + case identityTokenOnly(OpenIdConnectIdentityTokenConfiguration) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .accessTokenOnly(let value): + try container.encode(value, forKey: .accessTokenOnly) + case .identityTokenOnly(let value): + try container.encode(value, forKey: .identityTokenOnly) + } + } + + public func validate(name: String) throws { + switch self { + case .accessTokenOnly(let value): + try value.validate(name: "\(name).accessTokenOnly") + case .identityTokenOnly(let value): + try value.validate(name: "\(name).identityTokenOnly") + } + } + + private enum CodingKeys: String, CodingKey { + case accessTokenOnly = "accessTokenOnly" + case identityTokenOnly = "identityTokenOnly" + } + } + + public enum OpenIdConnectTokenSelectionDetail: AWSDecodableShape, Sendable { + /// The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub. + case accessTokenOnly(OpenIdConnectAccessTokenConfigurationDetail) + /// The OIDC configuration for processing identity (ID) tokens. 
Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub. + case identityTokenOnly(OpenIdConnectIdentityTokenConfigurationDetail) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .accessTokenOnly: + let value = try container.decode(OpenIdConnectAccessTokenConfigurationDetail.self, forKey: .accessTokenOnly) + self = .accessTokenOnly(value) + case .identityTokenOnly: + let value = try container.decode(OpenIdConnectIdentityTokenConfigurationDetail.self, forKey: .identityTokenOnly) + self = .identityTokenOnly(value) + } + } + + private enum CodingKeys: String, CodingKey { + case accessTokenOnly = "accessTokenOnly" + case identityTokenOnly = "identityTokenOnly" + } + } + + public enum OpenIdConnectTokenSelectionItem: AWSDecodableShape, Sendable { + /// The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub. + case accessTokenOnly(OpenIdConnectAccessTokenConfigurationItem) + /// The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub. + case identityTokenOnly(OpenIdConnectIdentityTokenConfigurationItem) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .accessTokenOnly: + let value = try container.decode(OpenIdConnectAccessTokenConfigurationItem.self, forKey: .accessTokenOnly) + self = .accessTokenOnly(value) + case .identityTokenOnly: + let value = try container.decode(OpenIdConnectIdentityTokenConfigurationItem.self, forKey: .identityTokenOnly) + self = .identityTokenOnly(value) + } + } + + private enum CodingKeys: String, CodingKey { + case accessTokenOnly = "accessTokenOnly" + case identityTokenOnly = "identityTokenOnly" + } + } + public enum PolicyDefinition: AWSEncodableShape, Sendable { /// A structure that describes a static policy. An static policy doesn't use a template or allow placeholders for entities. case `static`(StaticPolicyDefinition) @@ -263,6 +455,68 @@ extension VerifiedPermissions { } } + public enum UpdateConfiguration: AWSEncodableShape, Sendable { + /// Contains configuration details of a Amazon Cognito user pool. + case cognitoUserPoolConfiguration(UpdateCognitoUserPoolConfiguration) + /// Contains configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. It specifies the issuer URL, token type that you want to use, and policy store entity details. 
+ case openIdConnectConfiguration(UpdateOpenIdConnectConfiguration) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .cognitoUserPoolConfiguration(let value): + try container.encode(value, forKey: .cognitoUserPoolConfiguration) + case .openIdConnectConfiguration(let value): + try container.encode(value, forKey: .openIdConnectConfiguration) + } + } + + public func validate(name: String) throws { + switch self { + case .cognitoUserPoolConfiguration(let value): + try value.validate(name: "\(name).cognitoUserPoolConfiguration") + case .openIdConnectConfiguration(let value): + try value.validate(name: "\(name).openIdConnectConfiguration") + } + } + + private enum CodingKeys: String, CodingKey { + case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" + case openIdConnectConfiguration = "openIdConnectConfiguration" + } + } + + public enum UpdateOpenIdConnectTokenSelection: AWSEncodableShape, Sendable { + /// The OIDC configuration for processing access tokens. Contains allowed audience claims, for example https://auth.example.com, and the claim that you want to map to the principal, for example sub. + case accessTokenOnly(UpdateOpenIdConnectAccessTokenConfiguration) + /// The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID claims, for example 1example23456789, and the claim that you want to map to the principal, for example sub. + case identityTokenOnly(UpdateOpenIdConnectIdentityTokenConfiguration) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .accessTokenOnly(let value): + try container.encode(value, forKey: .accessTokenOnly) + case .identityTokenOnly(let value): + try container.encode(value, forKey: .identityTokenOnly) + } + } + + public func validate(name: String) throws { + switch self { + case .accessTokenOnly(let value): + try value.validate(name: "\(name).accessTokenOnly") + case .identityTokenOnly(let value): + try value.validate(name: "\(name).identityTokenOnly") + } + } + + private enum CodingKeys: String, CodingKey { + case accessTokenOnly = "accessTokenOnly" + case identityTokenOnly = "identityTokenOnly" + } + } + // MARK: Shapes public struct ActionIdentifier: AWSEncodableShape & AWSDecodableShape { @@ -556,7 +810,7 @@ extension VerifiedPermissions { public struct CognitoUserPoolConfiguration: AWSEncodableShape { /// The unique application client IDs that are associated with the specified Amazon Cognito user pool. Example: "ClientIds": ["&ExampleCogClientId;"] public let clientIds: [String]? - /// The configuration of the user groups from an Amazon Cognito user pool identity source. + /// The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. public let groupConfiguration: CognitoGroupConfiguration? /// The Amazon Resource Name (ARN) of the Amazon Cognito user pool that contains the identities to be authorized. Example: "UserPoolArn": "arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5" public let userPoolArn: String @@ -590,7 +844,7 @@ extension VerifiedPermissions { public struct CognitoUserPoolConfigurationDetail: AWSDecodableShape { /// The unique application client IDs that are associated with the specified Amazon Cognito user pool. 
Example: "clientIds": ["&ExampleCogClientId;"] public let clientIds: [String] - /// The configuration of the user groups from an Amazon Cognito user pool identity source. + /// The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. public let groupConfiguration: CognitoGroupConfigurationDetail? /// The OpenID Connect (OIDC) issuer ID of the Amazon Cognito user pool that contains the identities to be authorized. Example: "issuer": "https://cognito-idp.us-east-1.amazonaws.com/us-east-1_1a2b3c4d5" public let issuer: String @@ -615,7 +869,7 @@ extension VerifiedPermissions { public struct CognitoUserPoolConfigurationItem: AWSDecodableShape { /// The unique application client IDs that are associated with the specified Amazon Cognito user pool. Example: "clientIds": ["&ExampleCogClientId;"] public let clientIds: [String] - /// The configuration of the user groups from an Amazon Cognito user pool identity source. + /// The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. public let groupConfiguration: CognitoGroupConfigurationItem? /// The OpenID Connect (OIDC) issuer ID of the Amazon Cognito user pool that contains the identities to be authorized. Example: "issuer": "https://cognito-idp.us-east-1.amazonaws.com/us-east-1_1a2b3c4d5" public let issuer: String @@ -640,7 +894,7 @@ extension VerifiedPermissions { public struct CreateIdentitySourceInput: AWSEncodableShape { /// Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.. If you don't provide this value, then Amazon Web Services generates a random one for you. If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an ConflictException error. Verified Permissions recognizes a ClientToken for eight hours. After eight hours, the next request with the same parameters performs the operation again regardless of the value of ClientToken. public let clientToken: String? - /// Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. At this time, the only valid member of this structure is a Amazon Cognito user pool configuration. You must specify a UserPoolArn, and optionally, a ClientId. + /// Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. public let configuration: Configuration /// Specifies the ID of the policy store in which you want to store this identity source. Only policies and requests made using this policy store can reference identities from the identity provider configured in the new identity source. public let policyStoreId: String @@ -734,9 +988,16 @@ extension VerifiedPermissions { } public struct CreatePolicyOutput: AWSDecodableShape { + /// The action that a policy permits or forbids. For example, + /// {"actions": [{"actionId": "ViewPhoto", "actionType": "PhotoFlash::Action"}, {"entityID": "SharePhoto", + /// "entityType": "PhotoFlash::Action"}]}. + public let actions: [ActionIdentifier]? /// The date and time the policy was originally created. 
@CustomCoding public var createdDate: Date + /// The effect of the decision that a policy returns to an authorization + /// request. For example, "effect": "Permit". + public let effect: PolicyEffect? /// The date and time the policy was last updated. @CustomCoding public var lastUpdatedDate: Date @@ -751,8 +1012,10 @@ extension VerifiedPermissions { /// The resource specified in the new policy's scope. This response element isn't present when the resource isn't specified in the policy content. public let resource: EntityIdentifier? - public init(createdDate: Date, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + public init(actions: [ActionIdentifier]? = nil, createdDate: Date, effect: PolicyEffect? = nil, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + self.actions = actions self.createdDate = createdDate + self.effect = effect self.lastUpdatedDate = lastUpdatedDate self.policyId = policyId self.policyStoreId = policyStoreId @@ -762,7 +1025,9 @@ extension VerifiedPermissions { } private enum CodingKeys: String, CodingKey { + case actions = "actions" case createdDate = "createdDate" + case effect = "effect" case lastUpdatedDate = "lastUpdatedDate" case policyId = "policyId" case policyStoreId = "policyStoreId" @@ -1048,7 +1313,7 @@ extension VerifiedPermissions { public let attributes: [String: AttributeValue]? /// The identifier of the entity. public let identifier: EntityIdentifier - /// The parents in the hierarchy that contains the entity. + /// The parent entities in the hierarchy that contains the entity. A principal or resource entity can be defined with at most 99 transitive parents per authorization request. A transitive parent is an entity in the hierarchy of entities including all direct parents, and parents of parents. For example, a user can be a member of 91 groups if one of those groups is a member of eight groups, for a total of 100: one entity, 91 entity parents, and eight parents of parents. public let parents: [EntityIdentifier]? public init(attributes: [String: AttributeValue]? = nil, identifier: EntityIdentifier, parents: [EntityIdentifier]? = nil) { @@ -1065,7 +1330,6 @@ extension VerifiedPermissions { try self.parents?.forEach { try $0.validate(name: "\(name).parents[]") } - try self.validate(self.parents, name: "parents", parent: name, max: 100) } private enum CodingKeys: String, CodingKey { @@ -1191,11 +1455,18 @@ extension VerifiedPermissions { } public struct GetPolicyOutput: AWSDecodableShape { + /// The action that a policy permits or forbids. For example, + /// {"actions": [{"actionId": "ViewPhoto", "actionType": "PhotoFlash::Action"}, {"entityID": "SharePhoto", + /// "entityType": "PhotoFlash::Action"}]}. + public let actions: [ActionIdentifier]? /// The date and time that the policy was originally created. @CustomCoding public var createdDate: Date /// The definition of the requested policy. public let definition: PolicyDefinitionDetail + /// The effect of the decision that a policy returns to an authorization + /// request. For example, "effect": "Permit". + public let effect: PolicyEffect? /// The date and time that the policy was last updated. @CustomCoding public var lastUpdatedDate: Date @@ -1210,9 +1481,11 @@ extension VerifiedPermissions { /// The resource specified in the policy's scope. 
This element isn't included in the response when Resource isn't present in the policy content. public let resource: EntityIdentifier? - public init(createdDate: Date, definition: PolicyDefinitionDetail, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + public init(actions: [ActionIdentifier]? = nil, createdDate: Date, definition: PolicyDefinitionDetail, effect: PolicyEffect? = nil, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + self.actions = actions self.createdDate = createdDate self.definition = definition + self.effect = effect self.lastUpdatedDate = lastUpdatedDate self.policyId = policyId self.policyStoreId = policyStoreId @@ -1222,8 +1495,10 @@ extension VerifiedPermissions { } private enum CodingKeys: String, CodingKey { + case actions = "actions" case createdDate = "createdDate" case definition = "definition" + case effect = "effect" case lastUpdatedDate = "lastUpdatedDate" case policyId = "policyId" case policyStoreId = "policyStoreId" @@ -1679,7 +1954,7 @@ extension VerifiedPermissions { public struct ListIdentitySourcesInput: AWSEncodableShape { /// Specifies characteristics of an identity source that you can use to limit the output to matching identity sources. public let filters: [IdentitySourceFilter]? - /// Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. If you do not specify this parameter, the operation defaults to 10 identity sources per response. You can specify a maximum of 200 identity sources per response. + /// Specifies the total number of results that you want included in each response. If additional items exist beyond the number you specify, the NextToken response element is returned with a value (not null). Include the specified value as the NextToken request parameter in the next call to the operation to get the next set of results. Note that the service might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. If you do not specify this parameter, the operation defaults to 10 identity sources per response. You can specify a maximum of 50 identity sources per response. public let maxResults: Int? /// Specifies that you want to receive the next page of results. Valid only if you received a NextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's NextToken response to request the next page of results. public let nextToken: String? @@ -1874,6 +2149,271 @@ extension VerifiedPermissions { } } + public struct OpenIdConnectAccessTokenConfiguration: AWSEncodableShape { + /// The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com. 
+ public let audiences: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(audiences: [String]? = nil, principalIdClaim: String? = nil) { + self.audiences = audiences + self.principalIdClaim = principalIdClaim + } + + public func validate(name: String) throws { + try self.audiences?.forEach { + try validate($0, name: "audiences[]", parent: name, max: 255) + try validate($0, name: "audiences[]", parent: name, min: 1) + } + try self.validate(self.audiences, name: "audiences", parent: name, max: 255) + try self.validate(self.audiences, name: "audiences", parent: name, min: 1) + try self.validate(self.principalIdClaim, name: "principalIdClaim", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case audiences = "audiences" + case principalIdClaim = "principalIdClaim" + } + } + + public struct OpenIdConnectAccessTokenConfigurationDetail: AWSDecodableShape { + /// The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com. + public let audiences: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(audiences: [String]? = nil, principalIdClaim: String? = nil) { + self.audiences = audiences + self.principalIdClaim = principalIdClaim + } + + private enum CodingKeys: String, CodingKey { + case audiences = "audiences" + case principalIdClaim = "principalIdClaim" + } + } + + public struct OpenIdConnectAccessTokenConfigurationItem: AWSDecodableShape { + /// The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com. + public let audiences: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(audiences: [String]? = nil, principalIdClaim: String? = nil) { + self.audiences = audiences + self.principalIdClaim = principalIdClaim + } + + private enum CodingKeys: String, CodingKey { + case audiences = "audiences" + case principalIdClaim = "principalIdClaim" + } + } + + public struct OpenIdConnectConfiguration: AWSEncodableShape { + /// A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos. + public let entityIdPrefix: String? + /// The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup. + public let groupConfiguration: OpenIdConnectGroupConfiguration? + /// The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration. + public let issuer: String + /// The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. + public let tokenSelection: OpenIdConnectTokenSelection + + public init(entityIdPrefix: String? = nil, groupConfiguration: OpenIdConnectGroupConfiguration? 
= nil, issuer: String, tokenSelection: OpenIdConnectTokenSelection) { + self.entityIdPrefix = entityIdPrefix + self.groupConfiguration = groupConfiguration + self.issuer = issuer + self.tokenSelection = tokenSelection + } + + public func validate(name: String) throws { + try self.validate(self.entityIdPrefix, name: "entityIdPrefix", parent: name, max: 100) + try self.validate(self.entityIdPrefix, name: "entityIdPrefix", parent: name, min: 1) + try self.groupConfiguration?.validate(name: "\(name).groupConfiguration") + try self.validate(self.issuer, name: "issuer", parent: name, max: 2048) + try self.validate(self.issuer, name: "issuer", parent: name, min: 1) + try self.validate(self.issuer, name: "issuer", parent: name, pattern: "^https://.*$") + try self.tokenSelection.validate(name: "\(name).tokenSelection") + } + + private enum CodingKeys: String, CodingKey { + case entityIdPrefix = "entityIdPrefix" + case groupConfiguration = "groupConfiguration" + case issuer = "issuer" + case tokenSelection = "tokenSelection" + } + } + + public struct OpenIdConnectConfigurationDetail: AWSDecodableShape { + /// A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos. + public let entityIdPrefix: String? + /// The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup. + public let groupConfiguration: OpenIdConnectGroupConfigurationDetail? + /// The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration. + public let issuer: String + /// The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. + public let tokenSelection: OpenIdConnectTokenSelectionDetail + + public init(entityIdPrefix: String? = nil, groupConfiguration: OpenIdConnectGroupConfigurationDetail? = nil, issuer: String, tokenSelection: OpenIdConnectTokenSelectionDetail) { + self.entityIdPrefix = entityIdPrefix + self.groupConfiguration = groupConfiguration + self.issuer = issuer + self.tokenSelection = tokenSelection + } + + private enum CodingKeys: String, CodingKey { + case entityIdPrefix = "entityIdPrefix" + case groupConfiguration = "groupConfiguration" + case issuer = "issuer" + case tokenSelection = "tokenSelection" + } + } + + public struct OpenIdConnectConfigurationItem: AWSDecodableShape { + /// A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos. + public let entityIdPrefix: String? + /// The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup. + public let groupConfiguration: OpenIdConnectGroupConfigurationItem? + /// The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration. 
+ public let issuer: String + /// The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. + public let tokenSelection: OpenIdConnectTokenSelectionItem + + public init(entityIdPrefix: String? = nil, groupConfiguration: OpenIdConnectGroupConfigurationItem? = nil, issuer: String, tokenSelection: OpenIdConnectTokenSelectionItem) { + self.entityIdPrefix = entityIdPrefix + self.groupConfiguration = groupConfiguration + self.issuer = issuer + self.tokenSelection = tokenSelection + } + + private enum CodingKeys: String, CodingKey { + case entityIdPrefix = "entityIdPrefix" + case groupConfiguration = "groupConfiguration" + case issuer = "issuer" + case tokenSelection = "tokenSelection" + } + } + + public struct OpenIdConnectGroupConfiguration: AWSEncodableShape { + /// The token claim that you want Verified Permissions to interpret as group membership. For example, groups. + public let groupClaim: String + /// The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member. + public let groupEntityType: String + + public init(groupClaim: String, groupEntityType: String) { + self.groupClaim = groupClaim + self.groupEntityType = groupEntityType + } + + public func validate(name: String) throws { + try self.validate(self.groupClaim, name: "groupClaim", parent: name, min: 1) + try self.validate(self.groupEntityType, name: "groupEntityType", parent: name, max: 200) + try self.validate(self.groupEntityType, name: "groupEntityType", parent: name, min: 1) + try self.validate(self.groupEntityType, name: "groupEntityType", parent: name, pattern: "^([_a-zA-Z][_a-zA-Z0-9]*::)*[_a-zA-Z][_a-zA-Z0-9]*$") + } + + private enum CodingKeys: String, CodingKey { + case groupClaim = "groupClaim" + case groupEntityType = "groupEntityType" + } + } + + public struct OpenIdConnectGroupConfigurationDetail: AWSDecodableShape { + /// The token claim that you want Verified Permissions to interpret as group membership. For example, groups. + public let groupClaim: String + /// The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member. + public let groupEntityType: String + + public init(groupClaim: String, groupEntityType: String) { + self.groupClaim = groupClaim + self.groupEntityType = groupEntityType + } + + private enum CodingKeys: String, CodingKey { + case groupClaim = "groupClaim" + case groupEntityType = "groupEntityType" + } + } + + public struct OpenIdConnectGroupConfigurationItem: AWSDecodableShape { + /// The token claim that you want Verified Permissions to interpret as group membership. For example, groups. + public let groupClaim: String + /// The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member. 
+ public let groupEntityType: String + + public init(groupClaim: String, groupEntityType: String) { + self.groupClaim = groupClaim + self.groupEntityType = groupEntityType + } + + private enum CodingKeys: String, CodingKey { + case groupClaim = "groupClaim" + case groupEntityType = "groupEntityType" + } + } + + public struct OpenIdConnectIdentityTokenConfiguration: AWSEncodableShape { + /// The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213. + public let clientIds: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(clientIds: [String]? = nil, principalIdClaim: String? = nil) { + self.clientIds = clientIds + self.principalIdClaim = principalIdClaim + } + + public func validate(name: String) throws { + try self.clientIds?.forEach { + try validate($0, name: "clientIds[]", parent: name, max: 255) + try validate($0, name: "clientIds[]", parent: name, min: 1) + try validate($0, name: "clientIds[]", parent: name, pattern: "^.*$") + } + try self.validate(self.clientIds, name: "clientIds", parent: name, max: 1000) + try self.validate(self.principalIdClaim, name: "principalIdClaim", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientIds = "clientIds" + case principalIdClaim = "principalIdClaim" + } + } + + public struct OpenIdConnectIdentityTokenConfigurationDetail: AWSDecodableShape { + /// The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213. + public let clientIds: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(clientIds: [String]? = nil, principalIdClaim: String? = nil) { + self.clientIds = clientIds + self.principalIdClaim = principalIdClaim + } + + private enum CodingKeys: String, CodingKey { + case clientIds = "clientIds" + case principalIdClaim = "principalIdClaim" + } + } + + public struct OpenIdConnectIdentityTokenConfigurationItem: AWSDecodableShape { + /// The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213. + public let clientIds: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(clientIds: [String]? = nil, principalIdClaim: String? = nil) { + self.clientIds = clientIds + self.principalIdClaim = principalIdClaim + } + + private enum CodingKeys: String, CodingKey { + case clientIds = "clientIds" + case principalIdClaim = "principalIdClaim" + } + } + public struct PolicyFilter: AWSEncodableShape { /// Filters the output to only template-linked policies that were instantiated from the specified policy template. public let policyTemplateId: String? @@ -1908,11 +2448,18 @@ extension VerifiedPermissions { } public struct PolicyItem: AWSDecodableShape { + /// The action that a policy permits or forbids. For example, + /// {"actions": [{"actionId": "ViewPhoto", "actionType": "PhotoFlash::Action"}, {"entityID": "SharePhoto", + /// "entityType": "PhotoFlash::Action"}]}. + public let actions: [ActionIdentifier]? /// The date and time the policy was created. 
@CustomCoding public var createdDate: Date /// The policy definition of an item in the list of policies returned. public let definition: PolicyDefinitionItem + /// The effect of the decision that a policy returns to an authorization + /// request. For example, "effect": "Permit". + public let effect: PolicyEffect? /// The date and time the policy was most recently updated. @CustomCoding public var lastUpdatedDate: Date @@ -1927,9 +2474,11 @@ extension VerifiedPermissions { /// The resource associated with the policy. public let resource: EntityIdentifier? - public init(createdDate: Date, definition: PolicyDefinitionItem, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + public init(actions: [ActionIdentifier]? = nil, createdDate: Date, definition: PolicyDefinitionItem, effect: PolicyEffect? = nil, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + self.actions = actions self.createdDate = createdDate self.definition = definition + self.effect = effect self.lastUpdatedDate = lastUpdatedDate self.policyId = policyId self.policyStoreId = policyStoreId @@ -1939,8 +2488,10 @@ extension VerifiedPermissions { } private enum CodingKeys: String, CodingKey { + case actions = "actions" case createdDate = "createdDate" case definition = "definition" + case effect = "effect" case lastUpdatedDate = "lastUpdatedDate" case policyId = "policyId" case policyStoreId = "policyStoreId" @@ -2305,6 +2856,119 @@ extension VerifiedPermissions { } } + public struct UpdateOpenIdConnectAccessTokenConfiguration: AWSEncodableShape { + /// The access token aud claim values that you want to accept in your policy store. For example, https://myapp.example.com, https://myapp2.example.com. + public let audiences: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(audiences: [String]? = nil, principalIdClaim: String? = nil) { + self.audiences = audiences + self.principalIdClaim = principalIdClaim + } + + public func validate(name: String) throws { + try self.audiences?.forEach { + try validate($0, name: "audiences[]", parent: name, max: 255) + try validate($0, name: "audiences[]", parent: name, min: 1) + } + try self.validate(self.audiences, name: "audiences", parent: name, max: 255) + try self.validate(self.audiences, name: "audiences", parent: name, min: 1) + try self.validate(self.principalIdClaim, name: "principalIdClaim", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case audiences = "audiences" + case principalIdClaim = "principalIdClaim" + } + } + + public struct UpdateOpenIdConnectConfiguration: AWSEncodableShape { + /// A descriptive string that you want to prefix to user entities from your OIDC identity provider. For example, if you set an entityIdPrefix of MyOIDCProvider, you can reference principals in your policies in the format MyCorp::User::MyOIDCProvider|Carlos. + public let entityIdPrefix: String? + /// The claim in OIDC identity provider tokens that indicates a user's group membership, and the entity type that you want to map it to. For example, this object can map the contents of a groups claim to MyCorp::UserGroup. + public let groupConfiguration: UpdateOpenIdConnectGroupConfiguration? + /// The issuer URL of an OIDC identity provider. 
This URL must have an OIDC discovery endpoint at the path .well-known/openid-configuration. + public let issuer: String + /// The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. + public let tokenSelection: UpdateOpenIdConnectTokenSelection + + public init(entityIdPrefix: String? = nil, groupConfiguration: UpdateOpenIdConnectGroupConfiguration? = nil, issuer: String, tokenSelection: UpdateOpenIdConnectTokenSelection) { + self.entityIdPrefix = entityIdPrefix + self.groupConfiguration = groupConfiguration + self.issuer = issuer + self.tokenSelection = tokenSelection + } + + public func validate(name: String) throws { + try self.validate(self.entityIdPrefix, name: "entityIdPrefix", parent: name, max: 100) + try self.validate(self.entityIdPrefix, name: "entityIdPrefix", parent: name, min: 1) + try self.groupConfiguration?.validate(name: "\(name).groupConfiguration") + try self.validate(self.issuer, name: "issuer", parent: name, max: 2048) + try self.validate(self.issuer, name: "issuer", parent: name, min: 1) + try self.validate(self.issuer, name: "issuer", parent: name, pattern: "^https://.*$") + try self.tokenSelection.validate(name: "\(name).tokenSelection") + } + + private enum CodingKeys: String, CodingKey { + case entityIdPrefix = "entityIdPrefix" + case groupConfiguration = "groupConfiguration" + case issuer = "issuer" + case tokenSelection = "tokenSelection" + } + } + + public struct UpdateOpenIdConnectGroupConfiguration: AWSEncodableShape { + /// The token claim that you want Verified Permissions to interpret as group membership. For example, groups. + public let groupClaim: String + /// The policy store entity type that you want to map your users' group claim to. For example, MyCorp::UserGroup. A group entity type is an entity that can have a user entity type as a member. + public let groupEntityType: String + + public init(groupClaim: String, groupEntityType: String) { + self.groupClaim = groupClaim + self.groupEntityType = groupEntityType + } + + public func validate(name: String) throws { + try self.validate(self.groupClaim, name: "groupClaim", parent: name, min: 1) + try self.validate(self.groupEntityType, name: "groupEntityType", parent: name, max: 200) + try self.validate(self.groupEntityType, name: "groupEntityType", parent: name, min: 1) + try self.validate(self.groupEntityType, name: "groupEntityType", parent: name, pattern: "^([_a-zA-Z][_a-zA-Z0-9]*::)*[_a-zA-Z][_a-zA-Z0-9]*$") + } + + private enum CodingKeys: String, CodingKey { + case groupClaim = "groupClaim" + case groupEntityType = "groupEntityType" + } + } + + public struct UpdateOpenIdConnectIdentityTokenConfiguration: AWSEncodableShape { + /// The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. For example, 1example23456789, 2example10111213. + public let clientIds: [String]? + /// The claim that determines the principal in OIDC access tokens. For example, sub. + public let principalIdClaim: String? + + public init(clientIds: [String]? = nil, principalIdClaim: String? 
= nil) { + self.clientIds = clientIds + self.principalIdClaim = principalIdClaim + } + + public func validate(name: String) throws { + try self.clientIds?.forEach { + try validate($0, name: "clientIds[]", parent: name, max: 255) + try validate($0, name: "clientIds[]", parent: name, min: 1) + try validate($0, name: "clientIds[]", parent: name, pattern: "^.*$") + } + try self.validate(self.clientIds, name: "clientIds", parent: name, max: 1000) + try self.validate(self.principalIdClaim, name: "principalIdClaim", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientIds = "clientIds" + case principalIdClaim = "principalIdClaim" + } + } + public struct UpdatePolicyInput: AWSEncodableShape { /// Specifies the updated policy content that you want to replace on the specified policy. The content must be valid Cedar policy language text. You can change only the following elements from the policy definition: The action referenced by the policy. Any conditional clauses, such as when or unless clauses. You can't change the following elements: Changing from static to templateLinked. Changing the effect of the policy from permit or forbid. The principal referenced by the policy. The resource referenced by the policy. public let definition: UpdatePolicyDefinition @@ -2337,9 +3001,16 @@ extension VerifiedPermissions { } public struct UpdatePolicyOutput: AWSDecodableShape { + /// The action that a policy permits or forbids. For example, + /// {"actions": [{"actionId": "ViewPhoto", "actionType": "PhotoFlash::Action"}, {"entityID": "SharePhoto", + /// "entityType": "PhotoFlash::Action"}]}. + public let actions: [ActionIdentifier]? /// The date and time that the policy was originally created. @CustomCoding public var createdDate: Date + /// The effect of the decision that a policy returns to an authorization + /// request. For example, "effect": "Permit". + public let effect: PolicyEffect? /// The date and time that the policy was most recently updated. @CustomCoding public var lastUpdatedDate: Date @@ -2354,8 +3025,10 @@ extension VerifiedPermissions { /// The resource specified in the policy's scope. This element isn't included in the response when Resource isn't present in the policy content. public let resource: EntityIdentifier? - public init(createdDate: Date, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + public init(actions: [ActionIdentifier]? = nil, createdDate: Date, effect: PolicyEffect? = nil, lastUpdatedDate: Date, policyId: String, policyStoreId: String, policyType: PolicyType, principal: EntityIdentifier? = nil, resource: EntityIdentifier? = nil) { + self.actions = actions self.createdDate = createdDate + self.effect = effect self.lastUpdatedDate = lastUpdatedDate self.policyId = policyId self.policyStoreId = policyStoreId @@ -2365,7 +3038,9 @@ extension VerifiedPermissions { } private enum CodingKeys: String, CodingKey { + case actions = "actions" case createdDate = "createdDate" + case effect = "effect" case lastUpdatedDate = "lastUpdatedDate" case policyId = "policyId" case policyStoreId = "policyStoreId" @@ -2530,49 +3205,6 @@ extension VerifiedPermissions { } } - public struct Configuration: AWSEncodableShape { - /// Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. 
It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool and one or more application client IDs. Example: "configuration":{"cognitoUserPoolConfiguration":{"userPoolArn":"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5","clientIds": ["a1b2c3d4e5f6g7h8i9j0kalbmc"],"groupConfiguration": {"groupEntityType": "MyCorp::Group"}}} - public let cognitoUserPoolConfiguration: CognitoUserPoolConfiguration? - - public init(cognitoUserPoolConfiguration: CognitoUserPoolConfiguration? = nil) { - self.cognitoUserPoolConfiguration = cognitoUserPoolConfiguration - } - - public func validate(name: String) throws { - try self.cognitoUserPoolConfiguration?.validate(name: "\(name).cognitoUserPoolConfiguration") - } - - private enum CodingKeys: String, CodingKey { - case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" - } - } - - public struct ConfigurationDetail: AWSDecodableShape { - /// Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool and one or more application client IDs. Example: "configuration":{"cognitoUserPoolConfiguration":{"userPoolArn":"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5","clientIds": ["a1b2c3d4e5f6g7h8i9j0kalbmc"],"groupConfiguration": {"groupEntityType": "MyCorp::Group"}}} - public let cognitoUserPoolConfiguration: CognitoUserPoolConfigurationDetail? - - public init(cognitoUserPoolConfiguration: CognitoUserPoolConfigurationDetail? = nil) { - self.cognitoUserPoolConfiguration = cognitoUserPoolConfiguration - } - - private enum CodingKeys: String, CodingKey { - case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" - } - } - - public struct ConfigurationItem: AWSDecodableShape { - /// Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool and one or more application client IDs. Example: "configuration":{"cognitoUserPoolConfiguration":{"userPoolArn":"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5","clientIds": ["a1b2c3d4e5f6g7h8i9j0kalbmc"],"groupConfiguration": {"groupEntityType": "MyCorp::Group"}}} - public let cognitoUserPoolConfiguration: CognitoUserPoolConfigurationItem? - - public init(cognitoUserPoolConfiguration: CognitoUserPoolConfigurationItem? = nil) { - self.cognitoUserPoolConfiguration = cognitoUserPoolConfiguration - } - - private enum CodingKeys: String, CodingKey { - case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" - } - } - public struct ContextDefinition: AWSEncodableShape & AWSDecodableShape { /// An list of attributes that are needed to successfully evaluate an authorization request. Each attribute in this array must include a map of a data type and its value. Example: "contextMap":{"<KeyName1>":{"boolean":true},"<KeyName2>":{"long":1234}} public let contextMap: [String: AttributeValue]? @@ -2629,23 +3261,6 @@ extension VerifiedPermissions { } } - public struct UpdateConfiguration: AWSEncodableShape { - /// Contains configuration details of a Amazon Cognito user pool. - public let cognitoUserPoolConfiguration: UpdateCognitoUserPoolConfiguration? - - public init(cognitoUserPoolConfiguration: UpdateCognitoUserPoolConfiguration? 
= nil) { - self.cognitoUserPoolConfiguration = cognitoUserPoolConfiguration - } - - public func validate(name: String) throws { - try self.cognitoUserPoolConfiguration?.validate(name: "\(name).cognitoUserPoolConfiguration") - } - - private enum CodingKeys: String, CodingKey { - case cognitoUserPoolConfiguration = "cognitoUserPoolConfiguration" - } - } - public struct UpdatePolicyDefinition: AWSEncodableShape { /// Contains details about the updates to be applied to a static policy. public let `static`: UpdateStaticPolicyDefinition? diff --git a/Sources/Soto/Services/WAFV2/WAFV2_shapes.swift b/Sources/Soto/Services/WAFV2/WAFV2_shapes.swift index be348f058c..1174c82c9f 100644 --- a/Sources/Soto/Services/WAFV2/WAFV2_shapes.swift +++ b/Sources/Soto/Services/WAFV2/WAFV2_shapes.swift @@ -374,6 +374,17 @@ extension WAFV2 { public var description: String { return self.rawValue } } + public enum LogScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case customer = "CUSTOMER" + case securityLake = "SECURITY_LAKE" + public var description: String { return self.rawValue } + } + + public enum LogType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case wafLogs = "WAF_LOGS" + public var description: String { return self.rawValue } + } + public enum MapMatchScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case all = "ALL" case key = "KEY" @@ -764,7 +775,7 @@ extension WAFV2 { public let fieldToMatch: FieldToMatch /// The area within the portion of the web request that you want WAF to search for SearchString. Valid values include the following: CONTAINS The specified part of the web request must include the value of SearchString, but the location doesn't matter. CONTAINS_WORD The specified part of the web request must include the value of SearchString, and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true: SearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot. SearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot;. EXACTLY The value of the specified part of the web request must exactly match the value of SearchString. STARTS_WITH The value of SearchString must appear at the beginning of the specified part of the web request. ENDS_WITH The value of SearchString must appear at the end of the specified part of the web request. public let positionalConstraint: PositionalConstraint - /// A string value that you want WAF to search for. WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 200 bytes. Valid values depend on the component that you specify for inspection in FieldToMatch: Method: The HTTP method that you want WAF to search for. This indicates the type of operation specified in the request. UriPath: The value that you want WAF to search for in the URI path, for example, /images/daily-ad.jpg. JA3Fingerprint: Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. 
This fingerprint serves as a unique identifier for the client's TLS configuration. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY. You can obtain the JA3 fingerprint for client requests from the web ACL logs. + /// A string value that you want WAF to search for. WAF searches only in the part of web requests that you designate for inspection in FieldToMatch. The maximum length of the value is 200 bytes. Valid values depend on the component that you specify for inspection in FieldToMatch: Method: The HTTP method that you want WAF to search for. This indicates the type of operation specified in the request. UriPath: The value that you want WAF to search for in the URI path, for example, /images/daily-ad.jpg. JA3Fingerprint: Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY. You can obtain the JA3 fingerprint for client requests from the web ACL logs. /// If WAF is able to calculate the fingerprint, it includes it in the logs. /// For information about the logging fields, /// see Log fields in the WAF Developer Guide. HeaderOrder: The list of header names to match for. WAF creates a string that contains the ordered list of header names, from the headers in the web request, and then matches against that string. If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive. If you're using the WAF API Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 200 bytes. For example, suppose the value of Type is HEADER and the value of Data is User-Agent. If you want to search the User-Agent header for the value BadBot, you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90, in the value of SearchString. If you're using the CLI or one of the Amazon Web Services SDKs The value that you want WAF to search for. The SDK automatically base64 encodes the value. @@ -1619,10 +1630,16 @@ extension WAFV2 { } public struct DeleteLoggingConfigurationRequest: AWSEncodableShape { + /// The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide. Default: CUSTOMER + public let logScope: LogScope? + /// Used to distinguish between various logging options. Currently, there is one option. Default: WAF_LOGS + public let logType: LogType? /// The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration. public let resourceArn: String - public init(resourceArn: String) { + public init(logScope: LogScope? = nil, logType: LogType? 
= nil, resourceArn: String) { + self.logScope = logScope + self.logType = logType self.resourceArn = resourceArn } @@ -1633,6 +1650,8 @@ extension WAFV2 { } private enum CodingKeys: String, CodingKey { + case logScope = "LogScope" + case logType = "LogType" case resourceArn = "ResourceArn" } } @@ -1997,7 +2016,7 @@ extension WAFV2 { public let headerOrder: HeaderOrder? /// Inspect the request headers. You must configure scope and pattern matching filters in the Headers object, to define the set of headers to and the parts of the headers that WAF inspects. Only the first 8 KB (8192 bytes) of a request's headers and only the first 200 headers are forwarded to WAF for inspection by the underlying host service. You must configure how to handle any oversize header content in the Headers object. WAF applies the pattern matching filters to the headers that it receives from the underlying host service. public let headers: Headers? - /// Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each + /// Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each /// request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY. You can obtain the JA3 fingerprint for client requests from the web ACL logs. /// If WAF is able to calculate the fingerprint, it includes it in the logs. /// For information about the logging fields, @@ -2298,10 +2317,16 @@ extension WAFV2 { } public struct GetLoggingConfigurationRequest: AWSEncodableShape { + /// The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide. Default: CUSTOMER + public let logScope: LogScope? + /// Used to distinguish between various logging options. Currently, there is one option. Default: WAF_LOGS + public let logType: LogType? /// The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration. public let resourceArn: String - public init(resourceArn: String) { + public init(logScope: LogScope? = nil, logType: LogType? = nil, resourceArn: String) { + self.logScope = logScope + self.logType = logType self.resourceArn = resourceArn } @@ -2312,6 +2337,8 @@ extension WAFV2 { } private enum CodingKeys: String, CodingKey { + case logScope = "LogScope" + case logType = "LogType" case resourceArn = "ResourceArn" } } @@ -3357,13 +3384,16 @@ extension WAFV2 { public struct ListLoggingConfigurationsRequest: AWSEncodableShape { /// The maximum number of objects that you want WAF to return for this request. 
If more objects are available, in the response, WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects. public let limit: Int? + /// The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide. Default: CUSTOMER + public let logScope: LogScope? /// When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request. public let nextMarker: String? /// Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance. To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows: CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1. API and SDKs - For all calls, use the Region endpoint us-east-1. public let scope: Scope - public init(limit: Int? = nil, nextMarker: String? = nil, scope: Scope) { + public init(limit: Int? = nil, logScope: LogScope? = nil, nextMarker: String? = nil, scope: Scope) { self.limit = limit + self.logScope = logScope self.nextMarker = nextMarker self.scope = scope } @@ -3378,6 +3408,7 @@ extension WAFV2 { private enum CodingKeys: String, CodingKey { case limit = "Limit" + case logScope = "LogScope" case nextMarker = "NextMarker" case scope = "Scope" } @@ -3720,16 +3751,22 @@ extension WAFV2 { public let logDestinationConfigs: [String] /// Filtering that specifies which web requests are kept in the logs and which are dropped. You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation. public let loggingFilter: LoggingFilter? + /// The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage. The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide. Default: CUSTOMER + public let logScope: LogScope? + /// Used to distinguish between various logging options. Currently, there is one option. Default: WAF_LOGS + public let logType: LogType? /// Indicates whether the logging configuration was created by Firewall Manager, as part of an WAF policy configuration. If true, only Firewall Manager can modify or delete the configuration. public let managedByFirewallManager: Bool? - /// The parts of the request that you want to keep out of the logs. For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting. 
Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch. You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method. + /// The parts of the request that you want to keep out of the logs. For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting. Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch. You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method. This setting has no impact on request sampling. With request sampling, the only way to exclude fields is by disabling sampling in the web ACL visibility configuration. public let redactedFields: [FieldToMatch]? /// The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs. public let resourceArn: String - public init(logDestinationConfigs: [String], loggingFilter: LoggingFilter? = nil, managedByFirewallManager: Bool? = nil, redactedFields: [FieldToMatch]? = nil, resourceArn: String) { + public init(logDestinationConfigs: [String], loggingFilter: LoggingFilter? = nil, logScope: LogScope? = nil, logType: LogType? = nil, managedByFirewallManager: Bool? = nil, redactedFields: [FieldToMatch]? = nil, resourceArn: String) { self.logDestinationConfigs = logDestinationConfigs self.loggingFilter = loggingFilter + self.logScope = logScope + self.logType = logType self.managedByFirewallManager = managedByFirewallManager self.redactedFields = redactedFields self.resourceArn = resourceArn @@ -3756,6 +3793,8 @@ extension WAFV2 { private enum CodingKeys: String, CodingKey { case logDestinationConfigs = "LogDestinationConfigs" case loggingFilter = "LoggingFilter" + case logScope = "LogScope" + case logType = "LogType" case managedByFirewallManager = "ManagedByFirewallManager" case redactedFields = "RedactedFields" case resourceArn = "ResourceArn" @@ -6128,7 +6167,7 @@ extension WAFV2 { public let cloudWatchMetricsEnabled: Bool /// A name of the Amazon CloudWatch metric dimension. The name can contain only the characters: A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to 128 characters long. It can't contain whitespace or metric names that are reserved for WAF, for example All and Default_Action. public let metricName: String - /// Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console. + /// Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console. Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration. 
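The hunks above thread the new LogScope and LogType enums through WAFV2's logging-configuration shapes. As a rough caller-side sketch built only from the GetLoggingConfigurationRequest initializer shown in this diff (the AWSClient named client and the web ACL ARN are assumed placeholders, not part of the patch):

// Sketch: `client` is an already-configured Soto AWSClient; the ARN below is a placeholder.
import SotoWAFV2

let wafv2 = WAFV2(client: client, region: .useast1)

// logScope/logType are the new optional members; .customer and .wafLogs mirror the
// documented defaults CUSTOMER and WAF_LOGS.
let request = WAFV2.GetLoggingConfigurationRequest(
    logScope: .customer,
    logType: .wafLogs,
    resourceArn: "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example/11111111-2222-3333-4444-555555555555"
)
let response = try await wafv2.getLoggingConfiguration(request)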
public let sampledRequestsEnabled: Bool public init(cloudWatchMetricsEnabled: Bool, metricName: String, sampledRequestsEnabled: Bool) { diff --git a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift index dcbd3a3474..98af8825ad 100644 --- a/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift +++ b/Sources/Soto/Services/WorkSpacesThinClient/WorkSpacesThinClient_shapes.swift @@ -120,6 +120,8 @@ extension WorkSpacesThinClient { public let desktopArn: String /// The URL for the identity provider login (only for environments that use AppStream 2.0). public let desktopEndpoint: String? + /// A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment. + public let deviceCreationTags: [String: String]? /// The Amazon Resource Name (ARN) of the Key Management Service key to use to encrypt the environment. public let kmsKeyArn: String? /// A specification for a time window to apply software updates. @@ -133,11 +135,12 @@ extension WorkSpacesThinClient { /// A map of the key-value pairs of the tag or tags to assign to the resource. public let tags: [String: String]? - public init(clientToken: String? = CreateEnvironmentRequest.idempotencyToken(), desiredSoftwareSetId: String? = nil, desktopArn: String, desktopEndpoint: String? = nil, kmsKeyArn: String? = nil, maintenanceWindow: MaintenanceWindow? = nil, name: String? = nil, softwareSetUpdateMode: SoftwareSetUpdateMode? = nil, softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? = nil, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateEnvironmentRequest.idempotencyToken(), desiredSoftwareSetId: String? = nil, desktopArn: String, desktopEndpoint: String? = nil, deviceCreationTags: [String: String]? = nil, kmsKeyArn: String? = nil, maintenanceWindow: MaintenanceWindow? = nil, name: String? = nil, softwareSetUpdateMode: SoftwareSetUpdateMode? = nil, softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? = nil, tags: [String: String]? 
= nil) { self.clientToken = clientToken self.desiredSoftwareSetId = desiredSoftwareSetId self.desktopArn = desktopArn self.desktopEndpoint = desktopEndpoint + self.deviceCreationTags = deviceCreationTags self.kmsKeyArn = kmsKeyArn self.maintenanceWindow = maintenanceWindow self.name = name @@ -156,6 +159,14 @@ extension WorkSpacesThinClient { try self.validate(self.desktopEndpoint, name: "desktopEndpoint", parent: name, max: 1024) try self.validate(self.desktopEndpoint, name: "desktopEndpoint", parent: name, min: 1) try self.validate(self.desktopEndpoint, name: "desktopEndpoint", parent: name, pattern: "^(https:\\/\\/)[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,32}(:[0-9]{1,5})?(\\/.*)?$") + try self.deviceCreationTags?.forEach { + try validate($0.key, name: "deviceCreationTags.key", parent: name, max: 128) + try validate($0.key, name: "deviceCreationTags.key", parent: name, min: 1) + try validate($0.key, name: "deviceCreationTags.key", parent: name, pattern: "^(?!aws:)[A-Za-z0-9 _=@:.+-/]+$") + try validate($0.value, name: "deviceCreationTags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "deviceCreationTags[\"\($0.key)\"]", parent: name, pattern: "^[A-Za-z0-9 _=@:.+-/]+$") + } + try self.validate(self.deviceCreationTags, name: "deviceCreationTags", parent: name, max: 50) try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, min: 20) try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:[\\w+=\\/,.@-]+:kms:[a-zA-Z0-9\\-]*:[0-9]{0,12}:key\\/[a-zA-Z0-9-]+$") @@ -168,6 +179,7 @@ extension WorkSpacesThinClient { case desiredSoftwareSetId = "desiredSoftwareSetId" case desktopArn = "desktopArn" case desktopEndpoint = "desktopEndpoint" + case deviceCreationTags = "deviceCreationTags" case kmsKeyArn = "kmsKeyArn" case maintenanceWindow = "maintenanceWindow" case name = "name" @@ -467,6 +479,8 @@ extension WorkSpacesThinClient { public let desktopEndpoint: String? /// The type of streaming desktop for the environment. public let desktopType: DesktopType? + /// "The tag keys and optional values for the newly created devices for this environment." + public let deviceCreationTags: [String: String]? /// The ID of the environment. public let id: String? /// The Amazon Resource Name (ARN) of the Key Management Service key used to encrypt the environment. @@ -492,7 +506,7 @@ extension WorkSpacesThinClient { /// The timestamp of when the device was updated. public let updatedAt: Date? - public init(activationCode: String? = nil, arn: String? = nil, createdAt: Date? = nil, desiredSoftwareSetId: String? = nil, desktopArn: String? = nil, desktopEndpoint: String? = nil, desktopType: DesktopType? = nil, id: String? = nil, kmsKeyArn: String? = nil, maintenanceWindow: MaintenanceWindow? = nil, name: String? = nil, pendingSoftwareSetId: String? = nil, pendingSoftwareSetVersion: String? = nil, registeredDevicesCount: Int? = nil, softwareSetComplianceStatus: EnvironmentSoftwareSetComplianceStatus? = nil, softwareSetUpdateMode: SoftwareSetUpdateMode? = nil, softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? = nil, tags: [String: String]? = nil, updatedAt: Date? = nil) { + public init(activationCode: String? = nil, arn: String? = nil, createdAt: Date? = nil, desiredSoftwareSetId: String? = nil, desktopArn: String? = nil, desktopEndpoint: String? = nil, desktopType: DesktopType? = nil, deviceCreationTags: [String: String]? = nil, id: String? = nil, kmsKeyArn: String? 
= nil, maintenanceWindow: MaintenanceWindow? = nil, name: String? = nil, pendingSoftwareSetId: String? = nil, pendingSoftwareSetVersion: String? = nil, registeredDevicesCount: Int? = nil, softwareSetComplianceStatus: EnvironmentSoftwareSetComplianceStatus? = nil, softwareSetUpdateMode: SoftwareSetUpdateMode? = nil, softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? = nil, tags: [String: String]? = nil, updatedAt: Date? = nil) { self.activationCode = activationCode self.arn = arn self.createdAt = createdAt @@ -500,6 +514,7 @@ extension WorkSpacesThinClient { self.desktopArn = desktopArn self.desktopEndpoint = desktopEndpoint self.desktopType = desktopType + self.deviceCreationTags = deviceCreationTags self.id = id self.kmsKeyArn = kmsKeyArn self.maintenanceWindow = maintenanceWindow @@ -522,6 +537,7 @@ extension WorkSpacesThinClient { case desktopArn = "desktopArn" case desktopEndpoint = "desktopEndpoint" case desktopType = "desktopType" + case deviceCreationTags = "deviceCreationTags" case id = "id" case kmsKeyArn = "kmsKeyArn" case maintenanceWindow = "maintenanceWindow" @@ -1121,6 +1137,8 @@ extension WorkSpacesThinClient { public let desktopArn: String? /// The URL for the identity provider login (only for environments that use AppStream 2.0). public let desktopEndpoint: String? + /// A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment. + public let deviceCreationTags: [String: String]? /// The ID of the environment to update. public let id: String /// A specification for a time window to apply software updates. @@ -1132,10 +1150,11 @@ extension WorkSpacesThinClient { /// An option to define if software updates should be applied within a maintenance window. public let softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? - public init(desiredSoftwareSetId: String? = nil, desktopArn: String? = nil, desktopEndpoint: String? = nil, id: String, maintenanceWindow: MaintenanceWindow? = nil, name: String? = nil, softwareSetUpdateMode: SoftwareSetUpdateMode? = nil, softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? = nil) { + public init(desiredSoftwareSetId: String? = nil, desktopArn: String? = nil, desktopEndpoint: String? = nil, deviceCreationTags: [String: String]? = nil, id: String, maintenanceWindow: MaintenanceWindow? = nil, name: String? = nil, softwareSetUpdateMode: SoftwareSetUpdateMode? = nil, softwareSetUpdateSchedule: SoftwareSetUpdateSchedule? 
= nil) { self.desiredSoftwareSetId = desiredSoftwareSetId self.desktopArn = desktopArn self.desktopEndpoint = desktopEndpoint + self.deviceCreationTags = deviceCreationTags self.id = id self.maintenanceWindow = maintenanceWindow self.name = name @@ -1149,6 +1168,7 @@ extension WorkSpacesThinClient { try container.encodeIfPresent(self.desiredSoftwareSetId, forKey: .desiredSoftwareSetId) try container.encodeIfPresent(self.desktopArn, forKey: .desktopArn) try container.encodeIfPresent(self.desktopEndpoint, forKey: .desktopEndpoint) + try container.encodeIfPresent(self.deviceCreationTags, forKey: .deviceCreationTags) request.encodePath(self.id, key: "id") try container.encodeIfPresent(self.maintenanceWindow, forKey: .maintenanceWindow) try container.encodeIfPresent(self.name, forKey: .name) @@ -1164,6 +1184,14 @@ extension WorkSpacesThinClient { try self.validate(self.desktopEndpoint, name: "desktopEndpoint", parent: name, max: 1024) try self.validate(self.desktopEndpoint, name: "desktopEndpoint", parent: name, min: 1) try self.validate(self.desktopEndpoint, name: "desktopEndpoint", parent: name, pattern: "^(https:\\/\\/)[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,32}(:[0-9]{1,5})?(\\/.*)?$") + try self.deviceCreationTags?.forEach { + try validate($0.key, name: "deviceCreationTags.key", parent: name, max: 128) + try validate($0.key, name: "deviceCreationTags.key", parent: name, min: 1) + try validate($0.key, name: "deviceCreationTags.key", parent: name, pattern: "^(?!aws:)[A-Za-z0-9 _=@:.+-/]+$") + try validate($0.value, name: "deviceCreationTags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "deviceCreationTags[\"\($0.key)\"]", parent: name, pattern: "^[A-Za-z0-9 _=@:.+-/]+$") + } + try self.validate(self.deviceCreationTags, name: "deviceCreationTags", parent: name, max: 50) try self.validate(self.id, name: "id", parent: name, pattern: "^[a-z0-9]{9}$") try self.maintenanceWindow?.validate(name: "\(name).maintenanceWindow") try self.validate(self.name, name: "name", parent: name, pattern: "^[0-9\\p{IsAlphabetic}+:,.@'\" -][0-9\\p{IsAlphabetic}+=:,.@'\" -]{0,63}$") @@ -1173,6 +1201,7 @@ extension WorkSpacesThinClient { case desiredSoftwareSetId = "desiredSoftwareSetId" case desktopArn = "desktopArn" case desktopEndpoint = "desktopEndpoint" + case deviceCreationTags = "deviceCreationTags" case maintenanceWindow = "maintenanceWindow" case name = "name" case softwareSetUpdateMode = "softwareSetUpdateMode" diff --git a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift index 3c4703d048..889f7b0504 100644 --- a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift +++ b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift @@ -19,7 +19,7 @@ /// Service object for interacting with AWS WorkSpacesWeb service. /// -/// WorkSpaces Web is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Web provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images. +/// Amazon WorkSpaces Secure Browser is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. 
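The WorkSpaces Thin Client hunks above add an optional deviceCreationTags map to CreateEnvironmentRequest and UpdateEnvironmentRequest, validated to at most 50 entries with keys up to 128 characters (not starting with "aws:") and values up to 256 characters. A minimal sketch of supplying it at environment creation, assuming an already-configured AWSClient named client and a placeholder desktop ARN:

// Sketch: `client` and the desktop ARN are assumed placeholders.
import SotoWorkSpacesThinClient

let thinClient = WorkSpacesThinClient(client: client, region: .useast1)

// deviceCreationTags is the new optional map of tags applied to devices created
// for this environment; the limits noted above come from the validate(name:) hunks.
let request = WorkSpacesThinClient.CreateEnvironmentRequest(
    desktopArn: "arn:aws:workspaces:us-east-1:123456789012:directory/d-1234567890",
    deviceCreationTags: ["CostCenter": "1234", "Fleet": "front-desk"],
    name: "FrontDeskEnvironment"
)
let response = try await thinClient.createEnvironment(request)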
WorkSpaces Secure Browser makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Secure Browser provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images. public struct WorkSpacesWeb: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift index f6bbbaec2b..df24a502c9 100644 --- a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift +++ b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift @@ -591,13 +591,16 @@ extension WorkSpacesWeb { public let identityProviderType: IdentityProviderType /// The ARN of the web portal. public let portalArn: String + /// The tags to add to the identity provider resource. A tag is a key-value pair. + public let tags: [Tag]? - public init(clientToken: String? = CreateIdentityProviderRequest.idempotencyToken(), identityProviderDetails: [String: String], identityProviderName: String, identityProviderType: IdentityProviderType, portalArn: String) { + public init(clientToken: String? = CreateIdentityProviderRequest.idempotencyToken(), identityProviderDetails: [String: String], identityProviderName: String, identityProviderType: IdentityProviderType, portalArn: String, tags: [Tag]? = nil) { self.clientToken = clientToken self.identityProviderDetails = identityProviderDetails self.identityProviderName = identityProviderName self.identityProviderType = identityProviderType self.portalArn = portalArn + self.tags = tags } public func validate(name: String) throws { @@ -615,6 +618,10 @@ extension WorkSpacesWeb { try self.validate(self.portalArn, name: "portalArn", parent: name, max: 2048) try self.validate(self.portalArn, name: "portalArn", parent: name, min: 20) try self.validate(self.portalArn, name: "portalArn", parent: name, pattern: "^arn:[\\w+=\\/,.@-]+:[a-zA-Z0-9\\-]+:[a-zA-Z0-9\\-]*:[a-zA-Z0-9]{1,12}:[a-zA-Z]+(\\/[a-fA-F0-9\\-]{36})+$") + try self.tags?.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) } private enum CodingKeys: String, CodingKey { @@ -623,6 +630,7 @@ extension WorkSpacesWeb { case identityProviderName = "identityProviderName" case identityProviderType = "identityProviderType" case portalArn = "portalArn" + case tags = "tags" } } @@ -652,7 +660,7 @@ extension WorkSpacesWeb { public let displayName: String? /// The IP rules of the IP access settings. public let ipRules: [IpRule] - /// The tags to add to the browser settings resource. A tag is a key-value pair. + /// The tags to add to the IP access settings resource. A tag is a key-value pair. public let tags: [Tag]? public init(additionalEncryptionContext: [String: String]? = nil, clientToken: String? = CreateIpAccessSettingsRequest.idempotencyToken(), customerManagedKey: String? = nil, description: String? = nil, displayName: String? = nil, ipRules: [IpRule], tags: [Tag]? = nil) { @@ -967,6 +975,8 @@ extension WorkSpacesWeb { public let copyAllowed: EnabledType /// The customer managed key used to encrypt sensitive information in the user settings. public let customerManagedKey: String? + /// Specifies whether the user can use deep links that open automatically when connecting to a session. 
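The WorkSpaces Web hunks in this region add a deepLinkAllowed setting alongside the existing clipboard, print, and file-transfer controls in the user-settings shapes. A minimal sketch of creating user settings with deep links enabled, assuming an already-configured AWSClient named client and the existing .enabled/.disabled cases of EnabledType:

// Sketch: `client` is an already-configured Soto AWSClient; EnabledType's
// .enabled/.disabled cases are assumed from the existing generated shape.
import SotoWorkSpacesWeb

let workSpacesWeb = WorkSpacesWeb(client: client, region: .useast1)

// deepLinkAllowed is the new optional member threaded through the Create/Update/UserSettings shapes.
let request = WorkSpacesWeb.CreateUserSettingsRequest(
    copyAllowed: .enabled,
    deepLinkAllowed: .enabled,
    downloadAllowed: .enabled,
    pasteAllowed: .enabled,
    printAllowed: .disabled,
    uploadAllowed: .disabled
)
let response = try await workSpacesWeb.createUserSettings(request)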
+ public let deepLinkAllowed: EnabledType? /// The amount of time that a streaming session remains active after users disconnect. public let disconnectTimeoutInMinutes: Int? /// Specifies whether the user can download files from the streaming session to the local device. @@ -982,12 +992,13 @@ extension WorkSpacesWeb { /// Specifies whether the user can upload files from the local device to the streaming session. public let uploadAllowed: EnabledType - public init(additionalEncryptionContext: [String: String]? = nil, clientToken: String? = CreateUserSettingsRequest.idempotencyToken(), cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType, customerManagedKey: String? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType, printAllowed: EnabledType, tags: [Tag]? = nil, uploadAllowed: EnabledType) { + public init(additionalEncryptionContext: [String: String]? = nil, clientToken: String? = CreateUserSettingsRequest.idempotencyToken(), cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType, customerManagedKey: String? = nil, deepLinkAllowed: EnabledType? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType, printAllowed: EnabledType, tags: [Tag]? = nil, uploadAllowed: EnabledType) { self.additionalEncryptionContext = additionalEncryptionContext self.clientToken = clientToken self.cookieSynchronizationConfiguration = cookieSynchronizationConfiguration self.copyAllowed = copyAllowed self.customerManagedKey = customerManagedKey + self.deepLinkAllowed = deepLinkAllowed self.disconnectTimeoutInMinutes = disconnectTimeoutInMinutes self.downloadAllowed = downloadAllowed self.idleDisconnectTimeoutInMinutes = idleDisconnectTimeoutInMinutes @@ -1026,6 +1037,7 @@ extension WorkSpacesWeb { case cookieSynchronizationConfiguration = "cookieSynchronizationConfiguration" case copyAllowed = "copyAllowed" case customerManagedKey = "customerManagedKey" + case deepLinkAllowed = "deepLinkAllowed" case disconnectTimeoutInMinutes = "disconnectTimeoutInMinutes" case downloadAllowed = "downloadAllowed" case idleDisconnectTimeoutInMinutes = "idleDisconnectTimeoutInMinutes" @@ -3183,6 +3195,8 @@ extension WorkSpacesWeb { public let cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? /// Specifies whether the user can copy text from the streaming session to the local device. public let copyAllowed: EnabledType? + /// Specifies whether the user can use deep links that open automatically when connecting to a session. + public let deepLinkAllowed: EnabledType? /// The amount of time that a streaming session remains active after users disconnect. public let disconnectTimeoutInMinutes: Int? /// Specifies whether the user can download files from the streaming session to the local device. @@ -3198,10 +3212,11 @@ extension WorkSpacesWeb { /// The ARN of the user settings. public let userSettingsArn: String - public init(clientToken: String? = UpdateUserSettingsRequest.idempotencyToken(), cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType? = nil, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType? = nil, printAllowed: EnabledType? = nil, uploadAllowed: EnabledType? 
= nil, userSettingsArn: String) { + public init(clientToken: String? = UpdateUserSettingsRequest.idempotencyToken(), cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType? = nil, deepLinkAllowed: EnabledType? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType? = nil, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType? = nil, printAllowed: EnabledType? = nil, uploadAllowed: EnabledType? = nil, userSettingsArn: String) { self.clientToken = clientToken self.cookieSynchronizationConfiguration = cookieSynchronizationConfiguration self.copyAllowed = copyAllowed + self.deepLinkAllowed = deepLinkAllowed self.disconnectTimeoutInMinutes = disconnectTimeoutInMinutes self.downloadAllowed = downloadAllowed self.idleDisconnectTimeoutInMinutes = idleDisconnectTimeoutInMinutes @@ -3217,6 +3232,7 @@ extension WorkSpacesWeb { try container.encodeIfPresent(self.clientToken, forKey: .clientToken) try container.encodeIfPresent(self.cookieSynchronizationConfiguration, forKey: .cookieSynchronizationConfiguration) try container.encodeIfPresent(self.copyAllowed, forKey: .copyAllowed) + try container.encodeIfPresent(self.deepLinkAllowed, forKey: .deepLinkAllowed) try container.encodeIfPresent(self.disconnectTimeoutInMinutes, forKey: .disconnectTimeoutInMinutes) try container.encodeIfPresent(self.downloadAllowed, forKey: .downloadAllowed) try container.encodeIfPresent(self.idleDisconnectTimeoutInMinutes, forKey: .idleDisconnectTimeoutInMinutes) @@ -3243,6 +3259,7 @@ extension WorkSpacesWeb { case clientToken = "clientToken" case cookieSynchronizationConfiguration = "cookieSynchronizationConfiguration" case copyAllowed = "copyAllowed" + case deepLinkAllowed = "deepLinkAllowed" case disconnectTimeoutInMinutes = "disconnectTimeoutInMinutes" case downloadAllowed = "downloadAllowed" case idleDisconnectTimeoutInMinutes = "idleDisconnectTimeoutInMinutes" @@ -3314,6 +3331,8 @@ extension WorkSpacesWeb { public let copyAllowed: EnabledType? /// The customer managed key used to encrypt sensitive information in the user settings. public let customerManagedKey: String? + /// Specifies whether the user can use deep links that open automatically when connecting to a session. + public let deepLinkAllowed: EnabledType? /// The amount of time that a streaming session remains active after users disconnect. public let disconnectTimeoutInMinutes: Int? /// Specifies whether the user can download files from the streaming session to the local device. @@ -3329,12 +3348,13 @@ extension WorkSpacesWeb { /// The ARN of the user settings. public let userSettingsArn: String - public init(additionalEncryptionContext: [String: String]? = nil, associatedPortalArns: [String]? = nil, cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType? = nil, customerManagedKey: String? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType? = nil, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType? = nil, printAllowed: EnabledType? = nil, uploadAllowed: EnabledType? = nil, userSettingsArn: String) { + public init(additionalEncryptionContext: [String: String]? = nil, associatedPortalArns: [String]? = nil, cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType? = nil, customerManagedKey: String? = nil, deepLinkAllowed: EnabledType? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType? 
= nil, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType? = nil, printAllowed: EnabledType? = nil, uploadAllowed: EnabledType? = nil, userSettingsArn: String) { self.additionalEncryptionContext = additionalEncryptionContext self.associatedPortalArns = associatedPortalArns self.cookieSynchronizationConfiguration = cookieSynchronizationConfiguration self.copyAllowed = copyAllowed self.customerManagedKey = customerManagedKey + self.deepLinkAllowed = deepLinkAllowed self.disconnectTimeoutInMinutes = disconnectTimeoutInMinutes self.downloadAllowed = downloadAllowed self.idleDisconnectTimeoutInMinutes = idleDisconnectTimeoutInMinutes @@ -3350,6 +3370,7 @@ extension WorkSpacesWeb { case cookieSynchronizationConfiguration = "cookieSynchronizationConfiguration" case copyAllowed = "copyAllowed" case customerManagedKey = "customerManagedKey" + case deepLinkAllowed = "deepLinkAllowed" case disconnectTimeoutInMinutes = "disconnectTimeoutInMinutes" case downloadAllowed = "downloadAllowed" case idleDisconnectTimeoutInMinutes = "idleDisconnectTimeoutInMinutes" @@ -3365,6 +3386,8 @@ extension WorkSpacesWeb { public let cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? /// Specifies whether the user can copy text from the streaming session to the local device. public let copyAllowed: EnabledType? + /// Specifies whether the user can use deep links that open automatically when connecting to a session. + public let deepLinkAllowed: EnabledType? /// The amount of time that a streaming session remains active after users disconnect. public let disconnectTimeoutInMinutes: Int? /// Specifies whether the user can download files from the streaming session to the local device. @@ -3380,9 +3403,10 @@ extension WorkSpacesWeb { /// The ARN of the user settings. public let userSettingsArn: String - public init(cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType? = nil, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType? = nil, printAllowed: EnabledType? = nil, uploadAllowed: EnabledType? = nil, userSettingsArn: String) { + public init(cookieSynchronizationConfiguration: CookieSynchronizationConfiguration? = nil, copyAllowed: EnabledType? = nil, deepLinkAllowed: EnabledType? = nil, disconnectTimeoutInMinutes: Int? = nil, downloadAllowed: EnabledType? = nil, idleDisconnectTimeoutInMinutes: Int? = nil, pasteAllowed: EnabledType? = nil, printAllowed: EnabledType? = nil, uploadAllowed: EnabledType? 
= nil, userSettingsArn: String) { self.cookieSynchronizationConfiguration = cookieSynchronizationConfiguration self.copyAllowed = copyAllowed + self.deepLinkAllowed = deepLinkAllowed self.disconnectTimeoutInMinutes = disconnectTimeoutInMinutes self.downloadAllowed = downloadAllowed self.idleDisconnectTimeoutInMinutes = idleDisconnectTimeoutInMinutes @@ -3395,6 +3419,7 @@ extension WorkSpacesWeb { private enum CodingKeys: String, CodingKey { case cookieSynchronizationConfiguration = "cookieSynchronizationConfiguration" case copyAllowed = "copyAllowed" + case deepLinkAllowed = "deepLinkAllowed" case disconnectTimeoutInMinutes = "disconnectTimeoutInMinutes" case downloadAllowed = "downloadAllowed" case idleDisconnectTimeoutInMinutes = "idleDisconnectTimeoutInMinutes" diff --git a/models/accessanalyzer.json b/models/accessanalyzer.json index 453593dbd4..98a664748d 100644 --- a/models/accessanalyzer.json +++ b/models/accessanalyzer.json @@ -7,16 +7,27 @@ "actions": { "target": "com.amazonaws.accessanalyzer#ActionsList", "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": [], "smithy.api#documentation": "
A list of actions for the access permissions. Any strings that can be used as an action\n in an IAM policy can be used in the list of actions to check.
", "smithy.api#length": { "max": 100 - }, - "smithy.api#required": {} + } + } + }, + "resources": { + "target": "com.amazonaws.accessanalyzer#ResourcesList", + "traits": { + "smithy.api#default": [], + "smithy.api#documentation": "
A list of resources for the access permissions. Any strings that can be used as a\n resource in an IAM policy can be used in the list of resources to check.
", + "smithy.api#length": { + "max": 100 + } } } }, "traits": { - "smithy.api#documentation": "
Contains information about actions that define permissions to check against a\n policy.
" + "smithy.api#documentation": "
Contains information about actions and resources that define permissions to check\n against a policy.
" } }, "com.amazonaws.accessanalyzer#AccessAnalyzer": { @@ -35,9 +46,15 @@ { "target": "com.amazonaws.accessanalyzer#CheckNoNewAccess" }, + { + "target": "com.amazonaws.accessanalyzer#CheckNoPublicAccess" + }, { "target": "com.amazonaws.accessanalyzer#CreateAccessPreview" }, + { + "target": "com.amazonaws.accessanalyzer#GenerateFindingRecommendation" + }, { "target": "com.amazonaws.accessanalyzer#GetAccessPreview" }, @@ -47,6 +64,9 @@ { "target": "com.amazonaws.accessanalyzer#GetFinding" }, + { + "target": "com.amazonaws.accessanalyzer#GetFindingRecommendation" + }, { "target": "com.amazonaws.accessanalyzer#GetFindingV2" }, @@ -1156,6 +1176,85 @@ ] } }, + "com.amazonaws.accessanalyzer#AccessCheckResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS::DynamoDB::Table", + "name": "DYNAMODB_TABLE" + }, + { + "value": "AWS::DynamoDB::Stream", + "name": "DYNAMODB_STREAM" + }, + { + "value": "AWS::EFS::FileSystem", + "name": "EFS_FILESYSTEM" + }, + { + "value": "AWS::OpenSearchService::Domain", + "name": "OPENSEARCHSERVICE_DOMAIN" + }, + { + "value": "AWS::Kinesis::Stream", + "name": "KINESIS_DATA_STREAM" + }, + { + "value": "AWS::Kinesis::StreamConsumer", + "name": "KINESIS_STREAM_CONSUMER" + }, + { + "value": "AWS::KMS::Key", + "name": "KMS_KEY" + }, + { + "value": "AWS::Lambda::Function", + "name": "LAMBDA_FUNCTION" + }, + { + "value": "AWS::S3::Bucket", + "name": "S3_BUCKET" + }, + { + "value": "AWS::S3::AccessPoint", + "name": "S3_ACCESS_POINT" + }, + { + "value": "AWS::S3Express::DirectoryBucket", + "name": "S3EXPRESS_DIRECTORYBUCKET" + }, + { + "value": "AWS::S3::Glacier", + "name": "S3_GLACIER" + }, + { + "value": "AWS::S3Outposts::Bucket", + "name": "S3_OUTPOSTS_BUCKET" + }, + { + "value": "AWS::S3Outposts::AccessPoint", + "name": "S3_OUTPOSTS_ACCESS_POINT" + }, + { + "value": "AWS::SecretsManager::Secret", + "name": "SECRETSMANAGER_SECRET" + }, + { + "value": "AWS::SNS::Topic", + "name": "SNS_TOPIC" + }, + { + "value": "AWS::SQS::Queue", + "name": "SQS_QUEUE" + }, + { + "value": "AWS::IAM::AssumeRolePolicyDocument", + "name": "ROLE_TRUST" + } + ] + } + }, "com.amazonaws.accessanalyzer#AccessDeniedException": { "type": "structure", "members": { @@ -2003,6 +2102,69 @@ ], "traits": { "smithy.api#documentation": "
Checks whether the specified access isn't allowed by a policy.
", + "smithy.api#examples": [ + { + "title": "Passing check. Restrictive identity policy.", + "input": { + "access": [ + { + "actions": [ + "s3:PutObject" + ] + } + ], + "policyDocument": "{\"Version\":\"2012-10-17\",\"Id\":\"123\",\"Statement\":[{\"Sid\":\"AllowJohnDoe\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:user/JohnDoe\"},\"Action\":\"s3:GetObject\",\"Resource\":\"*\"}]}", + "policyType": "RESOURCE_POLICY" + }, + "output": { + "result": "PASS", + "message": "The policy document does not grant access to perform the listed actions or resources." + } + }, + { + "title": "Passing check. Restrictive S3 Bucket resource policy.", + "input": { + "access": [ + { + "resources": [ + "arn:aws:s3:::sensitive-bucket/*" + ] + } + ], + "policyDocument": "{\"Version\":\"2012-10-17\",\"Id\":\"123\",\"Statement\":[{\"Sid\":\"AllowJohnDoe\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:user/JohnDoe\"},\"Action\":\"s3:PutObject\",\"Resource\":\"arn:aws:s3:::non-sensitive-bucket/*\"}]}", + "policyType": "RESOURCE_POLICY" + }, + "output": { + "result": "PASS", + "message": "The policy document does not grant access to perform the listed actions or resources." + } + }, + { + "title": "Failing check. Permissive S3 Bucket resource policy.", + "input": { + "access": [ + { + "resources": [ + "arn:aws:s3:::my-bucket/*" + ] + } + ], + "policyDocument": "{\"Version\":\"2012-10-17\",\"Id\":\"123\",\"Statement\":[{\"Sid\":\"AllowJohnDoe\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:user/JohnDoe\"},\"Action\":\"s3:PutObject\",\"Resource\":\"arn:aws:s3:::my-bucket/*\"}]}", + "policyType": "RESOURCE_POLICY" + }, + "output": { + "result": "FAIL", + "message": "The policy document grants access to perform one or more of the listed actions or resources.", + "reasons": [ + { + "description": "One or more of the listed actions or resources in the statement with sid: AllowJohnDoe.", + "statementIndex": 0, + "statementId": "AllowJohnDoe" + } + ] + } + } + ], "smithy.api#http": { "uri": "/policy/check-access-not-granted", "method": "POST", @@ -2027,7 +2189,7 @@ "access": { "target": "com.amazonaws.accessanalyzer#AccessList", "traits": { - "smithy.api#documentation": "
An access object containing the permissions that shouldn't be granted by the specified\n policy.
", + "smithy.api#documentation": "
An access object containing the permissions that shouldn't be granted by the specified\n policy. If only actions are specified, IAM Access Analyzer checks for access of the actions on\n all resources in the policy. If only resources are specified, then IAM Access Analyzer checks\n which actions have access to the specified resources. If both actions and resources are\n specified, then IAM Access Analyzer checks which of the specified actions have access to the\n specified resources.
", "smithy.api#length": { "max": 1 }, @@ -2198,6 +2360,141 @@ ] } }, + "com.amazonaws.accessanalyzer#CheckNoPublicAccess": { + "type": "operation", + "input": { + "target": "com.amazonaws.accessanalyzer#CheckNoPublicAccessRequest" + }, + "output": { + "target": "com.amazonaws.accessanalyzer#CheckNoPublicAccessResponse" + }, + "errors": [ + { + "target": "com.amazonaws.accessanalyzer#AccessDeniedException" + }, + { + "target": "com.amazonaws.accessanalyzer#InternalServerException" + }, + { + "target": "com.amazonaws.accessanalyzer#InvalidParameterException" + }, + { + "target": "com.amazonaws.accessanalyzer#ThrottlingException" + }, + { + "target": "com.amazonaws.accessanalyzer#UnprocessableEntityException" + }, + { + "target": "com.amazonaws.accessanalyzer#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Checks whether a resource policy can grant public access to the specified resource\n type.
", + "smithy.api#examples": [ + { + "title": "Passing check. S3 Bucket policy without public access.", + "input": { + "policyDocument": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Bob\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:user/JohnDoe\"},\"Action\":[\"s3:GetObject\"]}]}", + "resourceType": "AWS::S3::Bucket" + }, + "output": { + "result": "PASS", + "message": "The resource policy does not grant public access for the given resource type." + } + }, + { + "title": "Failing check. S3 Bucket policy with public access.", + "input": { + "policyDocument": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"Bob\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":[\"s3:GetObject\"]}]}", + "resourceType": "AWS::S3::Bucket" + }, + "output": { + "result": "FAIL", + "message": "The resource policy grants public access for the given resource type.", + "reasons": [ + { + "description": "Public access granted in the following statement with sid: Bob.", + "statementIndex": 0, + "statementId": "Bob" + } + ] + } + } + ], + "smithy.api#http": { + "uri": "/policy/check-no-public-access", + "method": "POST", + "code": 200 + }, + "smithy.api#readonly": {}, + "smithy.api#suppress": [ + "HttpMethodSemantics" + ] + } + }, + "com.amazonaws.accessanalyzer#CheckNoPublicAccessRequest": { + "type": "structure", + "members": { + "policyDocument": { + "target": "com.amazonaws.accessanalyzer#AccessCheckPolicyDocument", + "traits": { + "smithy.api#documentation": "
The JSON policy document to evaluate for public access.
", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "com.amazonaws.accessanalyzer#AccessCheckResourceType", + "traits": { + "smithy.api#documentation": "
The type of resource to evaluate for public access. For example, to check for public\n access to Amazon S3 buckets, you can choose AWS::S3::Bucket for the resource\n type.\n For resource types not supported as valid values, IAM Access Analyzer will return an\n error.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.accessanalyzer#CheckNoPublicAccessResponse": { + "type": "structure", + "members": { + "result": { + "target": "com.amazonaws.accessanalyzer#CheckNoPublicAccessResult", + "traits": { + "smithy.api#documentation": "
The result of the check for public access to the specified resource type. If the result\n is PASS, the policy doesn't allow public access to the specified resource\n type. If the result is FAIL, the policy might allow public access to the\n specified resource type.
" + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "
The message indicating whether the specified policy allows public access to\n resources.
" + } + }, + "reasons": { + "target": "com.amazonaws.accessanalyzer#ReasonSummaryList", + "traits": { + "smithy.api#documentation": "
A list of reasons why the specified resource policy grants public access for the\n resource type.
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.accessanalyzer#CheckNoPublicAccessResult": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PASS", + "name": "PASS" + }, + { + "value": "FAIL", + "name": "FAIL" + } + ] + } + }, "com.amazonaws.accessanalyzer#CloudTrailArn": { "type": "string", "traits": { @@ -3478,6 +3775,90 @@ "target": "com.amazonaws.accessanalyzer#FindingSummaryV2" } }, + "com.amazonaws.accessanalyzer#GenerateFindingRecommendation": { + "type": "operation", + "input": { + "target": "com.amazonaws.accessanalyzer#GenerateFindingRecommendationRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.accessanalyzer#AccessDeniedException" + }, + { + "target": "com.amazonaws.accessanalyzer#InternalServerException" + }, + { + "target": "com.amazonaws.accessanalyzer#ThrottlingException" + }, + { + "target": "com.amazonaws.accessanalyzer#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Creates a recommendation for an unused permissions finding.
", + "smithy.api#examples": [ + { + "title": "Successfully started generating finding recommendation", + "input": { + "analyzerArn": "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a", + "id": "finding-id" + }, + "output": {} + }, + { + "title": "Failed field validation for id value", + "input": { + "analyzerArn": "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a", + "id": "!" + }, + "error": { + "shapeId": "com.amazonaws.accessanalyzer#ValidationException", + "content": { + "reason": "fieldValidationFailed", + "message": "Invalid id." + } + }, + "allowConstraintErrors": true + } + ], + "smithy.api#http": { + "uri": "/recommendation/{id}", + "method": "POST", + "code": 200 + } + } + }, + "com.amazonaws.accessanalyzer#GenerateFindingRecommendationRequest": { + "type": "structure", + "members": { + "analyzerArn": { + "target": "com.amazonaws.accessanalyzer#AnalyzerArn", + "traits": { + "smithy.api#documentation": "
The ARN of\n the analyzer used to generate the finding recommendation.
", + "smithy.api#httpQuery": "analyzerArn", + "smithy.api#required": {} + } + }, + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "
The unique ID for the finding recommendation.
", + "smithy.api#httpLabel": {}, + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.accessanalyzer#GeneratedPolicy": { "type": "structure", "members": { @@ -3862,6 +4243,234 @@ "smithy.api#readonly": {} } }, + "com.amazonaws.accessanalyzer#GetFindingRecommendation": { + "type": "operation", + "input": { + "target": "com.amazonaws.accessanalyzer#GetFindingRecommendationRequest" + }, + "output": { + "target": "com.amazonaws.accessanalyzer#GetFindingRecommendationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.accessanalyzer#AccessDeniedException" + }, + { + "target": "com.amazonaws.accessanalyzer#InternalServerException" + }, + { + "target": "com.amazonaws.accessanalyzer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.accessanalyzer#ThrottlingException" + }, + { + "target": "com.amazonaws.accessanalyzer#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Retrieves information about a finding recommendation for the specified analyzer.
", + "smithy.api#examples": [ + { + "title": "Successfully fetched finding recommendation", + "input": { + "analyzerArn": "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a", + "id": "finding-id", + "maxResults": 3, + "nextToken": "token" + }, + "output": { + "startedAt": "2000-01-01T00:00:00Z", + "completedAt": "2000-01-01T00:00:01Z", + "resourceArn": "arn:aws:iam::111122223333:role/test", + "recommendedSteps": [ + { + "unusedPermissionsRecommendedStep": { + "recommendedAction": "DETACH_POLICY", + "existingPolicyId": "policy-id" + } + }, + { + "unusedPermissionsRecommendedStep": { + "recommendedAction": "CREATE_POLICY", + "existingPolicyId": "policy-id", + "recommendedPolicy": "policy-content" + } + } + ], + "recommendationType": "UnusedPermissionRecommendation", + "status": "SUCCEEDED" + } + }, + { + "title": "In progress finding recommendation", + "input": { + "analyzerArn": "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a", + "id": "finding-id", + "maxResults": 3 + }, + "output": { + "startedAt": "2000-01-01T00:00:00Z", + "resourceArn": "arn:aws:iam::111122223333:role/test", + "recommendationType": "UnusedPermissionRecommendation", + "status": "IN_PROGRESS" + } + }, + { + "title": "Failed finding recommendation", + "input": { + "analyzerArn": "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a", + "id": "finding-id", + "maxResults": 3 + }, + "output": { + "startedAt": "2000-01-01T00:00:00Z", + "completedAt": "2000-01-01T00:00:01Z", + "error": { + "code": "SERVICE_ERROR", + "message": "Service error. Please try again." + }, + "resourceArn": "arn:aws:iam::111122223333:role/test", + "recommendationType": "UnusedPermissionRecommendation", + "status": "FAILED" + } + }, + { + "title": "Failed field validation for id value", + "input": { + "analyzerArn": "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a", + "id": "!" + }, + "error": { + "shapeId": "com.amazonaws.accessanalyzer#ValidationException", + "content": { + "reason": "fieldValidationFailed", + "message": "Invalid id." + } + }, + "allowConstraintErrors": true + } + ], + "smithy.api#http": { + "uri": "/recommendation/{id}", + "method": "GET", + "code": 200 + }, + "smithy.api#paginated": { + "items": "recommendedSteps", + "pageSize": "maxResults", + "inputToken": "nextToken", + "outputToken": "nextToken" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.accessanalyzer#GetFindingRecommendationRequest": { + "type": "structure", + "members": { + "analyzerArn": { + "target": "com.amazonaws.accessanalyzer#AnalyzerArn", + "traits": { + "smithy.api#documentation": "
The ARN of\n the analyzer used to generate the finding recommendation.
", + "smithy.api#httpQuery": "analyzerArn", + "smithy.api#required": {} + } + }, + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "
The unique ID for the finding recommendation.
", + "smithy.api#httpLabel": {}, + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "
The maximum number of results to return in the response.
", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "nextToken": { + "target": "com.amazonaws.accessanalyzer#Token", + "traits": { + "smithy.api#documentation": "
A token used for pagination of results returned.
", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.accessanalyzer#GetFindingRecommendationResponse": { + "type": "structure", + "members": { + "startedAt": { + "target": "com.amazonaws.accessanalyzer#Timestamp", + "traits": { + "smithy.api#documentation": "
The time at which the retrieval of the finding recommendation was started.
", + "smithy.api#required": {} + } + }, + "completedAt": { + "target": "com.amazonaws.accessanalyzer#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the retrieval of the finding recommendation was completed.

" + } + }, + "nextToken": { + "target": "com.amazonaws.accessanalyzer#Token", + "traits": { + "smithy.api#documentation": "

A token used for pagination of results returned.

" + } + }, + "error": { + "target": "com.amazonaws.accessanalyzer#RecommendationError", + "traits": { + "smithy.api#documentation": "

Detailed information about the reason that the retrieval of a recommendation for the\n finding failed.

" + } + }, + "resourceArn": { + "target": "com.amazonaws.accessanalyzer#ResourceArn", + "traits": { + "smithy.api#documentation": "

The ARN of the resource of the finding.

", + "smithy.api#required": {} + } + }, + "recommendedSteps": { + "target": "com.amazonaws.accessanalyzer#RecommendedStepList", + "traits": { + "smithy.api#documentation": "

A group of recommended steps for the finding.

" + } + }, + "recommendationType": { + "target": "com.amazonaws.accessanalyzer#RecommendationType", + "traits": { + "smithy.api#documentation": "

The type of recommendation for the finding.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.accessanalyzer#Status", + "traits": { + "smithy.api#documentation": "

The status of the retrieval of the finding recommendation.
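A minimal sketch of driving this operation from Swift with Soto. The type and method names (AccessAnalyzer.GetFindingRecommendationRequest, getFindingRecommendation(_:)) are assumed from how Soto generates code for this model, awsClient stands for an AWSClient configured elsewhere, and the analyzer ARN and finding ID are placeholders. The loop follows the nextToken pagination declared above.

    import SotoAccessAnalyzer

    let accessAnalyzer = AccessAnalyzer(client: awsClient, region: .useast1)

    // GET /recommendation/{id}: fetch the recommendation for an unused-permissions finding,
    // following nextToken until all recommended steps have been returned.
    var nextToken: String? = nil
    var steps: [AccessAnalyzer.RecommendedStep] = []
    repeat {
        let response = try await accessAnalyzer.getFindingRecommendation(.init(
            analyzerArn: "arn:aws:access-analyzer:us-east-1:111122223333:analyzer/a",
            id: "finding-id",
            maxResults: 3,
            nextToken: nextToken
        ))
        steps += response.recommendedSteps ?? []
        print("status: \(response.status)")   // modeled values: IN_PROGRESS, SUCCEEDED, FAILED
        nextToken = response.nextToken
    } while nextToken != nil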

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.accessanalyzer#GetFindingRequest": { "type": "structure", "members": { @@ -5793,12 +6402,88 @@ "target": "com.amazonaws.accessanalyzer#ReasonSummary" } }, + "com.amazonaws.accessanalyzer#RecommendationError": { + "type": "structure", + "members": { + "code": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error code for a failed retrieval of a recommendation for a finding.

", + "smithy.api#required": {} + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The error message for a failed retrieval of a recommendation for a finding.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the reason that the retrieval of a recommendation for a\n finding failed.

" + } + }, + "com.amazonaws.accessanalyzer#RecommendationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "UnusedPermissionRecommendation", + "name": "UNUSED_PERMISSION_RECOMMENDATION" + } + ] + } + }, + "com.amazonaws.accessanalyzer#RecommendedRemediationAction": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATE_POLICY", + "name": "CREATE_POLICY" + }, + { + "value": "DETACH_POLICY", + "name": "DETACH_POLICY" + } + ] + } + }, + "com.amazonaws.accessanalyzer#RecommendedStep": { + "type": "union", + "members": { + "unusedPermissionsRecommendedStep": { + "target": "com.amazonaws.accessanalyzer#UnusedPermissionsRecommendedStep", + "traits": { + "smithy.api#documentation": "

A recommended step for an unused permissions finding.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about a recommended step for an unused access analyzer\n finding.

" + } + }, + "com.amazonaws.accessanalyzer#RecommendedStepList": { + "type": "list", + "member": { + "target": "com.amazonaws.accessanalyzer#RecommendedStep" + } + }, "com.amazonaws.accessanalyzer#RegionList": { "type": "list", "member": { "target": "smithy.api#String" } }, + "com.amazonaws.accessanalyzer#Resource": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + } + } + }, "com.amazonaws.accessanalyzer#ResourceArn": { "type": "string", "traits": { @@ -5906,6 +6591,12 @@ ] } }, + "com.amazonaws.accessanalyzer#ResourcesList": { + "type": "list", + "member": { + "target": "com.amazonaws.accessanalyzer#Resource" + } + }, "com.amazonaws.accessanalyzer#RetiringPrincipal": { "type": "string" }, @@ -6332,6 +7023,25 @@ "smithy.api#documentation": "

Starts a scan of the policies applied to the specified resource.

" } }, + "com.amazonaws.accessanalyzer#Status": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SUCCEEDED", + "name": "SUCCEEDED" + }, + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "IN_PROGRESS", + "name": "IN_PROGRESS" + } + ] + } + }, "com.amazonaws.accessanalyzer#StatusReason": { "type": "structure", "members": { @@ -6763,7 +7473,7 @@ "lastAccessed": { "target": "com.amazonaws.accessanalyzer#Timestamp", "traits": { - "smithy.api#documentation": "

The time at which the permission last accessed.

" + "smithy.api#documentation": "

The time at which the permission was last accessed.

" } } }, @@ -6771,6 +7481,39 @@ "smithy.api#documentation": "

Contains information about an unused access finding for a permission. IAM Access Analyzer\n charges for unused access analysis based on the number of IAM roles and users analyzed\n per month. For more details on pricing, see IAM Access Analyzer\n pricing.

" } }, + "com.amazonaws.accessanalyzer#UnusedPermissionsRecommendedStep": { + "type": "structure", + "members": { + "policyUpdatedAt": { + "target": "com.amazonaws.accessanalyzer#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the existing policy for the unused permissions finding was last\n updated.

" + } + }, + "recommendedAction": { + "target": "com.amazonaws.accessanalyzer#RecommendedRemediationAction", + "traits": { + "smithy.api#documentation": "

A recommendation of whether to create or detach a policy for an unused permissions\n finding.

", + "smithy.api#required": {} + } + }, + "recommendedPolicy": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

If the recommended action for the unused permissions finding is to replace the existing\n policy, the contents of the recommended policy to replace the policy specified in the\n existingPolicyId field.

" + } + }, + "existingPolicyId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

If the recommended action for the unused permissions finding is to detach a policy, the\n ID of an existing policy to be detached.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the action to take for a policy in an unused permissions\n finding.
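A sketch of how a caller might act on these steps, assuming Soto renders the RecommendedStep union as a Swift enum with associated values (as it does for other Smithy unions) and lowercases the RecommendedRemediationAction values to .detachPolicy and .createPolicy; the printing is purely illustrative.

    // steps: the recommendedSteps collected from GetFindingRecommendation (see the sketch above).
    for step in steps {
        guard case .unusedPermissionsRecommendedStep(let unused) = step else { continue }
        if unused.recommendedAction == .detachPolicy {
            // DETACH_POLICY: existingPolicyId names the policy to detach.
            print("Detach policy \(unused.existingPolicyId ?? "?")")
        } else if unused.recommendedAction == .createPolicy {
            // CREATE_POLICY: recommendedPolicy holds the replacement policy document.
            print("Create replacement policy:\n\(unused.recommendedPolicy ?? "")")
        }
    }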

" + } + }, "com.amazonaws.accessanalyzer#UpdateArchiveRule": { "type": "operation", "input": { @@ -7200,6 +7943,10 @@ { "value": "other", "name": "OTHER" + }, + { + "value": "notSupported", + "name": "NOT_SUPPORTED" } ] } diff --git a/models/account.json b/models/account.json index 814604516d..95a0177d57 100644 --- a/models/account.json +++ b/models/account.json @@ -1,6 +1,86 @@ { "smithy": "2.0", "shapes": { + "com.amazonaws.account#AcceptPrimaryEmailUpdate": { + "type": "operation", + "input": { + "target": "com.amazonaws.account#AcceptPrimaryEmailUpdateRequest" + }, + "output": { + "target": "com.amazonaws.account#AcceptPrimaryEmailUpdateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.account#AccessDeniedException" + }, + { + "target": "com.amazonaws.account#ConflictException" + }, + { + "target": "com.amazonaws.account#InternalServerException" + }, + { + "target": "com.amazonaws.account#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.account#TooManyRequestsException" + }, + { + "target": "com.amazonaws.account#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Accepts the request that originated from StartPrimaryEmailUpdate to update the primary email address (also known\n as the root user email address) for the specified account.

", + "smithy.api#http": { + "uri": "/acceptPrimaryEmailUpdate", + "method": "POST", + "code": 200 + } + } + }, + "com.amazonaws.account#AcceptPrimaryEmailUpdateRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.account#AccountId", + "traits": { + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. To use this parameter, the caller must be an identity in\n the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n

This operation can only be called from the management account or the delegated\n administrator account of an organization for a member account.

\n \n

The management account can't specify its own AccountId.

\n
", + "smithy.api#required": {} + } + }, + "PrimaryEmail": { + "target": "com.amazonaws.account#PrimaryEmailAddress", + "traits": { + "smithy.api#documentation": "

The new primary email address for use with the specified account. This must\n match the PrimaryEmail from the StartPrimaryEmailUpdate API\n call.

", + "smithy.api#required": {} + } + }, + "Otp": { + "target": "com.amazonaws.account#Otp", + "traits": { + "smithy.api#documentation": "

The OTP code sent to the PrimaryEmail specified on the\n StartPrimaryEmailUpdate API call.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.account#AcceptPrimaryEmailUpdateResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.account#PrimaryEmailUpdateStatus", + "traits": { + "smithy.api#documentation": "

Retrieves the status of the accepted primary email update request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.account#AccessDeniedException": { "type": "structure", "members": { @@ -27,6 +107,9 @@ { "target": "com.amazonaws.account#ContactInformationResource" }, + { + "target": "com.amazonaws.account#PrimaryEmailResource" + }, { "target": "com.amazonaws.account#RegionOptResource" } @@ -94,7 +177,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -137,7 +219,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -150,7 +233,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -164,7 +246,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -289,7 +370,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -324,7 +404,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -335,14 +414,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -356,14 +437,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -372,11 +451,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -387,14 +466,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -408,7 +489,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -428,7 +508,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -439,14 +518,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -457,9 +538,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1016,7 +1099,7 @@ "StateOrRegion": { "target": "com.amazonaws.account#StateOrRegion", "traits": { - "smithy.api#documentation": "

The state or region of the primary contact address. This field is required in selected countries.

" + "smithy.api#documentation": "

The state or region of the primary contact address. If the mailing address is within the United States (US), the\n value in this field can be either a two character state code (for example, NJ) or the full state name\n (for example, New Jersey). This field is required in the following countries: US,\n CA, GB, DE, JP, IN,\n and BR.

" } }, "DistrictOrCounty": { @@ -1042,14 +1125,14 @@ "PhoneNumber": { "target": "com.amazonaws.account#ContactInformationPhoneNumber", "traits": { - "smithy.api#documentation": "

The phone number of the primary contact information. The number will be validated and, in some countries, checked for activation.

", + "smithy.api#documentation": "

The phone number of the primary contact information. The number will be validated and,\n in some countries, checked for activation.

", "smithy.api#required": {} } }, "CompanyName": { "target": "com.amazonaws.account#CompanyName", "traits": { - "smithy.api#documentation": "

The name of the company associated with the primary contact information, if any.

" + "smithy.api#documentation": "

The name of the company associated with the primary contact information, if\n any.

" } }, "WebsiteUrl": { @@ -1060,7 +1143,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the details of the primary contact information associated with an Amazon Web Services account.

" + "smithy.api#documentation": "

Contains the details of the primary contact information associated with an\n Amazon Web Services account.

" } }, "com.amazonaws.account#ContactInformationPhoneNumber": { @@ -1180,7 +1263,7 @@ } ], "traits": { - "smithy.api#documentation": "

Disables (opts-out) a particular Region for an account.

", + "smithy.api#documentation": "

Disables (opts-out) a particular Region for an account.

\n \n

The act of disabling a Region will remove all IAM access to any resources that\n reside in that Region.

\n
", "smithy.api#http": { "uri": "/disableRegion", "method": "POST", @@ -1194,13 +1277,13 @@ "AccountId": { "target": "com.amazonaws.account#AccountId", "traits": { - "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this parameter, the\n caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must also be a member account in the same organization. The organization must\n have all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this\n parameter, the caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" } }, "RegionName": { "target": "com.amazonaws.account#RegionName", "traits": { - "smithy.api#documentation": "

Specifies the Region-code for a given Region name (for example, af-south-1). When\n you disable a Region, Amazon Web Services performs actions to deactivate that Region in your account, such\n as destroying IAM resources in the Region. This process takes a few minutes for most\n accounts, but this can take several hours. You cannot enable the Region until the\n disabling process is fully completed.

", + "smithy.api#documentation": "

Specifies the Region-code for a given Region name (for example,\n af-south-1). When you disable a Region, Amazon Web Services performs actions to\n deactivate that Region in your account, such as destroying IAM resources in the Region.\n This process takes a few minutes for most accounts, but this can take several hours. You\n cannot enable the Region until the disabling process is fully completed.

", "smithy.api#required": {} } } @@ -1270,13 +1353,13 @@ "AccountId": { "target": "com.amazonaws.account#AccountId", "traits": { - "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this parameter, the\n caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must also be a member account in the same organization. The organization must\n have all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this\n parameter, the caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" } }, "RegionName": { "target": "com.amazonaws.account#RegionName", "traits": { - "smithy.api#documentation": "

Specifies the Region-code for a given Region name (for example, af-south-1). When\n you enable a Region, Amazon Web Services performs actions to prepare your account in that Region, such\n as distributing your IAM resources to the Region. This process takes a few minutes for\n most accounts, but it can take several hours. You cannot use the Region until this\n process is complete. Furthermore, you cannot disable the Region until the enabling\n process is fully completed.

", + "smithy.api#documentation": "

Specifies the Region-code for a given Region name (for example,\n af-south-1). When you enable a Region, Amazon Web Services performs actions to\n prepare your account in that Region, such as distributing your IAM resources to the\n Region. This process takes a few minutes for most accounts, but it can take several\n hours. You cannot use the Region until this process is complete. Furthermore, you cannot\n disable the Region until the enabling process is fully completed.

", "smithy.api#required": {} } } @@ -1412,7 +1495,7 @@ "AccountId": { "target": "com.amazonaws.account#AccountId", "traits": { - "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this parameter, the\n caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must also be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to\n the account whose contacts you wish to retrieve or modify.

" + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this\n parameter, the caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" } } }, @@ -1426,7 +1509,74 @@ "ContactInformation": { "target": "com.amazonaws.account#ContactInformation", "traits": { - "smithy.api#documentation": "

Contains the details of the primary contact information associated with an Amazon Web Services account.

" + "smithy.api#documentation": "

Contains the details of the primary contact information associated with an\n Amazon Web Services account.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.account#GetPrimaryEmail": { + "type": "operation", + "input": { + "target": "com.amazonaws.account#GetPrimaryEmailRequest" + }, + "output": { + "target": "com.amazonaws.account#GetPrimaryEmailResponse" + }, + "errors": [ + { + "target": "com.amazonaws.account#AccessDeniedException" + }, + { + "target": "com.amazonaws.account#InternalServerException" + }, + { + "target": "com.amazonaws.account#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.account#TooManyRequestsException" + }, + { + "target": "com.amazonaws.account#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the primary email address for the specified account.

", + "smithy.api#http": { + "uri": "/getPrimaryEmail", + "method": "POST", + "code": 200 + }, + "smithy.api#readonly": {}, + "smithy.api#suppress": [ + "HttpMethodSemantics" + ] + } + }, + "com.amazonaws.account#GetPrimaryEmailRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.account#AccountId", + "traits": { + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. To use this parameter, the caller must be an identity in\n the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n

This operation can only be called from the management account or the delegated\n administrator account of an organization for a member account.

\n \n

The management account can't specify its own AccountId.

\n
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.account#GetPrimaryEmailResponse": { + "type": "structure", + "members": { + "PrimaryEmail": { + "target": "com.amazonaws.account#PrimaryEmailAddress", + "traits": { + "smithy.api#documentation": "

Retrieves the primary email address associated with the specified\n account.

" } } }, @@ -1475,13 +1625,13 @@ "AccountId": { "target": "com.amazonaws.account#AccountId", "traits": { - "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this parameter, the\n caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must also be a member account in the same organization. The organization must\n have all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this\n parameter, the caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" } }, "RegionName": { "target": "com.amazonaws.account#RegionName", "traits": { - "smithy.api#documentation": "

Specifies the Region-code for a given Region name (for example, af-south-1). This\n function will return the status of whatever Region you pass into this parameter.

", + "smithy.api#documentation": "

Specifies the Region-code for a given Region name (for example,\n af-south-1). This function will return the status of whatever Region\n you pass into this parameter.

", "smithy.api#required": {} } } @@ -1574,7 +1724,7 @@ "AccountId": { "target": "com.amazonaws.account#AccountId", "traits": { - "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this parameter, the\n caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must also be a member account in the same organization. The organization must\n have all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this\n parameter, the caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" } }, "MaxResults": { @@ -1637,6 +1787,13 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.account#Otp": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9]{6}$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.account#PhoneNumber": { "type": "string", "traits": { @@ -1658,6 +1815,50 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.account#PrimaryEmailAddress": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 5, + "max": 64 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.account#PrimaryEmailResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.account#AcceptPrimaryEmailUpdate" + }, + { + "target": "com.amazonaws.account#GetPrimaryEmail" + }, + { + "target": "com.amazonaws.account#StartPrimaryEmailUpdate" + } + ], + "traits": { + "aws.api#arn": { + "template": "PrimaryEmail" + } + } + }, + "com.amazonaws.account#PrimaryEmailUpdateStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "ACCEPTED", + "name": "ACCEPTED" + } + ] + } + }, "com.amazonaws.account#PutAlternateContact": { "type": "operation", "input": { @@ -1762,7 +1963,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the primary contact information of an Amazon Web Services account.

\n

For complete details about how to use the primary contact operations, see Update\n the primary and alternate contact information.

", + "smithy.api#documentation": "

Updates the primary contact information of an Amazon Web Services account.

\n

For complete details about how to use the primary contact operations, see Update\n the primary and alternate contact information.
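A hedged Soto sketch of updating the primary contact. Member names follow the ContactInformation shape in this model; which address members are required is assumed from the Account API and may differ, all values are placeholders, and awsClient is an AWSClient configured elsewhere.

    import SotoAccount

    let account = Account(client: awsClient)

    // Placeholder contact details; stateOrRegion may be a two-character state code or the
    // full state name for US addresses.
    let contact = Account.ContactInformation(
        addressLine1: "100 Main St",
        city: "Seattle",
        countryCode: "US",
        fullName: "Example Admin",
        phoneNumber: "+12065550100",
        postalCode: "98101",
        stateOrRegion: "WA",
        websiteUrl: "https://example.com"
    )
    _ = try await account.putContactInformation(.init(contactInformation: contact))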

", "smithy.api#http": { "uri": "/putContactInformation", "method": "POST", @@ -1777,14 +1978,14 @@ "ContactInformation": { "target": "com.amazonaws.account#ContactInformation", "traits": { - "smithy.api#documentation": "

Contains the details of the primary contact information associated with an Amazon Web Services account.

", + "smithy.api#documentation": "

Contains the details of the primary contact information associated with an\n Amazon Web Services account.

", "smithy.api#required": {} } }, "AccountId": { "target": "com.amazonaws.account#AccountId", "traits": { - "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this parameter, the\n caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must also be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to\n the account whose contacts you wish to retrieve or modify.

" + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. If you don't specify this parameter, it defaults to the\n Amazon Web Services account of the identity used to call the operation. To use this\n parameter, the caller must be an identity in the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n \n

The management account can't specify its own AccountId. It must call\n the operation in standalone context by not including the AccountId\n parameter.

\n
\n

To call this operation on an account that is not a member of an organization, don't\n specify this parameter. Instead, call the operation using an identity belonging to the\n account whose contacts you wish to retrieve or modify.

" } } }, @@ -1804,7 +2005,7 @@ "RegionOptStatus": { "target": "com.amazonaws.account#RegionOptStatus", "traits": { - "smithy.api#documentation": "

One of potential statuses a Region can undergo (Enabled, Enabling, Disabled, Disabling,\n Enabled_By_Default).

" + "smithy.api#documentation": "

One of potential statuses a Region can undergo (Enabled, Enabling, Disabled,\n Disabling, Enabled_By_Default).
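A sketch of the Region opt-in flow these shapes describe: request enabling, then poll GetRegionOptStatus until the Region reports ENABLED. Names (enableRegion(_:), getRegionOptStatus(_:), RegionOptStatus.enabled) are assumed from Soto's code generation, the Region code is a placeholder, and awsClient is configured elsewhere.

    import SotoAccount

    let account = Account(client: awsClient)
    let region = "af-south-1"   // placeholder opt-in Region code

    // Kick off enabling; this can take from a few minutes up to several hours.
    _ = try await account.enableRegion(.init(regionName: region))

    // Poll the opt status until the Region is usable.
    while true {
        let status = try await account.getRegionOptStatus(.init(regionName: region))
        if status.regionOptStatus == .enabled { break }
        try await Task.sleep(nanoseconds: 30_000_000_000)   // wait 30 seconds between polls
    }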

" } } }, @@ -1904,6 +2105,79 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.account#StartPrimaryEmailUpdate": { + "type": "operation", + "input": { + "target": "com.amazonaws.account#StartPrimaryEmailUpdateRequest" + }, + "output": { + "target": "com.amazonaws.account#StartPrimaryEmailUpdateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.account#AccessDeniedException" + }, + { + "target": "com.amazonaws.account#ConflictException" + }, + { + "target": "com.amazonaws.account#InternalServerException" + }, + { + "target": "com.amazonaws.account#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.account#TooManyRequestsException" + }, + { + "target": "com.amazonaws.account#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Starts the process to update the primary email address for the specified\n account.

", + "smithy.api#http": { + "uri": "/startPrimaryEmailUpdate", + "method": "POST", + "code": 200 + } + } + }, + "com.amazonaws.account#StartPrimaryEmailUpdateRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.account#AccountId", + "traits": { + "smithy.api#documentation": "

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access\n or modify with this operation. To use this parameter, the caller must be an identity in\n the organization's\n management account or a delegated administrator account. The specified\n account ID must be a member account in the same organization. The organization must have\n all features\n enabled, and the organization must have trusted access enabled\n for the Account Management service, and optionally a delegated admin account\n assigned.

\n

This operation can only be called from the management account or the delegated\n administrator account of an organization for a member account.

\n \n

The management account can't specify its own AccountId.

\n
", + "smithy.api#required": {} + } + }, + "PrimaryEmail": { + "target": "com.amazonaws.account#PrimaryEmailAddress", + "traits": { + "smithy.api#documentation": "

The new primary email address (also known as the root user email address) to\n use in the specified account.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.account#StartPrimaryEmailUpdateResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.account#PrimaryEmailUpdateStatus", + "traits": { + "smithy.api#documentation": "

The status of the primary email update request.
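A sketch of the two-step flow modeled here: StartPrimaryEmailUpdate sends a one-time password to the new address, AcceptPrimaryEmailUpdate confirms it, and GetPrimaryEmail reads the result back. Shape and method names are assumed from Soto's generation; the account ID, address, and OTP are placeholders, and the call must come from the organization's management or delegated administrator account.

    import SotoAccount

    let account = Account(client: awsClient)
    let memberAccountId = "111122223333"   // must be a member account, not the management account
    let newEmail = "root-new@example.com"

    // Step 1: request the change; an OTP is sent to the new address.
    let started = try await account.startPrimaryEmailUpdate(
        .init(accountId: memberAccountId, primaryEmail: newEmail)
    )
    print("status: \(started.status?.rawValue ?? "unknown")")   // expected PENDING

    // Step 2: confirm with the 6-character OTP received at the new address (placeholder here).
    let accepted = try await account.acceptPrimaryEmailUpdate(
        .init(accountId: memberAccountId, otp: "ABC123", primaryEmail: newEmail)
    )
    print("status: \(accepted.status?.rawValue ?? "unknown")")  // expected ACCEPTED

    // Verify the new root user email address.
    let current = try await account.getPrimaryEmail(.init(accountId: memberAccountId))
    print("primary email: \(current.primaryEmail ?? "?")")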

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.account#StateOrRegion": { "type": "string", "traits": { diff --git a/models/acm-pca.json b/models/acm-pca.json index 3f8388611d..651b691b04 100644 --- a/models/acm-pca.json +++ b/models/acm-pca.json @@ -108,7 +108,7 @@ "sdkId": "ACM PCA", "arnNamespace": "acm-pca", "cloudFormationName": "ACMPCA", - "cloudTrailEventSource": "acmpca.amazonaws.com", + "cloudTrailEventSource": "acm-pca.amazonaws.com", "endpointPrefix": "acm-pca" }, "aws.auth#sigv4": { @@ -1773,7 +1773,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a root or subordinate private certificate authority (CA). You must specify the\n\t\t\tCA configuration, an optional configuration for Online Certificate Status Protocol\n\t\t\t(OCSP) and/or a certificate revocation list (CRL), the CA type, and an optional\n\t\t\tidempotency token to avoid accidental creation of multiple CAs. The CA configuration\n\t\t\tspecifies the name of the algorithm and key size to be used to create the CA private\n\t\t\tkey, the type of signing algorithm that the CA uses, and X.500 subject information. The\n\t\t\tOCSP configuration can optionally specify a custom URL for the OCSP responder. The CRL\n\t\t\tconfiguration specifies the CRL expiration period in days (the validity period of the\n\t\t\tCRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3\n\t\t\tbucket that is included in certificates issued by the CA. If successful, this action\n\t\t\treturns the Amazon Resource Name (ARN) of the CA.

\n \n

Both Amazon Web Services Private CA and the IAM principal must have permission to write to\n the S3 bucket that you specify. If the IAM principal making the call\n does not have permission to write to the bucket, then an exception is\n thrown. For more information, see Access \n\t\t\t\t\t\tpolicies for CRLs in Amazon S3.

\n
\n

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs.

", + "smithy.api#documentation": "

Creates a root or subordinate private certificate authority (CA). You must specify the\n\t\t\tCA configuration, an optional configuration for Online Certificate Status Protocol\n\t\t\t(OCSP) and/or a certificate revocation list (CRL), the CA type, and an optional\n\t\t\tidempotency token to avoid accidental creation of multiple CAs. The CA configuration\n\t\t\tspecifies the name of the algorithm and key size to be used to create the CA private\n\t\t\tkey, the type of signing algorithm that the CA uses, and X.500 subject information. The\n\t\t\tOCSP configuration can optionally specify a custom URL for the OCSP responder. The CRL\n\t\t\tconfiguration specifies the CRL expiration period in days (the validity period of the\n\t\t\tCRL), the Amazon S3 bucket that will contain the CRL, and a CNAME alias for the S3\n\t\t\tbucket that is included in certificates issued by the CA. If successful, this action\n\t\t\treturns the Amazon Resource Name (ARN) of the CA.

\n \n

Both Amazon Web Services Private CA and the IAM principal must have permission to write to\n the S3 bucket that you specify. If the IAM principal making the call\n does not have permission to write to the bucket, then an exception is\n thrown. For more information, see Access \n\t\t\t\t\t\tpolicies for CRLs in Amazon S3.

\n
\n

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs.

", "smithy.api#idempotent": {} } }, @@ -2035,7 +2035,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains configuration information for a certificate revocation list (CRL). Your\n\t\t\tprivate certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You\n\t\t\tcan enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA\n\t\t\twrites CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by\n\t\t\tspecifying a value for the CustomCname parameter. Your\n\t\t\tprivate CA by default copies the CNAME or the S3 bucket name to the CRL\n\t\t\t\tDistribution Points extension of each certificate it issues. If you want to configure\n\t\t\t\tthis default behavior to be something different, you can set the CrlDistributionPointExtensionConfiguration \n\t\t\t\tparameter. Your S3\n\t\t\tbucket policy must give write permission to Amazon Web Services Private CA.

\n

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs.

\n

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a\n\t\t\tcertificate's expiration date or when a certificate is revoked. When a certificate is\n\t\t\trevoked, it appears in the CRL until the certificate expires, and then in one additional\n\t\t\tCRL after expiration, and it always appears in the audit report.

\n

A CRL is typically updated approximately 30 minutes after a certificate \n\tis revoked. If for any reason a CRL update fails, Amazon Web Services Private CA makes further attempts \n\tevery 15 minutes.

\n

CRLs contain the following fields:

\n
  • Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1.
  • Signature Algorithm: The name of the algorithm used to sign the CRL.
  • Issuer: The X.500 distinguished name of your private CA that issued the CRL.
  • Last Update: The issue date and time of this CRL.
  • Next Update: The day and time by which the next CRL will be issued.
  • Revoked Certificates: List of revoked certificates. Each list item contains the following information.
      • Serial Number: The serial number, in hexadecimal format, of the revoked certificate.
      • Revocation Date: Date and time the certificate was revoked.
      • CRL Entry Extensions: Optional extensions for the CRL entry.
          • X509v3 CRL Reason Code: Reason the certificate was revoked.
  • CRL Extensions: Optional extensions for the CRL.
      • X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate.
      • X509v3 CRL Number: Decimal sequence number for the CRL.
  • Signature Algorithm: Algorithm used by your private CA to sign the CRL.
  • Signature Value: Signature computed over the CRL.

Certificate revocation lists created by Amazon Web Services Private CA are DER-encoded. You can use the\n\t\t\tfollowing OpenSSL command to list a CRL.

\n

\n openssl crl -inform DER -text -in crl_path\n\t\t\t-noout\n

\n

For more information, see Planning a certificate revocation list\n\t\t\t\t(CRL) in the Amazon Web Services Private Certificate Authority User Guide\n

" + "smithy.api#documentation": "

Contains configuration information for a certificate revocation list (CRL). Your\n\t\t\tprivate certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You\n\t\t\tcan enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA\n\t\t\twrites CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by\n\t\t\tspecifying a value for the CustomCname parameter. Your\n\t\t\tprivate CA by default copies the CNAME or the S3 bucket name to the CRL\n\t\t\t\tDistribution Points extension of each certificate it issues. If you want to configure\n\t\t\t\tthis default behavior to be something different, you can set the CrlDistributionPointExtensionConfiguration \n\t\t\t\tparameter. Your S3\n\t\t\tbucket policy must give write permission to Amazon Web Services Private CA.

\n

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your\n\t\t\tCRLs.

\n

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a\n\t\t\tcertificate's expiration date or when a certificate is revoked. When a certificate is\n\t\t\trevoked, it appears in the CRL until the certificate expires, and then in one additional\n\t\t\tCRL after expiration, and it always appears in the audit report.

\n

A CRL is typically updated approximately 30 minutes after a certificate \n\tis revoked. If for any reason a CRL update fails, Amazon Web Services Private CA makes further attempts \n\tevery 15 minutes.

\n

CRLs contain the following fields:

\n
  • Version: The current version number defined in RFC 5280 is V2. The integer value is 0x1.
  • Signature Algorithm: The name of the algorithm used to sign the CRL.
  • Issuer: The X.500 distinguished name of your private CA that issued the CRL.
  • Last Update: The issue date and time of this CRL.
  • Next Update: The day and time by which the next CRL will be issued.
  • Revoked Certificates: List of revoked certificates. Each list item contains the following information.
      • Serial Number: The serial number, in hexadecimal format, of the revoked certificate.
      • Revocation Date: Date and time the certificate was revoked.
      • CRL Entry Extensions: Optional extensions for the CRL entry.
          • X509v3 CRL Reason Code: Reason the certificate was revoked.
  • CRL Extensions: Optional extensions for the CRL.
      • X509v3 Authority Key Identifier: Identifies the public key associated with the private key used to sign the certificate.
      • X509v3 CRL Number: Decimal sequence number for the CRL.
  • Signature Algorithm: Algorithm used by your private CA to sign the CRL.
  • Signature Value: Signature computed over the CRL.

Certificate revocation lists created by Amazon Web Services Private CA are DER-encoded. You can use the\n\t\t\tfollowing OpenSSL command to list a CRL.

\n

\n openssl crl -inform DER -text -in crl_path\n\t\t\t-noout\n

\n

For more information, see Planning a certificate revocation list\n\t\t\t\t(CRL) in the Amazon Web Services Private Certificate Authority User Guide\n
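A sketch of expressing this revocation configuration with Soto, using only members named in this model (enabled, expirationInDays, s3BucketName, customCname); the bucket and CNAME are placeholders and the Swift shape names are assumed from Soto's generation.

    import SotoACMPCA

    // The named bucket must grant Amazon Web Services Private CA permission to write the CRL.
    let crl = ACMPCA.CrlConfiguration(
        customCname: "crl.example.com",      // hides the S3 bucket name in issued certificates
        enabled: true,
        expirationInDays: 7,                 // drives the nextUpdate field of each CRL
        s3BucketName: "amzn-s3-demo-bucket"
    )
    let revocation = ACMPCA.RevocationConfiguration(crlConfiguration: crl)
    // Pass `revocation` to CreateCertificateAuthority or UpdateCertificateAuthority.

    // To inspect a generated CRL locally (command quoted from the documentation above):
    //   openssl crl -inform DER -text -in crl_path -noout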

" } }, "com.amazonaws.acmpca#CrlDistributionPointExtensionConfiguration": { @@ -2360,6 +2360,9 @@ ], "traits": { "smithy.api#documentation": "

Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport action. Audit information is created\n\t\t\tevery time the certificate authority (CA) private key is used. The private key is used\n\t\t\twhen you call the IssueCertificate action or the\n\t\t\t\tRevokeCertificate action.

", + "smithy.api#suppress": [ + "WaitableTraitInvalidErrorType" + ], "smithy.waiters#waitable": { "AuditReportCreated": { "documentation": "Wait until a Audit Report is created", @@ -2383,6 +2386,12 @@ "comparator": "stringEquals" } } + }, + { + "state": "failure", + "matcher": { + "errorType": "AccessDeniedException" + } } ], "minDelay": 3 @@ -2735,6 +2744,9 @@ ], "traits": { "smithy.api#documentation": "

Retrieves a certificate from your private CA or one that has been shared with you. The\n\t\t\tARN of the certificate is returned when you call the IssueCertificate action. You\n\t\t\tmust specify both the ARN of your private CA and the ARN of the issued certificate when\n\t\t\tcalling the GetCertificate action. You can retrieve the\n\t\t\tcertificate if it is in the ISSUED state. You can call\n\t\t\tthe CreateCertificateAuthorityAuditReport action to create a report that\n\t\t\tcontains information about all of the certificates issued and revoked by your private\n\t\t\tCA.
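A sketch of using the CertificateIssued waiter defined above: Soto typically exposes a waiter like this as a waitUntil… method on the service client, though the exact name depends on its generator. ARNs are placeholders and awsClient is configured elsewhere.

    import SotoACMPCA

    let acmpca = ACMPCA(client: awsClient)
    let request = ACMPCA.GetCertificateRequest(
        certificateArn: "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/ca-id/certificate/cert-id",
        certificateAuthorityArn: "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/ca-id"
    )

    // The waiter retries while GetCertificate raises RequestInProgressException and, with this
    // model change, fails immediately on AccessDeniedException instead of retrying to timeout.
    try await acmpca.waitUntilCertificateIssued(request)
    let issued = try await acmpca.getCertificate(request)
    print(issued.certificate ?? "")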

", + "smithy.api#suppress": [ + "WaitableTraitInvalidErrorType" + ], "smithy.waiters#waitable": { "CertificateIssued": { "documentation": "Wait until a certificate is issued", @@ -2750,6 +2762,12 @@ "matcher": { "errorType": "RequestInProgressException" } + }, + { + "state": "failure", + "matcher": { + "errorType": "AccessDeniedException" + } } ], "minDelay": 1 @@ -2842,6 +2860,9 @@ ], "traits": { "smithy.api#documentation": "

Retrieves the certificate signing request (CSR) for your private certificate authority\n\t\t\t(CA). The CSR is created when you call the CreateCertificateAuthority action. Sign the CSR with your Amazon Web Services Private CA-hosted\n\t\t\tor on-premises root or subordinate CA. Then import the signed certificate back into\n\t\t\tAmazon Web Services Private CA by calling the ImportCertificateAuthorityCertificate action. The CSR is returned as a\n\t\t\tbase64 PEM-encoded string.

", + "smithy.api#suppress": [ + "WaitableTraitInvalidErrorType" + ], "smithy.waiters#waitable": { "CertificateAuthorityCSRCreated": { "documentation": "Wait until a Certificate Authority CSR is created", @@ -2857,6 +2878,12 @@ "matcher": { "errorType": "RequestInProgressException" } + }, + { + "state": "failure", + "matcher": { + "errorType": "AccessDeniedException" + } } ], "minDelay": 3 @@ -3038,7 +3065,7 @@ } ], "traits": { - "smithy.api#documentation": "

Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you\n\t\t\tare using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call\n\t\t\tthis action, the following preparations must in place:

\n
  1. In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate.
  2. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR).
  3. Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA.
  4. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Amazon Web Services Private CA supports three scenarios for installing a CA certificate:

  • Installing a certificate for a root CA hosted by Amazon Web Services Private CA.
  • Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA.
  • Installing a subordinate CA certificate whose parent authority is externally hosted.

The following additional requirements apply when you import a CA certificate.

  • Only a self-signed certificate can be imported as a root CA.
  • A self-signed certificate cannot be imported as a subordinate CA.
  • Your certificate chain must not include the private CA certificate that you are importing.
  • Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.
  • The chain must be PEM-encoded.
  • The maximum allowed size of a certificate is 32 KB.
  • The maximum allowed size of a certificate chain is 2 MB.

Enforcement of Critical Constraints

Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain.

  • Basic constraints (must be marked critical)
  • Subject alternative names
  • Key usage
  • Extended key usage
  • Authority key identifier
  • Subject key identifier
  • Issuer alternative name
  • Subject directory attributes
  • Subject information access
  • Certificate policies
  • Policy mappings
  • Inhibit anyPolicy

Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain.

  • Name constraints
  • Policy constraints
  • CRL distribution points
  • Authority information access
  • Freshest CRL
  • Any other extension
" + "smithy.api#documentation": "

Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you\n\t\t\tare using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call\n\t\t\tthis action, the following preparations must be in place:

  1. In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you plan to back with the imported certificate.
  2. Call the GetCertificateAuthorityCsr action to generate a certificate signing request (CSR).
  3. Sign the CSR using a root or intermediate CA hosted by either an on-premises PKI hierarchy or by a commercial CA.
  4. Create a certificate chain and copy the signed certificate and the certificate chain to your working directory.

Amazon Web Services Private CA supports three scenarios for installing a CA certificate:

  • Installing a certificate for a root CA hosted by Amazon Web Services Private CA.
  • Installing a subordinate CA certificate whose parent authority is hosted by Amazon Web Services Private CA.
  • Installing a subordinate CA certificate whose parent authority is externally hosted.

The following additional requirements apply when you import a CA certificate.

  • Only a self-signed certificate can be imported as a root CA.
  • A self-signed certificate cannot be imported as a subordinate CA.
  • Your certificate chain must not include the private CA certificate that you are importing.
  • Your root CA must be the last certificate in your chain. The subordinate certificate, if any, that your root CA signed must be next to last. The subordinate certificate signed by the preceding subordinate CA must come next, and so on until your chain is built.
  • The chain must be PEM-encoded.
  • The maximum allowed size of a certificate is 32 KB.
  • The maximum allowed size of a certificate chain is 2 MB.

Enforcement of Critical Constraints

Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA certificate or chain.

  • Authority key identifier
  • Basic constraints (must be marked critical)
  • Certificate policies
  • Extended key usage
  • Inhibit anyPolicy
  • Issuer alternative name
  • Key usage
  • Name constraints
  • Policy mappings
  • Subject alternative name
  • Subject directory attributes
  • Subject key identifier
  • Subject information access

Amazon Web Services Private CA rejects the following extensions when they are marked critical in an imported CA certificate or chain.

  • Authority information access
  • CRL distribution points
  • Freshest CRL
  • Policy constraints

Amazon Web Services Private Certificate Authority will also reject any other extension that is marked as critical and is not contained in the preceding list of allowed extensions.
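
To make the workflow above concrete, here is a minimal sketch of calling this operation through Soto. The module name, request shape, and AWSBase64Data helpers are assumptions based on Soto's generated ACMPCA service; check the generated ACMPCA_shapes.swift for the exact names.

```swift
import Foundation
import SotoACMPCA  // assumed module name for the generated ACM PCA client

// Minimal sketch, not from the source model: import a signed CA certificate.
// Per the requirements above, the chain must be PEM-encoded, must not contain
// the certificate being imported, and must end with the root CA.
func importSignedCACertificate(
    acmpca: ACMPCA,
    caArn: String,
    signedCertificatePEM: String,
    chainPEMs: [String]            // ordered issuing CA first ... root CA last
) async throws {
    let chain = chainPEMs.joined(separator: "\n")
    let request = ACMPCA.ImportCertificateAuthorityCertificateRequest(
        certificate: .data(Data(signedCertificatePEM.utf8)),
        certificateAuthorityArn: caArn,
        certificateChain: .data(Data(chain.utf8))
    )
    try await acmpca.importCertificateAuthorityCertificate(request)
}
```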

" } }, "com.amazonaws.acmpca#ImportCertificateAuthorityCertificateRequest": { diff --git a/models/acm.json b/models/acm.json index 43a1fffc0d..b81b1ddf41 100644 --- a/models/acm.json +++ b/models/acm.json @@ -424,7 +424,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -467,7 +466,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -480,7 +480,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -494,7 +493,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -517,7 +515,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -552,7 +549,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -563,14 +559,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -584,14 +582,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -600,18 +596,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -620,7 +615,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -640,14 +636,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -661,7 +659,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -681,7 +678,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -692,14 +688,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -710,9 +708,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -2291,7 +2291,22 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of\n the certificate of the issuing CA and the intermediate certificates of any other subordinate\n CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode\n the certificates and inspect individual fields.

" + "smithy.api#documentation": "

Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of\n the certificate of the issuing CA and the intermediate certificates of any other subordinate\n CAs. All of the certificates are base64 encoded. You can use OpenSSL to decode\n the certificates and inspect individual fields.
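
A hedged Soto sketch of the call this documentation (and the new smoke test below) describes; the module, shape, and member names are assumed from the generated ACM service.

```swift
import SotoACM  // assumed module name for the generated ACM client

// Minimal sketch: fetch an issued certificate and its chain as PEM strings.
func printCertificateAndChain(acm: ACM, certificateArn: String) async throws {
    let response = try await acm.getCertificate(
        ACM.GetCertificateRequest(certificateArn: certificateArn)
    )
    print(response.certificate ?? "no certificate returned")
    print(response.certificateChain ?? "no chain returned")
}
```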

", + "smithy.test#smokeTests": [ + { + "id": "GetCertificateFailure", + "params": { + "CertificateArn": "arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.acm#GetCertificateRequest": { @@ -2697,7 +2712,20 @@ "outputToken": "NextToken", "items": "CertificateSummaryList", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListCertificatesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.acm#ListCertificatesRequest": { diff --git a/models/amplify.json b/models/amplify.json index a3f1beea1c..b3950ef5f8 100644 --- a/models/amplify.json +++ b/models/amplify.json @@ -1436,7 +1436,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the backend properties associated with an Amplify\n Branch.

" + "smithy.api#documentation": "

Describes the backend associated with an Amplify\n Branch.

\n

This property is available to Amplify Gen 2 apps only. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

" } }, "com.amazonaws.amplify#BackendEnvironment": { @@ -1484,7 +1484,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the backend environment for an Amplify app.

" + "smithy.api#documentation": "

Describes the backend environment associated with a Branch of a Gen 1\n Amplify app. Amplify Gen 1 applications are created\n using Amplify Studio or the Amplify command line\n interface (CLI).

" } }, "com.amazonaws.amplify#BackendEnvironmentArn": { @@ -1706,7 +1706,7 @@ "backendEnvironmentArn": { "target": "com.amazonaws.amplify#BackendEnvironmentArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify\n app.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify app.

\n

This property is available to Amplify Gen 1 apps only. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

" } }, "backend": { @@ -2082,7 +2082,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new backend environment for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to applications created using the\n Amplify Gen 2 public preview. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", + "smithy.api#documentation": "

Creates a new backend environment for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. \n When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", "smithy.api#http": { "method": "POST", "uri": "/apps/{appId}/backendenvironments", @@ -2290,13 +2290,13 @@ "backendEnvironmentArn": { "target": "com.amazonaws.amplify#BackendEnvironmentArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify\n app.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for a backend environment that is part of a Gen 1 Amplify\n app.

\n

This field is available to Amplify Gen 1 apps only where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI).

" } }, "backend": { "target": "com.amazonaws.amplify#Backend", "traits": { - "smithy.api#documentation": "

The backend for a Branch of an Amplify app. Use for a backend created from a CloudFormation stack.

" + "smithy.api#documentation": "

The backend for a Branch of an Amplify app. Use for a backend created from a CloudFormation stack.

\n

This field is available to Amplify Gen 2 apps only. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

" } } }, @@ -2787,7 +2787,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a backend environment for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend was created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to applications created using the\n Amplify Gen 2 public preview. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", + "smithy.api#documentation": "

Deletes a backend environment for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. \n When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", "smithy.api#http": { "method": "DELETE", "uri": "/apps/{appId}/backendenvironments/{environmentName}", @@ -3719,7 +3719,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a backend environment for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend was created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to applications created using the\n Amplify Gen 2 public preview. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", + "smithy.api#documentation": "

Returns a backend environment for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. \n When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", "smithy.api#http": { "method": "GET", "uri": "/apps/{appId}/backendenvironments/{environmentName}", @@ -4497,7 +4497,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the backend environments for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend was created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to applications created using the\n Amplify Gen 2 public preview. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", + "smithy.api#documentation": "

Lists the backend environments for an Amplify app.

\n

This API is available only to Amplify Gen 1 applications where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI). This API isn’t available to Amplify Gen 2 applications. \n When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

", "smithy.api#http": { "method": "GET", "uri": "/apps/{appId}/backendenvironments", @@ -6324,13 +6324,13 @@ "backendEnvironmentArn": { "target": "com.amazonaws.amplify#BackendEnvironmentArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for a backend environment that is part of an Amplify\n app.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for a backend environment that is part of a Gen 1 Amplify\n app.

\n

This field is available to Amplify Gen 1 apps only where the\n backend is created using Amplify Studio or the Amplify\n command line interface (CLI).

" } }, "backend": { "target": "com.amazonaws.amplify#Backend", "traits": { - "smithy.api#documentation": "

The backend for a Branch of an Amplify app. Use for a backend created from a CloudFormation stack.

" + "smithy.api#documentation": "

The backend for a Branch of an Amplify app. Use for a backend created from a CloudFormation stack.

\n

This field is available to Amplify Gen 2 apps only. When you deploy an application with\n Amplify Gen 2, you provision the app's backend infrastructure using Typescript\n code.

" } } }, diff --git a/models/application-auto-scaling.json b/models/application-auto-scaling.json index bcb5579c88..0d642e9a7d 100644 --- a/models/application-auto-scaling.json +++ b/models/application-auto-scaling.json @@ -1487,7 +1487,22 @@ "outputToken": "NextToken", "items": "ScalableTargets", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeScalableTargetsSuccess", + "params": { + "ServiceNamespace": "ec2" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.applicationautoscaling#DescribeScalableTargetsRequest": { diff --git a/models/application-discovery-service.json b/models/application-discovery-service.json index a47d155bc4..263d487b00 100644 --- a/models/application-discovery-service.json +++ b/models/application-discovery-service.json @@ -2373,7 +2373,20 @@ "outputToken": "nextToken", "items": "agentsInfo", "pageSize": "maxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeAgentsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.applicationdiscoveryservice#DescribeAgentsRequest": { diff --git a/models/application-signals.json b/models/application-signals.json new file mode 100644 index 0000000000..b292b17c31 --- /dev/null +++ b/models/application-signals.json @@ -0,0 +1,3192 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.applicationsignals#AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.applicationsignals#ServiceErrorMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "AccessDenied", + "httpResponseCode": 403 + }, + "smithy.api#documentation": "

You don't have sufficient permissions to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.applicationsignals#AccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.applicationsignals#AmazonResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.applicationsignals#ApplicationSignals": { + "type": "service", + "version": "2024-04-15", + "operations": [ + { + "target": "com.amazonaws.applicationsignals#BatchGetServiceLevelObjectiveBudgetReport" + }, + { + "target": "com.amazonaws.applicationsignals#GetService" + }, + { + "target": "com.amazonaws.applicationsignals#ListServiceDependencies" + }, + { + "target": "com.amazonaws.applicationsignals#ListServiceDependents" + }, + { + "target": "com.amazonaws.applicationsignals#ListServiceOperations" + }, + { + "target": "com.amazonaws.applicationsignals#ListServices" + }, + { + "target": "com.amazonaws.applicationsignals#ListTagsForResource" + }, + { + "target": "com.amazonaws.applicationsignals#StartDiscovery" + }, + { + "target": "com.amazonaws.applicationsignals#TagResource" + }, + { + "target": "com.amazonaws.applicationsignals#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveResource" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "Application Signals", + "arnNamespace": "application-signals", + "endpointPrefix": "application-signals" + }, + "aws.auth#sigv4": { + "name": "application-signals" + }, + "aws.endpoints#dualStackOnlyEndpoints": {}, + "aws.endpoints#standardRegionalEndpoints": {}, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "\n

This is a Preview release of the Application Signals API Reference. Operations and parameters are subject to change before the general availability\n release.

\n
\n

Use CloudWatch Application Signals for comprehensive observability of your cloud-based applications. \n It enables real-time service health dashboards and helps you track long-term performance trends against your business goals. \n The application-centric view provides you with unified visibility across your applications, services, and \n dependencies, so you can proactively monitor and efficiently triage any issues that may arise, \n ensuring optimal customer experience.

\n

Application Signals provides the following benefits:

  • Automatically collect metrics and traces from your applications, and display key metrics such as call volume, availability, latency, faults, and errors.
  • Create and monitor service level objectives (SLOs).
  • See a map of your application topology that Application Signals automatically discovers, which gives you a visual representation of your applications, dependencies, and their connectivity.
", + "smithy.api#title": "Amazon CloudWatch Application Signals", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://application-signals-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://application-signals.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { 
+ "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://application-signals.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.applicationsignals#Attainment": { + "type": "double" + }, + "com.amazonaws.applicationsignals#AttainmentGoal": { + "type": "double" + }, + "com.amazonaws.applicationsignals#AttributeMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.applicationsignals#AttributeMaps": { + "type": "list", + 
"member": { + "target": "com.amazonaws.applicationsignals#AttributeMap" + } + }, + "com.amazonaws.applicationsignals#Attributes": { + "type": "map", + "key": { + "target": "com.amazonaws.applicationsignals#KeyAttributeName" + }, + "value": { + "target": "com.amazonaws.applicationsignals#KeyAttributeValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 3 + } + } + }, + "com.amazonaws.applicationsignals#BatchGetServiceLevelObjectiveBudgetReport": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#BatchGetServiceLevelObjectiveBudgetReportInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#BatchGetServiceLevelObjectiveBudgetReportOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Use this operation to retrieve one or more service level objective (SLO) budget reports.

\n

An error budget is the amount of time in unhealthy periods that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly interval translates to an error budget of 21.9 minutes of downtime in a 30-day month.

Budget reports include a health indicator, the attainment value, and remaining budget.

For more information about SLO error budgets, see SLO concepts.
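
As a quick sanity check on the figures above (this arithmetic is not part of the model), the error budget is simply (1 - attainment goal) x interval length:

```swift
// Hedged sketch: error budget = (1 - attainment goal) * interval length.
// For a 99.95% goal over exactly 30 days this gives 21.6 minutes; the
// 21.9-minute figure quoted above corresponds to an average calendar month
// of roughly 30.44 days.
func errorBudgetMinutes(attainmentGoalPercent: Double, intervalDays: Double) -> Double {
    let intervalMinutes = intervalDays * 24 * 60
    return (1 - attainmentGoalPercent / 100) * intervalMinutes
}

let thirtyDayBudget = errorBudgetMinutes(attainmentGoalPercent: 99.95, intervalDays: 30)       // 21.6
let averageMonthBudget = errorBudgetMinutes(attainmentGoalPercent: 99.95, intervalDays: 30.44) // ~21.9
```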

", + "smithy.api#http": { + "method": "POST", + "uri": "/budget-report", + "code": 200 + } + } + }, + "com.amazonaws.applicationsignals#BatchGetServiceLevelObjectiveBudgetReportInput": { + "type": "structure", + "members": { + "Timestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that you want the report to be for. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

", + "smithy.api#required": {} + } + }, + "SloIds": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveIds", + "traits": { + "smithy.api#documentation": "

An array containing the IDs of the service level objectives that you want to include in the report.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#BatchGetServiceLevelObjectiveBudgetReportOutput": { + "type": "structure", + "members": { + "Timestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the report is for. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

", + "smithy.api#required": {} + } + }, + "Reports": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReports", + "traits": { + "smithy.api#documentation": "

An array of structures, where each structure is one budget report.

", + "smithy.api#required": {} + } + }, + "Errors": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportErrors", + "traits": { + "smithy.api#documentation": "

An array of structures, where each structure includes an error indicating that one \n of the requests in the array was not valid.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#BudgetSecondsRemaining": { + "type": "integer" + }, + "com.amazonaws.applicationsignals#CalendarInterval": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when you want the first interval to start. Be sure to choose a time that configures the \n intervals the way that you want. For example, if you want weekly intervals\n starting on Mondays at 6 a.m., be sure to specify a start time that is a Monday at 6 a.m.

\n

When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

\n

As soon as one calendar interval ends, another automatically begins.

", + "smithy.api#required": {} + } + }, + "DurationUnit": { + "target": "com.amazonaws.applicationsignals#DurationUnit", + "traits": { + "smithy.api#documentation": "

Specifies the calendar interval unit.

", + "smithy.api#required": {} + } + }, + "Duration": { + "target": "com.amazonaws.applicationsignals#CalendarIntervalDuration", + "traits": { + "smithy.api#documentation": "

Specifies the duration of each calendar interval. For example, if Duration is 1 and\n DurationUnit is MONTH, each interval is one month, aligned with the calendar.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

If the interval for this service level objective is a calendar interval, this structure contains the interval specifications.
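
A hedged sketch of constructing such an interval with the shapes generated from this model, using the weekly Monday 6 a.m. example mentioned above; the module and type names are assumptions.

```swift
import Foundation
import SotoApplicationSignals  // assumed module name for the generated client

// Minimal sketch: seven-day calendar intervals whose first period starts at a
// chosen epoch-seconds timestamp (here the 1698778057 example used above).
let weeklyInterval = ApplicationSignals.CalendarInterval(
    duration: 7,
    durationUnit: .day,
    startTime: Date(timeIntervalSince1970: 1_698_778_057)
)
```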

" + } + }, + "com.amazonaws.applicationsignals#CalendarIntervalDuration": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.applicationsignals#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This operation attempted to create a resource that already exists.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.applicationsignals#CreateServiceLevelObjective": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#CreateServiceLevelObjectiveInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#CreateServiceLevelObjectiveOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#AccessDeniedException" + }, + { + "target": "com.amazonaws.applicationsignals#ConflictException" + }, + { + "target": "com.amazonaws.applicationsignals#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a service level objective (SLO), which can help you ensure that your critical business operations are \n meeting customer expectations. Use SLOs to set and track specific target levels for the\n reliability and availability of your applications and services. SLOs use service level indicators (SLIs) to \n calculate whether the application is performing at the level that you want.

\n

Create an SLO to set a target for a service or operation’s availability or latency. CloudWatch measures this target frequently so that you can find out whether it has been breached.

\n

When you create an SLO, you set an attainment goal for it. An \n attainment goal is the \n ratio of good periods that meet the threshold requirements to the total periods within the interval. \n For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the \n periods to be in healthy state.

\n

After you have created an SLO, you can retrieve error budget reports for it. An error budget is the number of periods or amount of time that your service can accumulate during an interval before your overall SLO budget health is breached and the SLO is considered to be unmet. For example, an SLO requiring that 99.95% of requests complete in under 2000 ms every month translates to an error budget of 21.9 minutes of downtime per month.

\n

When you call this operation, Application Signals creates the AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already exist in your account. This service-linked role has the following permissions:

  • xray:GetServiceGraph
  • logs:StartQuery
  • logs:GetQueryResults
  • cloudwatch:GetMetricData
  • cloudwatch:ListMetrics
  • tag:GetResources
  • autoscaling:DescribeAutoScalingGroups

You can easily set SLO targets for your applications that are discovered by Application Signals, using critical metrics such as latency and availability. \n You can also set SLOs against any CloudWatch metric or math expression that produces a time series.

\n

For more information about SLOs, see Service level objectives (SLOs).
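
To make the attainment-goal arithmetic above concrete, here is a small illustrative helper; it is not part of the model or the generated SDK.

```swift
// Illustrative only: an SLO is met for an interval when the ratio of good
// periods to total periods reaches the attainment goal (for example, 99.9%).
func attainmentPercent(goodPeriods: Int, totalPeriods: Int) -> Double {
    guard totalPeriods > 0 else { return 100 }
    return Double(goodPeriods) / Double(totalPeriods) * 100
}

func isGoalMet(goodPeriods: Int, totalPeriods: Int, attainmentGoal: Double = 99.0) -> Bool {
    attainmentPercent(goodPeriods: goodPeriods, totalPeriods: totalPeriods) >= attainmentGoal
}
```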

", + "smithy.api#http": { + "method": "POST", + "uri": "/slo", + "code": 200 + } + } + }, + "com.amazonaws.applicationsignals#CreateServiceLevelObjectiveInput": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveName", + "traits": { + "smithy.api#documentation": "

A name for this SLO.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveDescription", + "traits": { + "smithy.api#documentation": "

An optional description for this SLO.

" + } + }, + "SliConfig": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorConfig", + "traits": { + "smithy.api#documentation": "

A structure that contains information about what service and what performance metric that this SLO will monitor.

", + "smithy.api#required": {} + } + }, + "Goal": { + "target": "com.amazonaws.applicationsignals#Goal", + "traits": { + "smithy.api#documentation": "

A structure that contains the attributes that determine the goal of the SLO. This includes\n the time period for evaluation and the attainment threshold.

" + } + }, + "Tags": { + "target": "com.amazonaws.applicationsignals#TagList", + "traits": { + "smithy.api#documentation": "

A list of key-value pairs to associate with the SLO. You can associate as many as 50 tags with an SLO.\n To be able to associate tags with the SLO when you create the SLO, you must\n have the cloudwatch:TagResource permission.

\n

Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#CreateServiceLevelObjectiveOutput": { + "type": "structure", + "members": { + "Slo": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjective", + "traits": { + "smithy.api#documentation": "

A structure that contains information about the SLO that you just created.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#DeleteServiceLevelObjective": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#DeleteServiceLevelObjectiveInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#DeleteServiceLevelObjectiveOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified service level objective.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/slo/{Id}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.applicationsignals#DeleteServiceLevelObjectiveInput": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveId", + "traits": { + "smithy.api#documentation": "

The ARN or name of the service level objective to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#DeleteServiceLevelObjectiveOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#Dimension": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.applicationsignals#DimensionName", + "traits": { + "smithy.api#documentation": "

The name of the dimension. Dimension names must contain only ASCII characters, must include \n at least one non-whitespace character, and cannot start with a colon (:).\n ASCII\n control characters are not supported as part of dimension names.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.applicationsignals#DimensionValue", + "traits": { + "smithy.api#documentation": "

The value of the dimension. Dimension values must contain only ASCII characters and must include \n at least one non-whitespace character. ASCII\n control characters are not supported as part of dimension values.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A dimension is a name/value pair that is part of the identity of a metric. Because dimensions are part of the unique \n identifier for a metric, whenever you add a unique name/value pair to one of \n your metrics, you are creating a new variation of that metric. For example, many Amazon EC2 metrics publish\n InstanceId as a dimension name, and the actual instance ID as the value for that dimension.

\n

You can assign up to 30 dimensions to a metric.
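
A hedged one-liner showing the InstanceId example above with the shape generated from this model (type and member names assumed):

```swift
// Hypothetical instance ID, purely for illustration.
let dimension = ApplicationSignals.Dimension(name: "InstanceId", value: "i-0123456789abcdef0")
```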

" + } + }, + "com.amazonaws.applicationsignals#DimensionName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.applicationsignals#DimensionValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.applicationsignals#Dimensions": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#Dimension" + }, + "traits": { + "smithy.api#length": { + "max": 30 + } + } + }, + "com.amazonaws.applicationsignals#DurationUnit": { + "type": "enum", + "members": { + "DAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DAY" + } + }, + "MONTH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MONTH" + } + } + } + }, + "com.amazonaws.applicationsignals#FaultDescription": { + "type": "string" + }, + "com.amazonaws.applicationsignals#GetService": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#GetServiceInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#GetServiceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about a service discovered by Application Signals.

", + "smithy.api#http": { + "method": "POST", + "uri": "/service", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#GetServiceInput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#httpQuery": "StartTime", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#httpQuery": "EndTime", + "smithy.api#required": {} + } + }, + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.
  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.
  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.
  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.
  • Environment specifies the location where this object is hosted, or what it belongs to.
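
A hedged sketch of passing these key attributes to the generated Soto client; the module, shape, and member names are assumed from this model.

```swift
import Foundation
import SotoApplicationSignals  // assumed module name for the generated client

// Minimal sketch: identify a service by Type/Name/Environment and fetch it
// for the last 24 hours. Shape and method names are assumed from this model.
func fetchService(signals: ApplicationSignals) async throws {
    let keyAttributes = [
        "Type": "Service",
        "Name": "checkout-service",       // hypothetical service name
        "Environment": "eks:prod-cluster" // hypothetical environment
    ]
    let output = try await signals.getService(
        ApplicationSignals.GetServiceInput(
            endTime: Date(),
            keyAttributes: keyAttributes,
            startTime: Date().addingTimeInterval(-86_400)
        )
    )
    print(output.service)
}
```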
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#GetServiceLevelObjective": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#GetServiceLevelObjectiveInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#GetServiceLevelObjectiveOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about one SLO created in the account.

", + "smithy.api#http": { + "method": "GET", + "uri": "/slo/{Id}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#GetServiceLevelObjectiveInput": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveId", + "traits": { + "smithy.api#documentation": "

The ARN or name of the SLO that you want to retrieve information about. You can find the ARNs \n of SLOs by using the ListServiceLevelObjectives\n operation.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#GetServiceLevelObjectiveOutput": { + "type": "structure", + "members": { + "Slo": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjective", + "traits": { + "smithy.api#documentation": "

A structure containing the information about the SLO.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#GetServiceOutput": { + "type": "structure", + "members": { + "Service": { + "target": "com.amazonaws.applicationsignals#Service", + "traits": { + "smithy.api#documentation": "

A structure containing information about the service.

", + "smithy.api#required": {} + } + }, + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start time of the data included in the response. In a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end time of the data included in the response. In a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#Goal": { + "type": "structure", + "members": { + "Interval": { + "target": "com.amazonaws.applicationsignals#Interval", + "traits": { + "smithy.api#documentation": "

The time period used to evaluate the SLO. It can be either a calendar interval or rolling interval.

\n

If you omit this parameter, a rolling interval of 7 days is used.

" + } + }, + "AttainmentGoal": { + "target": "com.amazonaws.applicationsignals#AttainmentGoal", + "traits": { + "smithy.api#documentation": "

The threshold that determines if the goal is being met. An attainment goal is the \n ratio of good periods that meet the threshold requirements to the total periods within the interval. \n For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the \n periods to be in healthy state.

\n

If you omit this parameter, 99 is used\n to represent 99% as the attainment goal.

" + } + }, + "WarningThreshold": { + "target": "com.amazonaws.applicationsignals#WarningThreshold", + "traits": { + "smithy.api#documentation": "

The percentage of remaining budget over total budget that you want to get warnings for. \n If you omit this parameter, the default of 50.0 is used.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains the attributes that determine the goal of an SLO. This includes\n the time period for evaluation and the attainment threshold.
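
A hedged sketch of a complete goal built from the shapes generated from this model (names assumed): 99.9% attainment over one-month calendar intervals, with a warning at 30% of budget remaining.

```swift
import Foundation
import SotoApplicationSignals  // assumed module name for the generated client

let goal = ApplicationSignals.Goal(
    attainmentGoal: 99.9,
    interval: .calendarInterval(
        ApplicationSignals.CalendarInterval(
            duration: 1,
            durationUnit: .month,
            startTime: Date(timeIntervalSince1970: 1_698_778_057)
        )
    ),
    warningThreshold: 30.0
)
```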

" + } + }, + "com.amazonaws.applicationsignals#Interval": { + "type": "union", + "members": { + "RollingInterval": { + "target": "com.amazonaws.applicationsignals#RollingInterval", + "traits": { + "smithy.api#documentation": "

If the interval is a rolling interval, this structure contains the interval specifications.

" + } + }, + "CalendarInterval": { + "target": "com.amazonaws.applicationsignals#CalendarInterval", + "traits": { + "smithy.api#documentation": "

If the interval is a calendar interval, this structure contains the interval specifications.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The time period used to evaluate the SLO. It can be either a calendar interval or rolling interval.

" + } + }, + "com.amazonaws.applicationsignals#KeyAttributeName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z]{1,50}$" + } + }, + "com.amazonaws.applicationsignals#KeyAttributeValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[ -~]*[!-~]+[ -~]*$" + } + }, + "com.amazonaws.applicationsignals#ListServiceDependencies": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#ListServiceDependenciesInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#ListServiceDependenciesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of service dependencies of the service that you specify. A dependency is an infrastructure \n component that an operation of this service connects with. Dependencies can include Amazon Web Services \n services, Amazon Web Services resources, and third-party services.\n

", + "smithy.api#http": { + "method": "POST", + "uri": "/service-dependencies", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "pageSize": "MaxResults", + "outputToken": "NextToken", + "items": "ServiceDependencies" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceDependenciesInput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#httpQuery": "StartTime", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#httpQuery": "EndTime", + "smithy.api#required": {} + } + }, + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.
  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.
  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.
  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.
  • Environment specifies the location where this object is hosted, or what it belongs to.
", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.applicationsignals#ListServiceDependenciesMaxResults", + "traits": { + "smithy.api#default": 20, + "smithy.api#documentation": "

The maximum number of results to return in one operation. If you omit this\n parameter, the default of 50 is used.

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value, if it was returned by the previous operation, to get the next set of service dependencies.

", + "smithy.api#httpQuery": "NextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceDependenciesMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ListServiceDependenciesOutput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#required": {} + } + }, + "ServiceDependencies": { + "target": "com.amazonaws.applicationsignals#ServiceDependencies", + "traits": { + "smithy.api#documentation": "

An array, where each object in the array contains information about one of the dependencies of this service.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value in your next use of this API to get next set \n of service dependencies.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceDependents": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#ListServiceDependentsInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#ListServiceDependentsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the list of dependents that invoked the specified service during the provided time range. Dependents include \n other services, CloudWatch Synthetics canaries, and clients that are instrumented with CloudWatch RUM app monitors.

", + "smithy.api#http": { + "method": "POST", + "uri": "/service-dependents", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "pageSize": "MaxResults", + "outputToken": "NextToken", + "items": "ServiceDependents" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceDependentsInput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#httpQuery": "StartTime", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057.

", + "smithy.api#httpQuery": "EndTime", + "smithy.api#required": {} + } + }, + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes.

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this is.
  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.
  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.
  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.
  • Environment specifies the location where this object is hosted, or what it belongs to.
", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.applicationsignals#ListServiceDependentsMaxResults", + "traits": { + "smithy.api#default": 20, + "smithy.api#documentation": "

The maximum number of results to return in one operation. If you omit this\n parameter, the default of 50 is used.

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value, if it was returned by the previous operation, to get the next set of service dependents.

", + "smithy.api#httpQuery": "NextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceDependentsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ListServiceDependentsOutput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#required": {} + } + }, + "ServiceDependents": { + "target": "com.amazonaws.applicationsignals#ServiceDependents", + "traits": { + "smithy.api#documentation": "

An array, where each object in the array contains information about one of the dependents of this service.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value in your next use of this API to get the next set \n of service dependents.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceLevelObjectives": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#ListServiceLevelObjectivesInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#ListServiceLevelObjectivesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of SLOs created in this account.

", + "smithy.api#http": { + "method": "POST", + "uri": "/slos", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "pageSize": "MaxResults", + "outputToken": "NextToken", + "items": "SloSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceLevelObjectivesInput": { + "type": "structure", + "members": { + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

You can use this optional field to specify which services you want to retrieve SLO information for.

\n

This is a string-to-string map. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object this is.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
" + } + }, + "OperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

The name of the operation that this SLO is associated with.

", + "smithy.api#httpQuery": "OperationName" + } + }, + "MaxResults": { + "target": "com.amazonaws.applicationsignals#ListServiceLevelObjectivesMaxResults", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The maximum number of results to return in one operation. If you omit this\n parameter, the default of 50 is used.

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value, if it was returned by the previous operation, to get the next set of service level objectives.

", + "smithy.api#httpQuery": "NextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceLevelObjectivesMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.applicationsignals#ListServiceLevelObjectivesOutput": { + "type": "structure", + "members": { + "SloSummaries": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveSummaries", + "traits": { + "smithy.api#documentation": "

An array of structures, where each structure contains information about one SLO.

" + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value in your next use of this API to get the next set \n of service level objectives.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceOperationMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ListServiceOperations": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#ListServiceOperationsInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#ListServiceOperationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of the operations of this service that have been discovered by Application Signals.\n Only the operations that were invoked during the specified time range are returned.

", + "smithy.api#http": { + "method": "POST", + "uri": "/service-operations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "pageSize": "MaxResults", + "outputToken": "NextToken", + "items": "ServiceOperations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceOperationsInput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#httpQuery": "StartTime", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#httpQuery": "EndTime", + "smithy.api#required": {} + } + }, + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

Use this field to specify which service you want to retrieve information for. You must specify at least the Type, \n Name, and Environment attributes.

\n

This is a string-to-string map. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object this is.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.applicationsignals#ListServiceOperationMaxResults", + "traits": { + "smithy.api#default": 20, + "smithy.api#documentation": "

The maximum number of results to return in one operation. If you omit this\n parameter, the default of 50 is used.

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value, if it was returned by the previous operation, to get the next set of service operations.

", + "smithy.api#httpQuery": "NextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#ListServiceOperationsOutput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#required": {} + } + }, + "ServiceOperations": { + "target": "com.amazonaws.applicationsignals#ServiceOperations", + "traits": { + "smithy.api#documentation": "

An array of structures that each contain information about one operation of this service.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value in your next use of this API to get the next set \n of service operations.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#ListServices": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#ListServicesInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#ListServicesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of services that have been discovered by Application Signals. \n A service represents a minimum logical and transactional unit that completes a business function. Services\n are discovered through Application Signals instrumentation.

", + "smithy.api#http": { + "method": "GET", + "uri": "/services", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "pageSize": "MaxResults", + "outputToken": "NextToken", + "items": "ServiceSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#ListServicesInput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#httpQuery": "StartTime", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#httpQuery": "EndTime", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.applicationsignals#ListServicesMaxResults", + "traits": { + "smithy.api#default": 50, + "smithy.api#documentation": "

\n The maximum number of results to return in one operation. If you omit this parameter, the default of 50 is used.\n

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value, if it was returned by the previous operation, to get the next set of services.

", + "smithy.api#httpQuery": "NextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#ListServicesMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ListServicesOutput": { + "type": "structure", + "members": { + "StartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the time period that the returned information applies to. When used in a raw HTTP Query API, it is formatted as \n epoch time in seconds. For example: 1698778057\n

", + "smithy.api#required": {} + } + }, + "ServiceSummaries": { + "target": "com.amazonaws.applicationsignals#ServiceSummaries", + "traits": { + "smithy.api#documentation": "

An array of structures, where each structure contains some information about a service. To\n get complete information about a service, use \n GetService.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.applicationsignals#NextToken", + "traits": { + "smithy.api#documentation": "

Include this value in your next use of this API to get the next set \n of services.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.applicationsignals#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Displays the tags associated with a CloudWatch resource. Tags can be assigned to service level objectives.

", + "smithy.api#http": { + "method": "GET", + "uri": "/tags", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.applicationsignals#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.applicationsignals#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the CloudWatch resource that you want to view tags for.

\n

The ARN format of an Application Signals SLO is \n arn:aws:cloudwatch:Region:account-id:slo:slo-name\n \n

\n

For more information about ARN format, see Resource\n Types Defined by Amazon CloudWatch in the Amazon Web Services General\n Reference.

", + "smithy.api#httpQuery": "ResourceArn", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.applicationsignals#TagList", + "traits": { + "smithy.api#documentation": "

The list of tag keys and values associated with the resource you specified.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#Metric": { + "type": "structure", + "members": { + "Namespace": { + "target": "com.amazonaws.applicationsignals#Namespace", + "traits": { + "smithy.api#documentation": "

The namespace of the metric. For more information, see \n Namespaces.

" + } + }, + "MetricName": { + "target": "com.amazonaws.applicationsignals#MetricName", + "traits": { + "smithy.api#documentation": "

The name of the metric to use.

" + } + }, + "Dimensions": { + "target": "com.amazonaws.applicationsignals#Dimensions", + "traits": { + "smithy.api#documentation": "

An array of one or more dimensions to use to define the metric that you want to use. \n For more information, see \n Dimensions.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure defines the metric used for a service level indicator, including the metric name, namespace, and dimensions.

" + } + }, + "com.amazonaws.applicationsignals#MetricDataQueries": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#MetricDataQuery" + } + }, + "com.amazonaws.applicationsignals#MetricDataQuery": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.applicationsignals#MetricId", + "traits": { + "smithy.api#documentation": "

A short name used to tie this object to the results in the response. This Id must be unique \n within a MetricDataQueries array. If you are performing math expressions on this set of data, \n this name represents that data and can serve as a variable in the metric math expression. The valid characters \n are letters, numbers, and underscore. The first character must be a lowercase letter.

", + "smithy.api#required": {} + } + }, + "MetricStat": { + "target": "com.amazonaws.applicationsignals#MetricStat", + "traits": { + "smithy.api#documentation": "

A metric to be used directly for the SLO, or to be used in the math expression that will be used for the SLO.

\n

Within one MetricDataQuery object, you must specify either \n Expression or MetricStat but not both.

" + } + }, + "Expression": { + "target": "com.amazonaws.applicationsignals#MetricExpression", + "traits": { + "smithy.api#documentation": "

This field can contain a metric math expression to be performed on the other metrics that\n you are retrieving within this MetricDataQueries structure.

\n

A math expression\n can use the Id of the other metrics or queries to refer to those metrics, and can also use \n the Id of other \n expressions to use the result of those expressions. For more information about metric math expressions, see \n Metric Math Syntax and Functions in the\n Amazon CloudWatch User Guide.

\n

Within each MetricDataQuery object, you must specify either \n Expression or MetricStat but not both.

" + } + }, + "Label": { + "target": "com.amazonaws.applicationsignals#MetricLabel", + "traits": { + "smithy.api#documentation": "

A human-readable label for this metric or expression. This is especially useful \n if this is an expression, so that you know\n what the value represents. If the metric or expression is shown in a \n CloudWatch dashboard widget, the label is shown. If Label is omitted, CloudWatch \n generates a default.

\n

You can put dynamic expressions into a label, so that it is more descriptive. \n For more information, see Using Dynamic Labels.

" + } + }, + "ReturnData": { + "target": "com.amazonaws.applicationsignals#ReturnData", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

Use this only if you are using a metric math expression for the SLO. \n Specify true for ReturnData for only the one expression result to use as the alarm. For all \n other metrics and expressions in the same CreateServiceLevelObjective operation, specify ReturnData as false.

" + } + }, + "Period": { + "target": "com.amazonaws.applicationsignals#Period", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The granularity, in seconds, of the returned data points for this metric. For metrics with regular resolution, a period can\n be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected\n at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics\n are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

\n

If the StartTime parameter specifies a time stamp that is greater than\n 3 hours ago, you must specify the period as follows or no data points in that time range are returned:

\n
    \n
  • \n

    Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).

    \n
  • \n
  • \n

    Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).

    \n
  • \n
  • \n

    Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).

    \n
  • \n
" + } + }, + "AccountId": { + "target": "com.amazonaws.applicationsignals#AccountId", + "traits": { + "smithy.api#documentation": "

The ID of the account where this metric is located. If you are performing this operation in a monitoring account, \n use this to specify which source account to retrieve this metric from.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Use this structure to define a metric or metric math expression that you want to use for a service level objective.

\n

Each MetricDataQuery in the MetricDataQueries array specifies either a metric to retrieve, or a metric math expression \n to be performed on retrieved metrics. A single MetricDataQueries array can include as many as 20 MetricDataQuery structures in the array. \n The 20 structures can include as many as 10 structures that contain a MetricStat parameter to retrieve a metric, and as many as 10 structures that \n contain the Expression parameter to perform a math expression. Of those Expression structures, \n exactly one must have true as the value for ReturnData. The result of this expression is used for the SLO.

\n

For more information about metric math expressions, see \n Use metric math in the Amazon CloudWatch User Guide.

\n

Within each MetricDataQuery object, you must specify either \n Expression or MetricStat but not both.

" + } + }, + "com.amazonaws.applicationsignals#MetricExpression": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.applicationsignals#MetricId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.applicationsignals#MetricLabel": { + "type": "string" + }, + "com.amazonaws.applicationsignals#MetricName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.applicationsignals#MetricReference": { + "type": "structure", + "members": { + "Namespace": { + "target": "com.amazonaws.applicationsignals#Namespace", + "traits": { + "smithy.api#documentation": "

The namespace of the metric. For more information, see \n Namespaces.

", + "smithy.api#required": {} + } + }, + "MetricType": { + "target": "com.amazonaws.applicationsignals#MetricType", + "traits": { + "smithy.api#documentation": "

Used to display the appropriate statistics in the CloudWatch console.

", + "smithy.api#required": {} + } + }, + "Dimensions": { + "target": "com.amazonaws.applicationsignals#Dimensions", + "traits": { + "smithy.api#documentation": "

An array of one or more dimensions that further define the metric. \n For more information, see \n Dimensions.

" + } + }, + "MetricName": { + "target": "com.amazonaws.applicationsignals#MetricName", + "traits": { + "smithy.api#documentation": "

The name of the metric.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about one CloudWatch metric associated with this entity discovered\n by Application Signals.

" + } + }, + "com.amazonaws.applicationsignals#MetricReferences": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#MetricReference" + } + }, + "com.amazonaws.applicationsignals#MetricStat": { + "type": "structure", + "members": { + "Metric": { + "target": "com.amazonaws.applicationsignals#Metric", + "traits": { + "smithy.api#documentation": "

The metric to use as the service level indicator, including the metric name, namespace, and dimensions.

", + "smithy.api#required": {} + } + }, + "Period": { + "target": "com.amazonaws.applicationsignals#Period", + "traits": { + "smithy.api#documentation": "

The granularity, in seconds, to be used for the metric. For metrics with regular resolution, a period can\n be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected\n at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics\n are those metrics stored by a PutMetricData call that includes a StorageResolution of 1 second.

", + "smithy.api#required": {} + } + }, + "Stat": { + "target": "com.amazonaws.applicationsignals#Stat", + "traits": { + "smithy.api#documentation": "

The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, \n see CloudWatch statistics definitions.

", + "smithy.api#required": {} + } + }, + "Unit": { + "target": "com.amazonaws.applicationsignals#StandardUnit", + "traits": { + "smithy.api#documentation": "

If you omit Unit then all data that was collected with any unit is returned, along with the corresponding units that were specified\n when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified.\n If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure defines the metric to be used as the service level indicator, along with the statistics, period, and unit.

" + } + }, + "com.amazonaws.applicationsignals#MetricType": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9 -]+$" + } + }, + "com.amazonaws.applicationsignals#Namespace": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "[^:].*" + } + }, + "com.amazonaws.applicationsignals#NextToken": { + "type": "string" + }, + "com.amazonaws.applicationsignals#OperationName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.applicationsignals#Period": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.applicationsignals#ResourceId": { + "type": "string" + }, + "com.amazonaws.applicationsignals#ResourceNotFoundException": { + "type": "structure", + "members": { + "ResourceType": { + "target": "com.amazonaws.applicationsignals#ResourceType", + "traits": { + "smithy.api#documentation": "

The resource type is not valid.

", + "smithy.api#required": {} + } + }, + "ResourceId": { + "target": "com.amazonaws.applicationsignals#ResourceId", + "traits": { + "smithy.api#documentation": "

Cannot find the resource id.

", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.applicationsignals#FaultDescription", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Resource not found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.applicationsignals#ResourceType": { + "type": "string" + }, + "com.amazonaws.applicationsignals#ReturnData": { + "type": "boolean" + }, + "com.amazonaws.applicationsignals#RollingInterval": { + "type": "structure", + "members": { + "DurationUnit": { + "target": "com.amazonaws.applicationsignals#DurationUnit", + "traits": { + "smithy.api#documentation": "

Specifies the rolling interval unit.

", + "smithy.api#required": {} + } + }, + "Duration": { + "target": "com.amazonaws.applicationsignals#RollingIntervalDuration", + "traits": { + "smithy.api#documentation": "

Specifies the duration of each rolling interval. For example, if Duration is 7 and\n DurationUnit is DAY, each rolling interval is seven days.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

If the interval for this SLO is a rolling interval, this structure contains the interval specifications.

" + } + }, + "com.amazonaws.applicationsignals#RollingIntervalDuration": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.applicationsignals#SLIPeriodSeconds": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 60, + "max": 900 + } + } + }, + "com.amazonaws.applicationsignals#Service": { + "type": "structure", + "members": { + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

This is a string-to-string map. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object this is.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "AttributeMaps": { + "target": "com.amazonaws.applicationsignals#AttributeMaps", + "traits": { + "smithy.api#documentation": "

This structure contains one or more string-to-string maps that help identify this service. It can include platform attributes, application attributes, and telemetry attributes.

\n

Platform attributes contain information about the service's platform.

\n
    \n
  • \n

    \n PlatformType defines the hosted-in platform.

    \n
  • \n
  • \n

    \n EKS.Cluster is the name of the Amazon EKS cluster.

    \n
  • \n
  • \n

    \n K8s.Cluster is the name of the self-hosted Kubernetes cluster.

    \n
  • \n
  • \n

    \n K8s.Namespace is the name of the Kubernetes namespace in either Amazon EKS or Kubernetes clusters.

    \n
  • \n
  • \n

    \n K8s.Workload is the name of the Kubernetes workload in either Amazon EKS or Kubernetes clusters.

    \n
  • \n
  • \n

    \n K8s.Node is the name of the Kubernetes node in either Amazon EKS or Kubernetes clusters.

    \n
  • \n
  • \n

    \n K8s.Pod is the name of the Kubernetes pod in either Amazon EKS or Kubernetes clusters.

    \n
  • \n
  • \n

    \n EC2.AutoScalingGroup is the name of the Amazon EC2 Auto Scaling group.

    \n
  • \n
  • \n

    \n EC2.InstanceId is the ID of the Amazon EC2 instance.

    \n
  • \n
  • \n

    \n Host is the name of the host, for all platform types.

    \n
  • \n
\n

Application attributes contain information about the application.

\n
    \n
  • \n

    \n AWS.Application is the application's name in Amazon Web Services Service Catalog AppRegistry.

    \n
  • \n
  • \n

    \n AWS.Application.ARN is the application's ARN in Amazon Web Services Service Catalog AppRegistry.

    \n
  • \n
\n

Telemetry attributes contain telemetry information.

\n
    \n
  • \n

    \n Telemetry.SDK is the fingerprint of the OpenTelemetry SDK version for instrumented services.

    \n
  • \n
  • \n

    \n Telemetry.Agent is the fingerprint of the agent used to collect and send telemetry data.

    \n
  • \n
  • \n

    \n Telemetry.Source specifies the point of application where the telemetry was collected, or what was used as the source of the telemetry data.

    \n
  • \n
" + } + }, + "MetricReferences": { + "target": "com.amazonaws.applicationsignals#MetricReferences", + "traits": { + "smithy.api#documentation": "

An array of structures that each contain information about one metric associated with this service.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about one of your services that was discovered by Application Signals.\n

" + } + }, + "com.amazonaws.applicationsignals#ServiceDependencies": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceDependency" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ServiceDependency": { + "type": "structure", + "members": { + "OperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

The name of the operation in this service that calls the dependency.

", + "smithy.api#required": {} + } + }, + "DependencyKeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

This is a string-to-string map. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object this is.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "DependencyOperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

The name of the called operation in the dependency.

", + "smithy.api#required": {} + } + }, + "MetricReferences": { + "target": "com.amazonaws.applicationsignals#MetricReferences", + "traits": { + "smithy.api#documentation": "

An array of structures that each contain information about one metric associated with this service dependency\n that was discovered by\n Application Signals.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about one dependency\n of this service.

" + } + }, + "com.amazonaws.applicationsignals#ServiceDependent": { + "type": "structure", + "members": { + "OperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

If the invoked entity is an operation on an entity, the name of that dependent operation is displayed here.

" + } + }, + "DependentKeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

This is a string-to-string map. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object this is.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "DependentOperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

If the dependent invoker was a service that invoked it from an operation, the name of that dependent operation\n is displayed here.

" + } + }, + "MetricReferences": { + "target": "com.amazonaws.applicationsignals#MetricReferences", + "traits": { + "smithy.api#documentation": "

An array of structures that each contain information about one metric associated with this service dependent\n that was discovered by\n Application Signals.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about a service dependent that was discovered by Application Signals. A \n dependent is an entity that invoked the specified service during the provided time range. Dependents include \n other services, CloudWatch Synthetics canaries, and clients that are instrumented with CloudWatch RUM app monitors.

" + } + }, + "com.amazonaws.applicationsignals#ServiceDependents": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceDependent" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ServiceErrorMessage": { + "type": "string" + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicator": { + "type": "structure", + "members": { + "SliMetric": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetric", + "traits": { + "smithy.api#documentation": "

A structure that contains information about the metric that the SLO monitors.

", + "smithy.api#required": {} + } + }, + "MetricThreshold": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricThreshold", + "traits": { + "smithy.api#documentation": "

The value that the SLI metric is compared to.

", + "smithy.api#required": {} + } + }, + "ComparisonOperator": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorComparisonOperator", + "traits": { + "smithy.api#documentation": "

The arithmetic operation used when comparing the specified metric to the\n threshold.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about the performance metric that an SLO monitors.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorComparisonOperator": { + "type": "enum", + "members": { + "GREATER_THAN_OR_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GreaterThanOrEqualTo" + } + }, + "GREATER_THAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GreaterThan" + } + }, + "LESS_THAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LessThan" + } + }, + "LESS_THAN_OR_EQUAL_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LessThanOrEqualTo" + } + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorConfig": { + "type": "structure", + "members": { + "SliMetricConfig": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricConfig", + "traits": { + "smithy.api#documentation": "

Use this structure to specify the metric to be used for the SLO.

", + "smithy.api#required": {} + } + }, + "MetricThreshold": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricThreshold", + "traits": { + "smithy.api#documentation": "

The value that the SLI metric is compared to.

", + "smithy.api#required": {} + } + }, + "ComparisonOperator": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorComparisonOperator", + "traits": { + "smithy.api#documentation": "

The arithmetic operation to use when comparing the specified metric to the\n threshold.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure specifies the information about the service and the performance metric that an SLO is to monitor.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetric": { + "type": "structure", + "members": { + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

This is a string-to-string map that contains information about the type of object that this SLO is related to. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object that this SLO is related to.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
" + } + }, + "OperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

If the SLO monitors a specific operation of the service, this field displays that operation name.

" + } + }, + "MetricType": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricType", + "traits": { + "smithy.api#documentation": "

If the SLO monitors either the LATENCY or AVAILABILITY metric that Application Signals \n collects, this field displays which of those metrics is used.

" + } + }, + "MetricDataQueries": { + "target": "com.amazonaws.applicationsignals#MetricDataQueries", + "traits": { + "smithy.api#documentation": "

If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, \n this structure includes the information about that metric or expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains the information about the metric that is used for the SLO.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricConfig": { + "type": "structure", + "members": { + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

If this SLO is related to a metric collected by Application Signals, you must use this field to specify which service \n the SLO metric is related to. To do so, you must specify at least the Type, \n Name, and Environment attributes.

\n

This is a string-to-string map. It can \n include the following fields.

\n
    \n
  • \n

    \n Type designates the type of object this is.

    \n
  • \n
  • \n

    \n ResourceType specifies the type of the resource. This field is used only\n when the value of the Type field is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Name specifies the name of the object. This is used only if the value of the Type field\n is Service, RemoteService, or AWS::Service.

    \n
  • \n
  • \n

    \n Identifier identifies the resource objects of this resource. \n This is used only if the value of the Type field\n is Resource or AWS::Resource.

    \n
  • \n
  • \n

    \n Environment specifies the location where this object is hosted, or what it belongs to.

    \n
  • \n
" + } + }, + "OperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

If the SLO is to monitor a specific operation of the service, use this field to specify the name of that operation.

" + } + }, + "MetricType": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricType", + "traits": { + "smithy.api#documentation": "

If the SLO is to monitor either the LATENCY or AVAILABILITY metric that Application Signals \n collects, use this field to specify which of those metrics is used.

" + } + }, + "Statistic": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorStatistic", + "traits": { + "smithy.api#documentation": "

The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, \n see CloudWatch statistics definitions.

" + } + }, + "PeriodSeconds": { + "target": "com.amazonaws.applicationsignals#SLIPeriodSeconds", + "traits": { + "smithy.api#documentation": "

The number of seconds to use as the period for SLO evaluation. Your application's performance is compared to the \n SLI during each period. For each period, the application is determined to have either achieved or not achieved the necessary performance.

" + } + }, + "MetricDataQueries": { + "target": "com.amazonaws.applicationsignals#MetricDataQueries", + "traits": { + "smithy.api#documentation": "

If this SLO monitors a CloudWatch metric or the result of a CloudWatch metric math expression, \n use this structure to specify that metric or expression.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Use this structure to specify the information for the metric that the SLO will monitor.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricThreshold": { + "type": "double" + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorMetricType": { + "type": "enum", + "members": { + "LATENCY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LATENCY" + } + }, + "AVAILABILITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AVAILABILITY" + } + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelIndicatorStatistic": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + }, + "smithy.api#pattern": "^[a-zA-Z0-9.]+$" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjective": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveArn", + "traits": { + "smithy.api#documentation": "

The ARN of this SLO.

", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveName", + "traits": { + "smithy.api#documentation": "

The name of this SLO.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveDescription", + "traits": { + "smithy.api#documentation": "

The description that you created for this SLO.

" + } + }, + "CreatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that this SLO was created. When used in a raw HTTP Query API, it is formatted as \n yyyy-MM-dd'T'HH:mm:ss. For example, \n 2019-07-01T23:59:59.

", + "smithy.api#required": {} + } + }, + "LastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time that this SLO was most recently updated. When used in a raw HTTP Query API, it is formatted as \n yyyy-MM-dd'T'HH:mm:ss. For example, \n 2019-07-01T23:59:59.

", + "smithy.api#required": {} + } + }, + "Sli": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicator", + "traits": { + "smithy.api#documentation": "

A structure containing information about the performance metric that this SLO monitors.

", + "smithy.api#required": {} + } + }, + "Goal": { + "target": "com.amazonaws.applicationsignals#Goal", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing information about one service level objective (SLO) that has been created in Application Signals.\n Creating SLOs can help you ensure your services are \n performing to the level that you expect. SLOs help you set and track a specific target level for the\n reliability and availability of your applications and services. Each SLO uses a service level indicator (SLI), which is \n a key performance metric, to \n calculate how much underperformance can be tolerated before the goal that you set for the SLO is not achieved.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReport": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveArn", + "traits": { + "smithy.api#documentation": "

The ARN of the SLO that this report is for.

", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveName", + "traits": { + "smithy.api#documentation": "

The name of the SLO that this report is for.

", + "smithy.api#required": {} + } + }, + "BudgetStatus": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetStatus", + "traits": { + "smithy.api#documentation": "

The status of this SLO, as it relates to the error budget for the entire time interval.

\n
    \n
  • \n

    \n OK means that the SLO had remaining budget above the warning threshold,\n as of the time that you specified in TimeStamp.

    \n
  • \n
  • \n

    \n WARNING means that the SLO's remaining budget was below the warning threshold,\n as of the time that you specified in TimeStamp.

    \n
  • \n
  • \n

    \n BREACHED means that the SLO's budget was exhausted,\n as of the time that you specified in TimeStamp.

    \n
  • \n
  • \n

    \n INSUFFICIENT_DATA means that the specified start and end times were before the\n SLO was created, or that attainment data is missing.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "Attainment": { + "target": "com.amazonaws.applicationsignals#Attainment", + "traits": { + "smithy.api#documentation": "

A number between 0 and 100 that represents the percentage of time periods that the service has \n attained the SLO's attainment goal, as of the time of the request.

" + } + }, + "TotalBudgetSeconds": { + "target": "com.amazonaws.applicationsignals#TotalBudgetSeconds", + "traits": { + "smithy.api#documentation": "

The total number of seconds in the error budget for the interval.

" + } + }, + "BudgetSecondsRemaining": { + "target": "com.amazonaws.applicationsignals#BudgetSecondsRemaining", + "traits": { + "smithy.api#documentation": "

The budget amount remaining before the SLO status becomes BREACHED, at the time specified in\n the \n Timestamp parameter of the request. If this value is negative, then the SLO is already in BREACHED\n status.

" + } + }, + "Sli": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicator", + "traits": { + "smithy.api#documentation": "

A structure that contains information about the performance metric that this SLO monitors.

" + } + }, + "Goal": { + "target": "com.amazonaws.applicationsignals#Goal" + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing an SLO budget report that you have requested.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportError": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveName", + "traits": { + "smithy.api#documentation": "

The name of the SLO that this error is related to.

", + "smithy.api#required": {} + } + }, + "Arn": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveArn", + "traits": { + "smithy.api#documentation": "

The ARN of the SLO that this error is related to.

", + "smithy.api#required": {} + } + }, + "ErrorCode": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for this error.

", + "smithy.api#required": {} + } + }, + "ErrorMessage": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportErrorMessage", + "traits": { + "smithy.api#documentation": "

The message for this error.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing information about one error that occurred during a \n BatchGetServiceLevelObjectiveBudgetReport\n operation.

" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportErrorCode": { + "type": "string" + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportErrorMessage": { + "type": "string" + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportErrors": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReportError" + }, + "traits": { + "smithy.api#length": { + "max": 50 + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReports": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetReport" + }, + "traits": { + "smithy.api#length": { + "max": 50 + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveBudgetStatus": { + "type": "enum", + "members": { + "OK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OK" + } + }, + "WARNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WARNING" + } + }, + "BREACHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BREACHED" + } + }, + "INSUFFICIENT_DATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSUFFICIENT_DATA" + } + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$|^arn:aws:application-signals:[^:]*:[^:]*:slo/[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveIds": { + "type": "list", + "member": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9A-Za-z][-._0-9A-Za-z ]{0,126}[0-9A-Za-z]$" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveResource": { + "type": "resource", + "identifiers": { + "Id": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveId" + } + }, + "create": { + "target": "com.amazonaws.applicationsignals#CreateServiceLevelObjective" + }, + "read": { + "target": "com.amazonaws.applicationsignals#GetServiceLevelObjective" + }, + "update": { + "target": "com.amazonaws.applicationsignals#UpdateServiceLevelObjective" + }, + "delete": { + "target": "com.amazonaws.applicationsignals#DeleteServiceLevelObjective" + }, + "list": { + "target": "com.amazonaws.applicationsignals#ListServiceLevelObjectives" + }, + "traits": { + "aws.api#arn": { + "template": "{Id}", + "absolute": false + } + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveSummary" + } + }, + "com.amazonaws.applicationsignals#ServiceLevelObjectiveSummary": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveArn", + "traits": { + "smithy.api#documentation": "

The ARN of this service level objective.

", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveName", + "traits": { + "smithy.api#documentation": "

The name of the service level objective.

", + "smithy.api#required": {} + } + }, + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

This is a string-to-string map. It can include the following fields.

  • Type designates the type of object this service level objective is for.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.
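
For illustration only (this example is not part of the model file), a key-attributes map for an SLO that monitors a service might look like the following Swift sketch; every value is invented.

// Hypothetical KeyAttributes map; the keys come from the list above,
// the values are made up for illustration.
let keyAttributes: [String: String] = [
    "Type": "Service",                // the kind of object this SLO is for
    "Name": "checkout-service",       // hypothetical service name
    "Environment": "eks:demo-cluster" // hypothetical hosting environment
]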
" + } + }, + "OperationName": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

If this service level objective is specific to a single operation, this \n field displays the name of that operation.

" + } + }, + "CreatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that this service level objective was created. It is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains information about one service level objective (SLO) created in Application Signals.

" + } + }, + "com.amazonaws.applicationsignals#ServiceOperation": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.applicationsignals#OperationName", + "traits": { + "smithy.api#documentation": "

The name of the operation, discovered by Application Signals.

", + "smithy.api#required": {} + } + }, + "MetricReferences": { + "target": "com.amazonaws.applicationsignals#MetricReferences", + "traits": { + "smithy.api#documentation": "

An array of structures that each contain information about one metric associated with this service operation\n that was discovered by\n Application Signals.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about an operation discovered by Application Signals. An operation\n is a specific function performed by a service that was discovered by Application Signals, and is often an API \n that is called by an upstream dependent.\n

" + } + }, + "com.amazonaws.applicationsignals#ServiceOperations": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceOperation" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.applicationsignals#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This request exceeds a service quota.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.applicationsignals#ServiceSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#ServiceSummary" + } + }, + "com.amazonaws.applicationsignals#ServiceSummary": { + "type": "structure", + "members": { + "KeyAttributes": { + "target": "com.amazonaws.applicationsignals#Attributes", + "traits": { + "smithy.api#documentation": "

This is a string-to-string map that helps identify the objects discovered by Application Signals. It can include the following fields.

  • Type designates the type of object this is.

  • ResourceType specifies the type of the resource. This field is used only when the value of the Type field is Resource or AWS::Resource.

  • Name specifies the name of the object. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service.

  • Identifier identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource.

  • Environment specifies the location where this object is hosted, or what it belongs to.
", + "smithy.api#required": {} + } + }, + "AttributeMaps": { + "target": "com.amazonaws.applicationsignals#AttributeMaps", + "traits": { + "smithy.api#documentation": "

This structure contains one or more string-to-string maps that help identify this service. It can include platform attributes, application attributes, and telemetry attributes.

Platform attributes contain information about the service's platform.

  • PlatformType defines the hosted-in platform.

  • EKS.Cluster is the name of the Amazon EKS cluster.

  • K8s.Cluster is the name of the self-hosted Kubernetes cluster.

  • K8s.Namespace is the name of the Kubernetes namespace in either Amazon EKS or Kubernetes clusters.

  • K8s.Workload is the name of the Kubernetes workload in either Amazon EKS or Kubernetes clusters.

  • K8s.Node is the name of the Kubernetes node in either Amazon EKS or Kubernetes clusters.

  • K8s.Pod is the name of the Kubernetes pod in either Amazon EKS or Kubernetes clusters.

  • EC2.AutoScalingGroup is the name of the Amazon EC2 Auto Scaling group.

  • EC2.InstanceId is the ID of the Amazon EC2 instance.

  • Host is the name of the host, for all platform types.

Application attributes contain information about the application.

  • AWS.Application is the application's name in Amazon Web Services Service Catalog AppRegistry.

  • AWS.Application.ARN is the application's ARN in Amazon Web Services Service Catalog AppRegistry.

Telemetry attributes contain telemetry information.

  • Telemetry.SDK is the fingerprint of the OpenTelemetry SDK version for instrumented services.

  • Telemetry.Agent is the fingerprint of the agent used to collect and send telemetry data.

  • Telemetry.Source specifies the point of application where the telemetry was collected or specifies what was used for the source of telemetry data.
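
As a purely illustrative aside (not part of the model), one entry in AttributeMaps for an EKS-hosted service could look like the Swift sketch below; the attribute names follow the list above, while every value, including the PlatformType string, is assumed.

// Hypothetical attribute map for a service running on Amazon EKS.
// Names come from the documentation above; all values are invented.
let platformAttributes: [String: String] = [
    "PlatformType": "AWS::EKS",            // assumed value format
    "EKS.Cluster": "demo-cluster",
    "K8s.Namespace": "default",
    "K8s.Workload": "checkout-deployment",
    "Host": "ip-10-0-0-12.ec2.internal"
]
let attributeMaps: [[String: String]] = [platformAttributes]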
" + } + }, + "MetricReferences": { + "target": "com.amazonaws.applicationsignals#MetricReferences", + "traits": { + "smithy.api#documentation": "

An array of structures that each contain information about one metric associated with this service.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure contains information about one of your services that was discovered by Application Signals.

" + } + }, + "com.amazonaws.applicationsignals#StandardUnit": { + "type": "enum", + "members": { + "MICROSECONDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Microseconds" + } + }, + "MILLISECONDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Milliseconds" + } + }, + "SECONDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Seconds" + } + }, + "BYTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Bytes" + } + }, + "KILOBYTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Kilobytes" + } + }, + "MEGABYTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Megabytes" + } + }, + "GIGABYTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Gigabytes" + } + }, + "TERABYTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Terabytes" + } + }, + "BITS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Bits" + } + }, + "KILOBITS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Kilobits" + } + }, + "MEGABITS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Megabits" + } + }, + "GIGABITS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Gigabits" + } + }, + "TERABITS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Terabits" + } + }, + "PERCENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Percent" + } + }, + "COUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Count" + } + }, + "BYTES_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Bytes/Second" + } + }, + "KILOBYTES_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Kilobytes/Second" + } + }, + "MEGABYTES_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Megabytes/Second" + } + }, + "GIGABYTES_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Gigabytes/Second" + } + }, + "TERABYTES_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Terabytes/Second" + } + }, + "BITS_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Bits/Second" + } + }, + "KILOBITS_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Kilobits/Second" + } + }, + "MEGABITS_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Megabits/Second" + } + }, + "GIGABITS_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Gigabits/Second" + } + }, + "TERABITS_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Terabits/Second" + } + }, + "COUNT_SECOND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Count/Second" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "None" + } + } + } + }, + "com.amazonaws.applicationsignals#StartDiscovery": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#StartDiscoveryInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#StartDiscoveryOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#AccessDeniedException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + 
"target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Enables this Amazon Web Services account to be able to use CloudWatch Application Signals \n by creating the AWSServiceRoleForCloudWatchApplicationSignals service-linked role. This service-\n linked role has the following permissions:

  • xray:GetServiceGraph

  • logs:StartQuery

  • logs:GetQueryResults

  • cloudwatch:GetMetricData

  • cloudwatch:ListMetrics

  • tag:GetResources

  • autoscaling:DescribeAutoScalingGroups

After completing this step, you still need to instrument your Java and Python applications to send data to Application Signals. For more information, see Enabling Application Signals.
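
As an illustrative aside (not part of the model), the wire shape of this operation, taken from the smithy.api#http trait below (POST /start-discovery with an empty input structure), could be sketched in Swift as follows. The endpoint host and region are assumptions, and SigV4 signing is omitted.

import Foundation

// Minimal, unsigned sketch of a StartDiscovery request.
// Host and region are invented; real calls need SigV4 authentication.
var request = URLRequest(url: URL(string: "https://application-signals.us-east-1.api.aws/start-discovery")!)
request.httpMethod = "POST"
request.setValue("application/json", forHTTPHeaderField: "Content-Type")
request.httpBody = Data("{}".utf8) // StartDiscoveryInput has no members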

", + "smithy.api#http": { + "method": "POST", + "uri": "/start-discovery", + "code": 200 + } + } + }, + "com.amazonaws.applicationsignals#StartDiscoveryInput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#StartDiscoveryOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#Stat": { + "type": "string" + }, + "com.amazonaws.applicationsignals#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.applicationsignals#TagKey", + "traits": { + "smithy.api#documentation": "

A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your \n resources.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.applicationsignals#TagValue", + "traits": { + "smithy.api#documentation": "

The value for the specified tag key.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A key-value pair associated with a resource. Tags can help you organize and categorize your resources.

" + } + }, + "com.amazonaws.applicationsignals#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.applicationsignals#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.applicationsignals#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.applicationsignals#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.applicationsignals#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.applicationsignals#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.applicationsignals#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified CloudWatch resource, such as a service level objective.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can use the TagResource action with an alarm that already has tags. If you specify a new tag key for the alarm, this tag is appended to the list of tags associated with the alarm. If you specify a tag key that is already associated with the alarm, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a CloudWatch resource.
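
For illustration only (not part of the model), a request body for this operation, built from the ResourceArn and Tags members defined below, might be encoded like this in Swift; the ARN follows the documented arn:aws:cloudwatch:Region:account-id:slo:slo-name format, and every concrete value is invented.

import Foundation

// Hypothetical TagResource request body; member names match the shapes below.
struct Tag: Codable { let Key: String; let Value: String }
struct TagResourceRequest: Codable { let ResourceArn: String; let Tags: [Tag] }

let body = TagResourceRequest(
    ResourceArn: "arn:aws:cloudwatch:us-east-1:111122223333:slo:checkout-latency",
    Tags: [Tag(Key: "team", Value: "payments")] // at most 50 tags per resource
)
let payload = try! JSONEncoder().encode(body)   // POSTed to /tag-resource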

", + "smithy.api#http": { + "method": "POST", + "uri": "/tag-resource", + "code": 200 + } + } + }, + "com.amazonaws.applicationsignals#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.applicationsignals#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the CloudWatch resource that you want to set tags for.

The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name.

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.applicationsignals#TagList", + "traits": { + "smithy.api#documentation": "

The list of key-value pairs to associate with the alarm.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.applicationsignals#ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request was throttled because of quota limits.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.applicationsignals#TotalBudgetSeconds": { + "type": "integer" + }, + "com.amazonaws.applicationsignals#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.applicationsignals#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes one or more tags from the specified resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/untag-resource", + "code": 200 + } + } + }, + "com.amazonaws.applicationsignals#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.applicationsignals#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the CloudWatch resource that you want to delete tags from.

The ARN format of an Application Signals SLO is arn:aws:cloudwatch:Region:account-id:slo:slo-name.

For more information about ARN format, see Resource Types Defined by Amazon CloudWatch in the Amazon Web Services General Reference.

", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.applicationsignals#TagKeyList", + "traits": { + "smithy.api#documentation": "

The list of tag keys to remove from the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#UpdateServiceLevelObjective": { + "type": "operation", + "input": { + "target": "com.amazonaws.applicationsignals#UpdateServiceLevelObjectiveInput" + }, + "output": { + "target": "com.amazonaws.applicationsignals#UpdateServiceLevelObjectiveOutput" + }, + "errors": [ + { + "target": "com.amazonaws.applicationsignals#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.applicationsignals#ThrottlingException" + }, + { + "target": "com.amazonaws.applicationsignals#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an existing service level objective (SLO). If you omit parameters, the previous values\n of those parameters are retained.
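
As an illustrative aside (not part of the model), a partial update per the PATCH /slo/{Id} binding below could be sketched in Swift as follows: only Description is sent, so the SLO's other settings are retained. The SLO name, endpoint host, and region are invented, and SigV4 signing is omitted.

import Foundation

// Minimal, unsigned sketch of a partial UpdateServiceLevelObjective request.
let sloId = "checkout-latency" // hypothetical SLO name used as the path Id
var request = URLRequest(url: URL(string: "https://application-signals.us-east-1.api.aws/slo/\(sloId)")!)
request.httpMethod = "PATCH"
request.setValue("application/json", forHTTPHeaderField: "Content-Type")
request.httpBody = Data(#"{"Description": "Latency SLO for the checkout API"}"#.utf8)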

", + "smithy.api#http": { + "method": "PATCH", + "uri": "/slo/{Id}", + "code": 200 + } + } + }, + "com.amazonaws.applicationsignals#UpdateServiceLevelObjectiveInput": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveId", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) or name of the service level objective that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjectiveDescription", + "traits": { + "smithy.api#documentation": "

An optional description for the SLO.

" + } + }, + "SliConfig": { + "target": "com.amazonaws.applicationsignals#ServiceLevelIndicatorConfig", + "traits": { + "smithy.api#documentation": "

A structure that contains information about what performance metric this SLO will monitor.

" + } + }, + "Goal": { + "target": "com.amazonaws.applicationsignals#Goal", + "traits": { + "smithy.api#documentation": "

A structure that contains the attributes that determine the goal of the SLO. This includes\n the time period for evaluation and the attainment threshold.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.applicationsignals#UpdateServiceLevelObjectiveOutput": { + "type": "structure", + "members": { + "Slo": { + "target": "com.amazonaws.applicationsignals#ServiceLevelObjective", + "traits": { + "smithy.api#documentation": "

A structure that contains information about the SLO that you just updated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.applicationsignals#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.applicationsignals#ValidationExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ValidationError", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

The resource is not valid.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.applicationsignals#ValidationExceptionMessage": { + "type": "string" + }, + "com.amazonaws.applicationsignals#WarningThreshold": { + "type": "double" + } + } +} \ No newline at end of file diff --git a/models/apptest.json b/models/apptest.json new file mode 100644 index 0000000000..a80d7cc7c5 --- /dev/null +++ b/models/apptest.json @@ -0,0 +1,6288 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.apptest#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The account or role doesn't have the right permissions to make the request.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.apptest#Arn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+=,@.-]{0,1023}$" + } + }, + "com.amazonaws.apptest#AwsApptestControlPlaneService": { + "type": "service", + "version": "2022-12-06", + "operations": [ + { + "target": "com.amazonaws.apptest#ListTagsForResource" + }, + { + "target": "com.amazonaws.apptest#TagResource" + }, + { + "target": "com.amazonaws.apptest#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.apptest#TestCase" + }, + { + "target": "com.amazonaws.apptest#TestConfiguration" + }, + { + "target": "com.amazonaws.apptest#TestRun" + }, + { + "target": "com.amazonaws.apptest#TestSuite" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "AppTest", + "cloudFormationName": "AppTest", + "cloudTrailEventSource": "apptest.amazonaws.com", + "arnNamespace": "apptest", + "endpointPrefix": "apptest" + }, + "aws.auth#sigv4": { + "name": "apptest" + }, + "aws.iam#defineConditionKeys": { + "aws:ResourceTag/${TagKey}": { + "type": "String", + "documentation": "Filters access by a tag key and value pair of a resource", + "externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-resourcetag" + }, + "aws:RequestTag/${TagKey}": { + "type": "String", + "documentation": "Filters access by a tag key and value pair that is allowed in the request", + "externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requesttag" + }, + "aws:TagKeys": { + "type": "ArrayOfString", + "documentation": "Filters access by a list of tag keys that are allowed in the request", + "externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-tagkeys" + } + }, + "aws.iam#supportedPrincipalTypes": [ + "Root", + "IAMUser", + "IAMRole", + "FederatedUser" + ], + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "*,authorization,date,x-amz-date,x-amz-security-token,x-amz-target,content-type,x-amz-content-sha256,x-amz-user-agent,x-amzn-platform-id,x-amzn-trace-id,amz-sdk-invocation-id,amz-sdk-request" + ] + }, + "smithy.api#documentation": "

AWS Mainframe Modernization Application Testing provides tools and resources for automated functional equivalence testing for your migration projects.

", + "smithy.api#title": "AWS Mainframe Modernization Application Testing", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + 
"url": "https://apptest-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://apptest.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://apptest.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://apptest.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and 
DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://apptest.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://apptest.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { 
+ "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.apptest#Batch": { + "type": "structure", + "members": { + "batchJobName": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The job name of the batch.

", + "smithy.api#required": {} + } + }, + "batchJobParameters": { + "target": "com.amazonaws.apptest#BatchJobParameters", + "traits": { + "smithy.api#documentation": "

The batch job parameters of the batch.

" + } + }, + "exportDataSetNames": { + "target": "com.amazonaws.apptest#ExportDataSetNames", + "traits": { + "smithy.api#documentation": "

The export data set names of the batch.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a batch.

" + } + }, + "com.amazonaws.apptest#BatchJobParameters": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.apptest#BatchStepInput": { + "type": "structure", + "members": { + "resource": { + "target": "com.amazonaws.apptest#MainframeResourceSummary", + "traits": { + "smithy.api#documentation": "

The resource of the batch step input.

", + "smithy.api#required": {} + } + }, + "batchJobName": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The batch job name of the batch step input.

", + "smithy.api#required": {} + } + }, + "batchJobParameters": { + "target": "com.amazonaws.apptest#BatchJobParameters", + "traits": { + "smithy.api#documentation": "

The batch job parameters of the batch step input.

" + } + }, + "exportDataSetNames": { + "target": "com.amazonaws.apptest#ExportDataSetNames", + "traits": { + "smithy.api#documentation": "

The export data set names of the batch step input.

" + } + }, + "properties": { + "target": "com.amazonaws.apptest#MainframeActionProperties", + "traits": { + "smithy.api#documentation": "

The properties of the batch step input.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a batch step input.

" + } + }, + "com.amazonaws.apptest#BatchStepOutput": { + "type": "structure", + "members": { + "dataSetExportLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The data set export location of the batch step output.

" + } + }, + "dmsOutputLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The Database Migration Service (DMS) output location of the batch step output.

" + } + }, + "dataSetDetails": { + "target": "com.amazonaws.apptest#DataSetList", + "traits": { + "smithy.api#documentation": "

The data set details of the batch step output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a batch step output.

" + } + }, + "com.amazonaws.apptest#BatchSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#BatchStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the batch summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#BatchStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the batch summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summarizes a batch job.

" + } + }, + "com.amazonaws.apptest#CaptureTool": { + "type": "enum", + "members": { + "PRECISELY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Precisely" + } + }, + "AWS_DMS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS DMS" + } + } + } + }, + "com.amazonaws.apptest#CloudFormation": { + "type": "structure", + "members": { + "templateLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The template location of the CloudFormation template.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The CloudFormation properties in the CloudFormation template.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the CloudFormation template and its parameters.

" + } + }, + "com.amazonaws.apptest#CloudFormationAction": { + "type": "structure", + "members": { + "resource": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The resource of the CloudFormation action.

", + "smithy.api#required": {} + } + }, + "actionType": { + "target": "com.amazonaws.apptest#CloudFormationActionType", + "traits": { + "smithy.api#documentation": "

The action type of the CloudFormation action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the CloudFormation action.

" + } + }, + "com.amazonaws.apptest#CloudFormationActionType": { + "type": "enum", + "members": { + "CREATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Create" + } + }, + "DELETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Delete" + } + } + } + }, + "com.amazonaws.apptest#CloudFormationStepSummary": { + "type": "union", + "members": { + "createCloudformation": { + "target": "com.amazonaws.apptest#CreateCloudFormationSummary", + "traits": { + "smithy.api#documentation": "

Creates the CloudFormation summary of the step.

" + } + }, + "deleteCloudformation": { + "target": "com.amazonaws.apptest#DeleteCloudFormationSummary", + "traits": { + "smithy.api#documentation": "

Deletes the CloudFormation summary of the CloudFormation step summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the CloudFormation step summary.

" + } + }, + "com.amazonaws.apptest#CompareAction": { + "type": "structure", + "members": { + "input": { + "target": "com.amazonaws.apptest#Input", + "traits": { + "smithy.api#documentation": "

The input of the compare action.

", + "smithy.api#required": {} + } + }, + "output": { + "target": "com.amazonaws.apptest#Output", + "traits": { + "smithy.api#documentation": "

The output of the compare action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Compares the action.

" + } + }, + "com.amazonaws.apptest#CompareActionSummary": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.apptest#File", + "traits": { + "smithy.api#documentation": "

The type of the compare action summary.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the compare action summary.

" + } + }, + "com.amazonaws.apptest#CompareDataSetsStepInput": { + "type": "structure", + "members": { + "sourceLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The source location of the compare data sets step input location.

", + "smithy.api#required": {} + } + }, + "targetLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The target location of the compare data sets step input location.

", + "smithy.api#required": {} + } + }, + "sourceDataSets": { + "target": "com.amazonaws.apptest#DataSetList", + "traits": { + "smithy.api#documentation": "

The source data sets of the compare data sets step input location.

", + "smithy.api#required": {} + } + }, + "targetDataSets": { + "target": "com.amazonaws.apptest#DataSetList", + "traits": { + "smithy.api#documentation": "

The target data sets of the compare data sets step input location.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the compare data sets step input.

" + } + }, + "com.amazonaws.apptest#CompareDataSetsStepOutput": { + "type": "structure", + "members": { + "comparisonOutputLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The comparison output location of the compare data sets step output.

", + "smithy.api#required": {} + } + }, + "comparisonStatus": { + "target": "com.amazonaws.apptest#ComparisonStatusEnum", + "traits": { + "smithy.api#documentation": "

The comparison status of the compare data sets step output.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the compare data sets step output.

" + } + }, + "com.amazonaws.apptest#CompareDataSetsSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#CompareDataSetsStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the compare data sets summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#CompareDataSetsStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the compare data sets summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Compares data sets summary.

" + } + }, + "com.amazonaws.apptest#CompareDatabaseCDCStepInput": { + "type": "structure", + "members": { + "sourceLocation": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The source location of the compare database CDC step input.

", + "smithy.api#required": {} + } + }, + "targetLocation": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The target location of the compare database CDC step input.

", + "smithy.api#required": {} + } + }, + "outputLocation": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The output location of the compare database CDC step input.

" + } + }, + "sourceMetadata": { + "target": "com.amazonaws.apptest#SourceDatabaseMetadata", + "traits": { + "smithy.api#documentation": "

The source metadata of the compare database CDC step input.

", + "smithy.api#required": {} + } + }, + "targetMetadata": { + "target": "com.amazonaws.apptest#TargetDatabaseMetadata", + "traits": { + "smithy.api#documentation": "

The target metadata location of the compare database CDC step input.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Compares the database Change Data Capture (CDC) step input.

" + } + }, + "com.amazonaws.apptest#CompareDatabaseCDCStepOutput": { + "type": "structure", + "members": { + "comparisonOutputLocation": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The comparison output of the compare database CDC step output.

", + "smithy.api#required": {} + } + }, + "comparisonStatus": { + "target": "com.amazonaws.apptest#ComparisonStatusEnum", + "traits": { + "smithy.api#documentation": "

The comparison status of the compare database CDC step output.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Compares the database CDC step output.

" + } + }, + "com.amazonaws.apptest#CompareDatabaseCDCSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#CompareDatabaseCDCStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the compare database CDC summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#CompareDatabaseCDCStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the compare database CDC summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Compares the database CDC summary.

" + } + }, + "com.amazonaws.apptest#CompareFileType": { + "type": "union", + "members": { + "datasets": { + "target": "com.amazonaws.apptest#CompareDataSetsSummary", + "traits": { + "smithy.api#documentation": "

The data sets in the compare file type.

" + } + }, + "databaseCDC": { + "target": "com.amazonaws.apptest#CompareDatabaseCDCSummary", + "traits": { + "smithy.api#documentation": "

The database CDC of the compare file type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Compares the file type.

" + } + }, + "com.amazonaws.apptest#ComparisonStatusEnum": { + "type": "enum", + "members": { + "DIFFERENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Different" + } + }, + "EQUIVALENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Equivalent" + } + }, + "EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Equal" + } + } + } + }, + "com.amazonaws.apptest#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource ID of the conflicts with existing resources.

" + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource type of the conflicts with existing resources.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameters provided in the request conflict with existing resources.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.apptest#CreateCloudFormationStepInput": { + "type": "structure", + "members": { + "templateLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The template location of the CloudFormation step input.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The CloudFormation properties of the CloudFormation step input.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Creates the CloudFormation step input.

" + } + }, + "com.amazonaws.apptest#CreateCloudFormationStepOutput": { + "type": "structure", + "members": { + "stackId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The stack ID of the CloudFormation step output.

", + "smithy.api#required": {} + } + }, + "exports": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The exports of the CloudFormation step output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Creates a CloudFormation step output.

" + } + }, + "com.amazonaws.apptest#CreateCloudFormationSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#CreateCloudFormationStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the CloudFormation summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#CreateCloudFormationStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the CloudFormation summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Creates a CloudFormation summary.

" + } + }, + "com.amazonaws.apptest#CreateTestCase": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#CreateTestCaseRequest" + }, + "output": { + "target": "com.amazonaws.apptest#CreateTestCaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], + "aws.iam#iamAction": { + "name": "CreateTestCase", + "documentation": "Grants permission to create a test case", + "createsResources": [ + "TestCase" + ] + }, + "smithy.api#documentation": "

Creates a test case.

", + "smithy.api#http": { + "method": "POST", + "uri": "/testcase", + "code": 201 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#CreateTestCaseRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test case.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test case.

" + } + }, + "steps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The steps in the test case.

", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.apptest#IdempotencyTokenString", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The client token of the test case.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The specified tags of the test case.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#CreateTestCaseResponse": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test case.

", + "smithy.api#required": {} + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test case version of the test case.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#CreateTestConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#CreateTestConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.apptest#CreateTestConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], + "aws.iam#iamAction": { + "name": "CreateTestConfiguration", + "documentation": "Grants permission to create a test configuration", + "createsResources": [ + "TestConfiguration" + ] + }, + "smithy.api#documentation": "

Creates a test configuration.

", + "smithy.api#http": { + "method": "POST", + "uri": "/testconfiguration", + "code": 201 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#CreateTestConfigurationRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test configuration.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test configuration.

" + } + }, + "resources": { + "target": "com.amazonaws.apptest#ResourceList", + "traits": { + "smithy.api#documentation": "

The defined resources of the test configuration.

", + "smithy.api#required": {} + } + }, + "properties": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The properties of the test configuration.

" + } + }, + "clientToken": { + "target": "com.amazonaws.apptest#IdempotencyTokenString", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The client token of the test configuration.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the test configuration.

" + } + }, + "serviceSettings": { + "target": "com.amazonaws.apptest#ServiceSettings", + "traits": { + "smithy.api#documentation": "

The service settings of the test configuration.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#CreateTestConfigurationResponse": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test configuration ID.

", + "smithy.api#required": {} + } + }, + "testConfigurationVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test configuration version.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#CreateTestSuite": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#CreateTestSuiteRequest" + }, + "output": { + "target": "com.amazonaws.apptest#CreateTestSuiteResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], + "aws.iam#iamAction": { + "name": "CreateTestSuite", + "documentation": "Grants permission to create a test suite", + "createsResources": [ + "TestSuite" + ] + }, + "smithy.api#documentation": "

Creates a test suite.

", + "smithy.api#http": { + "method": "POST", + "uri": "/testsuite", + "code": 201 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#CreateTestSuiteRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test suite.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test suite.

" + } + }, + "beforeSteps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The before steps of the test suite.

" + } + }, + "afterSteps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The after steps of the test suite.

" + } + }, + "testCases": { + "target": "com.amazonaws.apptest#TestCases", + "traits": { + "smithy.api#documentation": "

The test cases in the test suite.

", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.apptest#IdempotencyTokenString", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The client token of the test suite.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the test suite.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#CreateTestSuiteResponse": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The suite ID of the test suite.

", + "smithy.api#required": {} + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The suite version of the test suite.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#DataSet": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.apptest#DataSetType", + "traits": { + "smithy.api#documentation": "

The type of the data set.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#String100", + "traits": { + "smithy.api#documentation": "

The name of the data set.

", + "smithy.api#required": {} + } + }, + "ccsid": { + "target": "com.amazonaws.apptest#String50", + "traits": { + "smithy.api#documentation": "

The CCSID of the data set.

", + "smithy.api#required": {} + } + }, + "format": { + "target": "com.amazonaws.apptest#Format", + "traits": { + "smithy.api#documentation": "

The format of the data set.

", + "smithy.api#required": {} + } + }, + "length": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The length of the data set.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a data set.

" + } + }, + "com.amazonaws.apptest#DataSetList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#DataSet" + } + }, + "com.amazonaws.apptest#DataSetType": { + "type": "enum", + "members": { + "PS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PS" + } + } + } + }, + "com.amazonaws.apptest#DatabaseCDC": { + "type": "structure", + "members": { + "sourceMetadata": { + "target": "com.amazonaws.apptest#SourceDatabaseMetadata", + "traits": { + "smithy.api#documentation": "

The source metadata of the database CDC.

", + "smithy.api#required": {} + } + }, + "targetMetadata": { + "target": "com.amazonaws.apptest#TargetDatabaseMetadata", + "traits": { + "smithy.api#documentation": "

The target metadata of the database CDC.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines the Change Data Capture (CDC) of the database.

" + } + }, + "com.amazonaws.apptest#DeleteCloudFormationStepInput": { + "type": "structure", + "members": { + "stackId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The stack ID of the deleted CloudFormation step input.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Deletes the CloudFormation step input.

" + } + }, + "com.amazonaws.apptest#DeleteCloudFormationStepOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Deletes the CloudFormation step output.

" + } + }, + "com.amazonaws.apptest#DeleteCloudFormationSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#DeleteCloudFormationStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the deleted CloudFormation summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#DeleteCloudFormationStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the deleted CloudFormation summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Deletes the CloudFormation summary.

" + } + }, + "com.amazonaws.apptest#DeleteTestCase": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#DeleteTestCaseRequest" + }, + "output": { + "target": "com.amazonaws.apptest#DeleteTestCaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "DeleteTestCase", + "documentation": "Grants permission to delete a test case" + }, + "smithy.api#documentation": "

Deletes a test case.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/testcases/{testCaseId}", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#DeleteTestCaseRequest": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test case.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#DeleteTestCaseResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#DeleteTestConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#DeleteTestConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.apptest#DeleteTestConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "DeleteTestConfiguration", + "documentation": "Grants permission to delete a test configuration" + }, + "smithy.api#documentation": "

Deletes a test configuration.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/testconfigurations/{testConfigurationId}", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#DeleteTestConfigurationRequest": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The ID of the test configuration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#DeleteTestConfigurationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#DeleteTestRun": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#DeleteTestRunRequest" + }, + "output": { + "target": "com.amazonaws.apptest#DeleteTestRunResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "DeleteTestRun", + "documentation": "Grants permission to delete a test run", + "requiredActions": [ + "s3:DeleteObjects", + "s3:ListObjectsV2" + ] + }, + "smithy.api#documentation": "

Deletes a test run.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/testruns/{testRunId}", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#DeleteTestRunRequest": { + "type": "structure", + "members": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The run ID of the test run.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#DeleteTestRunResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#DeleteTestSuite": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#DeleteTestSuiteRequest" + }, + "output": { + "target": "com.amazonaws.apptest#DeleteTestSuiteResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "DeleteTestSuite", + "documentation": "Grants permission to delete a test suite" + }, + "smithy.api#documentation": "

Deletes a test suite.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/testsuites/{testSuiteId}", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#DeleteTestSuiteRequest": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The ID of the test suite.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#DeleteTestSuiteResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ExportDataSetNames": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#String100" + } + }, + "com.amazonaws.apptest#File": { + "type": "union", + "members": { + "fileType": { + "target": "com.amazonaws.apptest#CompareFileType", + "traits": { + "smithy.api#documentation": "

The file type of the file.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a file.

" + } + }, + "com.amazonaws.apptest#FileMetadata": { + "type": "union", + "members": { + "dataSets": { + "target": "com.amazonaws.apptest#DataSetList", + "traits": { + "smithy.api#documentation": "

The data sets of the file metadata.

" + } + }, + "databaseCDC": { + "target": "com.amazonaws.apptest#DatabaseCDC", + "traits": { + "smithy.api#documentation": "

The database CDC of the file metadata.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the file metadata.

" + } + }, + "com.amazonaws.apptest#Format": { + "type": "enum", + "members": { + "FIXED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FIXED" + } + }, + "VARIABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VARIABLE" + } + }, + "LINE_SEQUENTIAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LINE_SEQUENTIAL" + } + } + } + }, + "com.amazonaws.apptest#GetTestCase": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#GetTestCaseRequest" + }, + "output": { + "target": "com.amazonaws.apptest#GetTestCaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "GetTestCase", + "documentation": "Grants permission to get a test case" + }, + "smithy.api#documentation": "

Gets a test case.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testcases/{testCaseId}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#GetTestCaseRequest": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The request test ID of the test case.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test case version of the test case.

", + "smithy.api#httpQuery": "testCaseVersion" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#GetTestCaseResponse": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The response test ID of the test case.

", + "smithy.api#required": {} + } + }, + "testCaseArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the test case.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test case.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test case.

" + } + }, + "latestVersion": { + "target": "com.amazonaws.apptest#TestCaseLatestVersion", + "traits": { + "smithy.api#documentation": "

The latest version of the test case.

", + "smithy.api#required": {} + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The case version of the test case.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestCaseLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test case.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test case.

" + } + }, + "creationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the test case.

", + "smithy.api#required": {} + } + }, + "lastUpdateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last update time of the test case.

", + "smithy.api#required": {} + } + }, + "steps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The steps of the test case.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the test case.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#GetTestConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#GetTestConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.apptest#GetTestConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "GetTestConfiguration", + "documentation": "Grants permission to get a test configuration" + }, + "smithy.api#documentation": "

Gets a test configuration.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testconfigurations/{testConfigurationId}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#GetTestConfigurationRequest": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The request test configuration ID.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "testConfigurationVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test configuration version.

", + "smithy.api#httpQuery": "testConfigurationVersion" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#GetTestConfigurationResponse": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The response test configuration ID.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The test configuration name.

", + "smithy.api#required": {} + } + }, + "testConfigurationArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The test configuration Amazon Resource Name (ARN).

", + "smithy.api#required": {} + } + }, + "latestVersion": { + "target": "com.amazonaws.apptest#TestConfigurationLatestVersion", + "traits": { + "smithy.api#documentation": "

The latest version of the test configuration.

", + "smithy.api#required": {} + } + }, + "testConfigurationVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test configuration version.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestConfigurationLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test configuration.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test configuration.

" + } + }, + "creationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the test configuration.

", + "smithy.api#required": {} + } + }, + "lastUpdateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last update time of the test configuration.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test configuration.

" + } + }, + "resources": { + "target": "com.amazonaws.apptest#ResourceList", + "traits": { + "smithy.api#documentation": "

The resources of the test configuration.

", + "smithy.api#required": {} + } + }, + "properties": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The properties of the test configuration.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the test configuration.

" + } + }, + "serviceSettings": { + "target": "com.amazonaws.apptest#ServiceSettings", + "traits": { + "smithy.api#documentation": "

The service settings of the test configuration.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#GetTestRunStep": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#GetTestRunStepRequest" + }, + "output": { + "target": "com.amazonaws.apptest#GetTestRunStepResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "GetTestRunStep", + "documentation": "Grants permission to get test run step" + }, + "smithy.api#documentation": "

Gets a test run step.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testruns/{testRunId}/steps/{stepName}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#GetTestRunStepRequest": { + "type": "structure", + "members": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test run step.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "stepName": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The step name of the test run step.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of a test run step.

", + "smithy.api#httpQuery": "testCaseId" + } + }, + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of a test run step.

", + "smithy.api#httpQuery": "testSuiteId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#GetTestRunStepResponse": { + "type": "structure", + "members": { + "stepName": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The step name of the test run step.

", + "smithy.api#required": {} + } + }, + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test run step.

", + "smithy.api#required": {} + } + }, + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test run step.

" + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test case version of the test run step.

" + } + }, + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test run step.

" + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test suite version of the test run step.

" + } + }, + "beforeStep": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the step is a before step of the test run.

" + } + }, + "afterStep": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the step is an after step of the test run.

" + } + }, + "status": { + "target": "com.amazonaws.apptest#StepRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the test run step.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test run step.

" + } + }, + "runStartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run start time of the test run step.

", + "smithy.api#required": {} + } + }, + "runEndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run end time of the test run step.

" + } + }, + "stepRunSummary": { + "target": "com.amazonaws.apptest#StepRunSummary", + "traits": { + "smithy.api#documentation": "

The step run summary of the test run step.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#GetTestSuite": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#GetTestSuiteRequest" + }, + "output": { + "target": "com.amazonaws.apptest#GetTestSuiteResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "GetTestSuite", + "documentation": "Grants permission to get a test suite" + }, + "smithy.api#documentation": "

Gets a test suite.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testsuites/{testSuiteId}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#GetTestSuiteRequest": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The ID of the test suite.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The version of the test suite.

", + "smithy.api#httpQuery": "testSuiteVersion" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#GetTestSuiteResponse": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The response ID of the test suite.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test suite.

", + "smithy.api#required": {} + } + }, + "latestVersion": { + "target": "com.amazonaws.apptest#TestSuiteLatestVersion", + "traits": { + "smithy.api#documentation": "

The latest version of the test suite.

", + "smithy.api#required": {} + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The version of the test suite.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestSuiteLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test suite.

" + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test suite.

" + } + }, + "testSuiteArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The test suite Amazon Resource Name (ARN).

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the test suite.

", + "smithy.api#required": {} + } + }, + "lastUpdateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last update time of the test suite.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test suite.

" + } + }, + "beforeSteps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The before steps of the test suite.

", + "smithy.api#required": {} + } + }, + "afterSteps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The after steps of the test suite.

", + "smithy.api#required": {} + } + }, + "testCases": { + "target": "com.amazonaws.apptest#TestCases", + "traits": { + "smithy.api#documentation": "

The test cases of the test suite.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the test suite.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#IdempotencyTokenString": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9\\-]{1,64}$" + } + }, + "com.amazonaws.apptest#Identifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9:/\\-]{1,100}$" + } + }, + "com.amazonaws.apptest#Input": { + "type": "union", + "members": { + "file": { + "target": "com.amazonaws.apptest#InputFile", + "traits": { + "smithy.api#documentation": "

The file in the input.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the input.

" + } + }, + "com.amazonaws.apptest#InputFile": { + "type": "structure", + "members": { + "sourceLocation": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The source location of the input file.

", + "smithy.api#required": {} + } + }, + "targetLocation": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The target location of the input file.

", + "smithy.api#required": {} + } + }, + "fileMetadata": { + "target": "com.amazonaws.apptest#FileMetadata", + "traits": { + "smithy.api#documentation": "

The file metadata of the input file.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the input file.

" + } + }, + "com.amazonaws.apptest#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of seconds to wait before retrying the request.

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

An unexpected error occurred during the processing of the request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.apptest#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants permission to list tags for a resource" + }, + "smithy.api#documentation": "

Lists tags for a resource.

", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ListTestCases": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTestCasesRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTestCasesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListTestCases", + "documentation": "Grants permission to list test cases" + }, + "smithy.api#documentation": "

Lists test cases.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testcases", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "testCases" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTestCasesRequest": { + "type": "structure", + "members": { + "testCaseIds": { + "target": "com.amazonaws.apptest#TestCaseIdList", + "traits": { + "smithy.api#documentation": "

The IDs of the test cases.

", + "smithy.api#httpQuery": "testCaseIds" + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The next token of the test cases.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.apptest#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of test cases to return in one page of results.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTestCasesResponse": { + "type": "structure", + "members": { + "testCases": { + "target": "com.amazonaws.apptest#TestCaseSummaryList", + "traits": { + "smithy.api#documentation": "

The test cases in an application.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The next token in test cases.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ListTestConfigurations": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTestConfigurationsRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTestConfigurationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListTestConfigurations", + "documentation": "Grants permission to list test configurations" + }, + "smithy.api#documentation": "

Lists test configurations.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testconfigurations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "testConfigurations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTestConfigurationsRequest": { + "type": "structure", + "members": { + "testConfigurationIds": { + "target": "com.amazonaws.apptest#TestConfigurationIdList", + "traits": { + "smithy.api#documentation": "

The configuration IDs of the test configurations.

", + "smithy.api#httpQuery": "testConfigurationIds" + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The next token for the test configurations.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.apptest#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of test configurations to return in one page of results.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTestConfigurationsResponse": { + "type": "structure", + "members": { + "testConfigurations": { + "target": "com.amazonaws.apptest#TestConfigurationList", + "traits": { + "smithy.api#documentation": "

The test configurations.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The next token in the test configurations.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ListTestRunSteps": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTestRunStepsRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTestRunStepsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListTestRunSteps", + "documentation": "Grants permission to list steps for a test run" + }, + "smithy.api#documentation": "

Lists test run steps.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testruns/{testRunId}/steps", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "testRunSteps" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTestRunStepsRequest": { + "type": "structure", + "members": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test run steps.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test run steps.

", + "smithy.api#httpQuery": "testCaseId" + } + }, + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test run steps.

", + "smithy.api#httpQuery": "testSuiteId" + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from a previous request to retrieve the next page of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.apptest#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of test run steps to return in one page of results.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTestRunStepsResponse": { + "type": "structure", + "members": { + "testRunSteps": { + "target": "com.amazonaws.apptest#TestRunStepSummaryList", + "traits": { + "smithy.api#documentation": "

The test run steps returned in the response.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from a previous request to retrieve the next page of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ListTestRunTestCases": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTestRunTestCasesRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTestRunTestCasesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListTestRunTestCases", + "documentation": "Grants permission to list test cases for a test run" + }, + "smithy.api#documentation": "

Lists test run test cases.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testruns/{testRunId}/testcases", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "testRunTestCases" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTestRunTestCasesRequest": { + "type": "structure", + "members": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test cases.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from a previous request to retrieve the next page of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.apptest#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of test run test cases to return in one page of results.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTestRunTestCasesResponse": { + "type": "structure", + "members": { + "testRunTestCases": { + "target": "com.amazonaws.apptest#TestCaseRunSummaryList", + "traits": { + "smithy.api#documentation": "

The test cases of the test run.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from a previous request to retrieve the next page of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ListTestRuns": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTestRunsRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTestRunsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListTestRuns", + "documentation": "Grants permission to list test runs" + }, + "smithy.api#documentation": "

Lists test runs.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testruns", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "testRuns" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTestRunsRequest": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test runs.

", + "smithy.api#httpQuery": "testSuiteId" + } + }, + "testRunIds": { + "target": "com.amazonaws.apptest#TestRunIdList", + "traits": { + "smithy.api#documentation": "

The test run IDs of the test runs.

", + "smithy.api#httpQuery": "testrunIds" + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from the previous request to retrieve the next page of test run results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.apptest#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of test runs to return in one page of results.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTestRunsResponse": { + "type": "structure", + "members": { + "testRuns": { + "target": "com.amazonaws.apptest#TestRunSummaryList", + "traits": { + "smithy.api#documentation": "

The test runs returned in the response.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from the previous request to retrieve the next page of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ListTestSuites": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#ListTestSuitesRequest" + }, + "output": { + "target": "com.amazonaws.apptest#ListTestSuitesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListTestSuites", + "documentation": "Grants permission to list test suites" + }, + "smithy.api#documentation": "

Lists test suites.

", + "smithy.api#http": { + "method": "GET", + "uri": "/testsuites", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "testSuites" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.apptest#ListTestSuitesRequest": { + "type": "structure", + "members": { + "testSuiteIds": { + "target": "com.amazonaws.apptest#TestSuiteIdList", + "traits": { + "smithy.api#documentation": "

The suite IDs of the test suites.

", + "smithy.api#httpQuery": "testSuiteIds" + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from a previous request to retrieve the next page of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.apptest#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of test suites to return in one page of results.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#ListTestSuitesResponse": { + "type": "structure", + "members": { + "testSuites": { + "target": "com.amazonaws.apptest#TestSuiteList", + "traits": { + "smithy.api#documentation": "

The test suites returned in the response.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.apptest#NextToken", + "traits": { + "smithy.api#documentation": "

The token from a previous request to retrieve the next page of test suite results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#M2ManagedActionProperties": { + "type": "structure", + "members": { + "forceStop": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether to force stop the AWS Mainframe Modernization managed action.

" + } + }, + "importDataSetLocation": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The import data set location of the AWS Mainframe Modernization managed action properties.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed action properties.

" + } + }, + "com.amazonaws.apptest#M2ManagedActionType": { + "type": "enum", + "members": { + "CONFIGURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Configure" + } + }, + "DECONFIGURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deconfigure" + } + } + } + }, + "com.amazonaws.apptest#M2ManagedApplication": { + "type": "structure", + "members": { + "applicationId": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The application ID of the AWS Mainframe Modernization managed application.

", + "smithy.api#required": {} + } + }, + "runtime": { + "target": "com.amazonaws.apptest#M2ManagedRuntime", + "traits": { + "smithy.api#documentation": "

The runtime of the AWS Mainframe Modernization managed application.

", + "smithy.api#required": {} + } + }, + "vpcEndpointServiceName": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The VPC endpoint service name of the AWS Mainframe Modernization managed application.

" + } + }, + "listenerPort": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The listener port of the AWS Mainframe Modernization managed application.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed application.

" + } + }, + "com.amazonaws.apptest#M2ManagedApplicationAction": { + "type": "structure", + "members": { + "resource": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The resource of the AWS Mainframe Modernization managed application action.

", + "smithy.api#required": {} + } + }, + "actionType": { + "target": "com.amazonaws.apptest#M2ManagedActionType", + "traits": { + "smithy.api#documentation": "

The action type of the AWS Mainframe Modernization managed application action.

", + "smithy.api#required": {} + } + }, + "properties": { + "target": "com.amazonaws.apptest#M2ManagedActionProperties", + "traits": { + "smithy.api#documentation": "

The properties of the AWS Mainframe Modernization managed application action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed application action.

" + } + }, + "com.amazonaws.apptest#M2ManagedApplicationStepInput": { + "type": "structure", + "members": { + "applicationId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The application ID of the AWS Mainframe Modernization managed application step input.

", + "smithy.api#required": {} + } + }, + "runtime": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The runtime of the AWS Mainframe Modernization managed application step input.

", + "smithy.api#required": {} + } + }, + "vpcEndpointServiceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The VPC endpoint service name of the AWS Mainframe Modernization managed application step input.

" + } + }, + "listenerPort": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The listener port of the AWS Mainframe Modernization managed application step input.

" + } + }, + "actionType": { + "target": "com.amazonaws.apptest#M2ManagedActionType", + "traits": { + "smithy.api#documentation": "

The action type of the AWS Mainframe Modernization managed application step input.

", + "smithy.api#required": {} + } + }, + "properties": { + "target": "com.amazonaws.apptest#M2ManagedActionProperties", + "traits": { + "smithy.api#documentation": "

The properties of the AWS Mainframe Modernization managed application step input.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed application step input.

" + } + }, + "com.amazonaws.apptest#M2ManagedApplicationStepOutput": { + "type": "structure", + "members": { + "importDataSetSummary": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The import data set summary of the AWS Mainframe Modernization managed application step output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed application step output.

" + } + }, + "com.amazonaws.apptest#M2ManagedApplicationStepSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#M2ManagedApplicationStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the AWS Mainframe Modernization managed application step summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#M2ManagedApplicationStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the AWS Mainframe Modernization managed application step summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed application step summary.

" + } + }, + "com.amazonaws.apptest#M2ManagedApplicationSummary": { + "type": "structure", + "members": { + "applicationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The application ID of the AWS Mainframe Modernization managed application summary.

", + "smithy.api#required": {} + } + }, + "runtime": { + "target": "com.amazonaws.apptest#M2ManagedRuntime", + "traits": { + "smithy.api#documentation": "

The runtime of the AWS Mainframe Modernization managed application summary.

", + "smithy.api#required": {} + } + }, + "listenerPort": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The listener port of the AWS Mainframe Modernization managed application summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization managed application summary.

" + } + }, + "com.amazonaws.apptest#M2ManagedRuntime": { + "type": "enum", + "members": { + "MICROFOCUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MicroFocus" + } + } + } + }, + "com.amazonaws.apptest#M2NonManagedActionType": { + "type": "enum", + "members": { + "CONFIGURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Configure" + } + }, + "DECONFIGURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deconfigure" + } + } + } + }, + "com.amazonaws.apptest#M2NonManagedApplication": { + "type": "structure", + "members": { + "vpcEndpointServiceName": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The VPC endpoint service name of the AWS Mainframe Modernization non-managed application.

", + "smithy.api#required": {} + } + }, + "listenerPort": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The listener port of the AWS Mainframe Modernization non-managed application.

", + "smithy.api#required": {} + } + }, + "runtime": { + "target": "com.amazonaws.apptest#M2NonManagedRuntime", + "traits": { + "smithy.api#documentation": "

The runtime of the AWS Mainframe Modernization non-managed application.

", + "smithy.api#required": {} + } + }, + "webAppName": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The web application name of the AWS Mainframe Modernization non-managed application.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization non-managed application.

" + } + }, + "com.amazonaws.apptest#M2NonManagedApplicationAction": { + "type": "structure", + "members": { + "resource": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The resource of the AWS Mainframe Modernization non-managed application action.

", + "smithy.api#required": {} + } + }, + "actionType": { + "target": "com.amazonaws.apptest#M2NonManagedActionType", + "traits": { + "smithy.api#documentation": "

The action type of the AWS Mainframe Modernization non-managed application action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization non-managed application action.

" + } + }, + "com.amazonaws.apptest#M2NonManagedApplicationStepInput": { + "type": "structure", + "members": { + "vpcEndpointServiceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The VPC endpoint service name of the AWS Mainframe Modernization non-managed application step input.

", + "smithy.api#required": {} + } + }, + "listenerPort": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The listener port of the AWS Mainframe Modernization non-managed application step input.

", + "smithy.api#required": {} + } + }, + "runtime": { + "target": "com.amazonaws.apptest#M2NonManagedRuntime", + "traits": { + "smithy.api#documentation": "

The runtime of the AWS Mainframe Modernization non-managed application step input.

", + "smithy.api#required": {} + } + }, + "webAppName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The web application name of the AWS Mainframe Modernization non-managed application step input.

" + } + }, + "actionType": { + "target": "com.amazonaws.apptest#M2NonManagedActionType", + "traits": { + "smithy.api#documentation": "

The action type of the AWS Mainframe Modernization non-managed application step input.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization non-managed application step input.

" + } + }, + "com.amazonaws.apptest#M2NonManagedApplicationStepOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization non-managed application step output.

" + } + }, + "com.amazonaws.apptest#M2NonManagedApplicationStepSummary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#M2NonManagedApplicationStepInput", + "traits": { + "smithy.api#documentation": "

The step input of the AWS Mainframe Modernization non-managed application step summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#M2NonManagedApplicationStepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the AWS Mainframe Modernization non-managed application step summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization non-managed application step summary.

" + } + }, + "com.amazonaws.apptest#M2NonManagedApplicationSummary": { + "type": "structure", + "members": { + "vpcEndpointServiceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The VPC endpoint service name of the AWS Mainframe Modernization non-managed application summary.

", + "smithy.api#required": {} + } + }, + "listenerPort": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The listener port of the AWS Mainframe Modernization non-managed application summary.

", + "smithy.api#required": {} + } + }, + "runtime": { + "target": "com.amazonaws.apptest#M2NonManagedRuntime", + "traits": { + "smithy.api#documentation": "

The runtime of the AWS Mainframe Modernization non-managed application summary.

", + "smithy.api#required": {} + } + }, + "webAppName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The web application name of the AWS Mainframe Modernization non-managed application summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the AWS Mainframe Modernization non-managed application summary.

" + } + }, + "com.amazonaws.apptest#M2NonManagedRuntime": { + "type": "enum", + "members": { + "BLUAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BluAge" + } + } + } + }, + "com.amazonaws.apptest#MainframeAction": { + "type": "structure", + "members": { + "resource": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The resource of the mainframe action.

", + "smithy.api#required": {} + } + }, + "actionType": { + "target": "com.amazonaws.apptest#MainframeActionType", + "traits": { + "smithy.api#documentation": "

The action type of the mainframe action.

", + "smithy.api#required": {} + } + }, + "properties": { + "target": "com.amazonaws.apptest#MainframeActionProperties", + "traits": { + "smithy.api#documentation": "

The properties of the mainframe action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the mainframe action.

" + } + }, + "com.amazonaws.apptest#MainframeActionProperties": { + "type": "structure", + "members": { + "dmsTaskArn": { + "target": "com.amazonaws.apptest#Variable", + "traits": { + "smithy.api#documentation": "

The DMS task ARN of the mainframe action properties.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the mainframe action properties.

" + } + }, + "com.amazonaws.apptest#MainframeActionSummary": { + "type": "union", + "members": { + "batch": { + "target": "com.amazonaws.apptest#BatchSummary", + "traits": { + "smithy.api#documentation": "

The batch of the mainframe action summary.

" + } + }, + "tn3270": { + "target": "com.amazonaws.apptest#TN3270Summary", + "traits": { + "smithy.api#documentation": "

The TN3270 protocol of the mainframe action summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the mainframe action summary.

" + } + }, + "com.amazonaws.apptest#MainframeActionType": { + "type": "union", + "members": { + "batch": { + "target": "com.amazonaws.apptest#Batch", + "traits": { + "smithy.api#documentation": "

The batch of the mainframe action type.

" + } + }, + "tn3270": { + "target": "com.amazonaws.apptest#TN3270", + "traits": { + "smithy.api#documentation": "

The TN3270 protocol of the mainframe action type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the mainframe action type.

" + } + }, + "com.amazonaws.apptest#MainframeResourceSummary": { + "type": "union", + "members": { + "m2ManagedApplication": { + "target": "com.amazonaws.apptest#M2ManagedApplicationSummary", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization managed application in the mainframe resource summary.

" + } + }, + "m2NonManagedApplication": { + "target": "com.amazonaws.apptest#M2NonManagedApplicationSummary", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization non-managed application in the mainframe resource summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the mainframe resource summary.

" + } + }, + "com.amazonaws.apptest#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.apptest#NextToken": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\S{1,2000}$" + } + }, + "com.amazonaws.apptest#Output": { + "type": "union", + "members": { + "file": { + "target": "com.amazonaws.apptest#OutputFile", + "traits": { + "smithy.api#documentation": "

The file of the output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies an output.

" + } + }, + "com.amazonaws.apptest#OutputFile": { + "type": "structure", + "members": { + "fileLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The file location of the output file.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies an output file.

" + } + }, + "com.amazonaws.apptest#Properties": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.apptest#Resource": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the resource.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.apptest#ResourceType", + "traits": { + "smithy.api#documentation": "

The type of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a resource.

" + } + }, + "com.amazonaws.apptest#ResourceAction": { + "type": "union", + "members": { + "m2ManagedApplicationAction": { + "target": "com.amazonaws.apptest#M2ManagedApplicationAction", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization managed application action of the resource action.

" + } + }, + "m2NonManagedApplicationAction": { + "target": "com.amazonaws.apptest#M2NonManagedApplicationAction", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization non-managed application action of the resource action.

" + } + }, + "cloudFormationAction": { + "target": "com.amazonaws.apptest#CloudFormationAction", + "traits": { + "smithy.api#documentation": "

The CloudFormation action of the resource action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a resource action.

" + } + }, + "com.amazonaws.apptest#ResourceActionSummary": { + "type": "union", + "members": { + "cloudFormation": { + "target": "com.amazonaws.apptest#CloudFormationStepSummary", + "traits": { + "smithy.api#documentation": "

The CloudFormation template of the resource action summary.

" + } + }, + "m2ManagedApplication": { + "target": "com.amazonaws.apptest#M2ManagedApplicationStepSummary", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization managed application of the resource action summary.

" + } + }, + "m2NonManagedApplication": { + "target": "com.amazonaws.apptest#M2NonManagedApplicationStepSummary", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization non-managed application of the resource action summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the resource action summary.

" + } + }, + "com.amazonaws.apptest#ResourceDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1000 + } + } + }, + "com.amazonaws.apptest#ResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Resource" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.apptest#ResourceName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z][A-Za-z0-9_\\-]{1,59}$" + } + }, + "com.amazonaws.apptest#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource ID of the resource not found.

" + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource type of the resource not found.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The specified resource was not found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.apptest#ResourceType": { + "type": "union", + "members": { + "cloudFormation": { + "target": "com.amazonaws.apptest#CloudFormation", + "traits": { + "smithy.api#documentation": "

The CloudFormation template of the resource type.

" + } + }, + "m2ManagedApplication": { + "target": "com.amazonaws.apptest#M2ManagedApplication", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization managed application of the resource type.

" + } + }, + "m2NonManagedApplication": { + "target": "com.amazonaws.apptest#M2NonManagedApplication", + "traits": { + "smithy.api#documentation": "

The AWS Mainframe Modernization non-managed application of the resource type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the resource type.

" + } + }, + "com.amazonaws.apptest#S3Uri": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.apptest#Script": { + "type": "structure", + "members": { + "scriptLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The script location of the script.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.apptest#ScriptType", + "traits": { + "smithy.api#documentation": "

The type of the script.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the script.

" + } + }, + "com.amazonaws.apptest#ScriptSummary": { + "type": "structure", + "members": { + "scriptLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The script location of the script summary.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.apptest#ScriptType", + "traits": { + "smithy.api#documentation": "

The type of the script summary.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the script summary.

" + } + }, + "com.amazonaws.apptest#ScriptType": { + "type": "enum", + "members": { + "SELENIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Selenium" + } + } + } + }, + "com.amazonaws.apptest#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource ID of AWS Application Testing that exceeded the limit.

" + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource type of AWS Application Testing that exceeded the limit.

" + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The service code of AWS Application Testing that exceeded the limit.

" + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The quota code of AWS Application Testing that exceeded the limit.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

One or more quotas for AWS Application Testing exceed the limit.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.apptest#ServiceSettings": { + "type": "structure", + "members": { + "kmsKeyId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The KMS key ID of the service settings.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the service settings.

" + } + }, + "com.amazonaws.apptest#SourceDatabase": { + "type": "enum", + "members": { + "Z_OS_DB2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "z/OS-DB2" + } + } + } + }, + "com.amazonaws.apptest#SourceDatabaseMetadata": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.apptest#SourceDatabase", + "traits": { + "smithy.api#documentation": "

The type of the source database metadata.

", + "smithy.api#required": {} + } + }, + "captureTool": { + "target": "com.amazonaws.apptest#CaptureTool", + "traits": { + "smithy.api#documentation": "

The capture tool of the source database metadata.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the source database metadata.

" + } + }, + "com.amazonaws.apptest#StartTestRun": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#StartTestRunRequest" + }, + "output": { + "target": "com.amazonaws.apptest#StartTestRunResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], + "aws.iam#iamAction": { + "name": "StartTestRun", + "documentation": "Grants permission to start a test run", + "createsResources": [ + "TestRun" + ], + "requiredActions": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStacks", + "dms:DescribeReplicationTasks", + "dms:StartReplicationTask", + "dms:StopReplicationTask", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServices", + "m2:CreateDataSetImportTask", + "m2:GetApplication", + "m2:GetBatchJobExecution", + "m2:GetDataSetDetails", + "m2:GetDataSetImportTask", + "m2:StartApplication", + "m2:StartBatchJob", + "m2:StopApplication", + "s3:DeleteObject", + "s3:DeleteObjects", + "s3:CopyObject", + "s3:HeadBucket", + "s3:CreateBucket", + "s3:UploadPart", + "s3:HeadObject", + "s3:CreateMultipartUpload", + "s3:CompleteMultipartUpload", + "s3:GetObject", + "s3:ListBucket", + "s3:PutObject", + "s3:ListObjectsV2" + ] + }, + "smithy.api#documentation": "

Starts a test run.

", + "smithy.api#http": { + "method": "POST", + "uri": "/testrun", + "code": 200 + } + } + }, + "com.amazonaws.apptest#StartTestRunRequest": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test run.

", + "smithy.api#required": {} + } + }, + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The configuration ID of the test run.

" + } + }, + "clientToken": { + "target": "com.amazonaws.apptest#IdempotencyTokenString", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The client token of the test run.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the test run.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#StartTestRunResponse": { + "type": "structure", + "members": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test run.

", + "smithy.api#required": {} + } + }, + "testRunStatus": { + "target": "com.amazonaws.apptest#TestRunStatus", + "traits": { + "smithy.api#documentation": "

The test run status of the test run.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#Step": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the step.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the step.

" + } + }, + "action": { + "target": "com.amazonaws.apptest#StepAction", + "traits": { + "smithy.api#documentation": "

The action of the step.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a step.

" + } + }, + "com.amazonaws.apptest#StepAction": { + "type": "union", + "members": { + "resourceAction": { + "target": "com.amazonaws.apptest#ResourceAction", + "traits": { + "smithy.api#documentation": "

The resource action of the step action.

" + } + }, + "mainframeAction": { + "target": "com.amazonaws.apptest#MainframeAction", + "traits": { + "smithy.api#documentation": "

The mainframe action of the step action.

" + } + }, + "compareAction": { + "target": "com.amazonaws.apptest#CompareAction", + "traits": { + "smithy.api#documentation": "

The compare action of the step action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a step action.

" + } + }, + "com.amazonaws.apptest#StepList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Step" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.apptest#StepRunStatus": { + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Running" + } + } + } + }, + "com.amazonaws.apptest#StepRunSummary": { + "type": "union", + "members": { + "mainframeAction": { + "target": "com.amazonaws.apptest#MainframeActionSummary", + "traits": { + "smithy.api#documentation": "

The mainframe action of the step run summary.

" + } + }, + "compareAction": { + "target": "com.amazonaws.apptest#CompareActionSummary", + "traits": { + "smithy.api#documentation": "

The compare action of the step run summary.

" + } + }, + "resourceAction": { + "target": "com.amazonaws.apptest#ResourceActionSummary", + "traits": { + "smithy.api#documentation": "

The resource action of the step run summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines the step run summary.

" + } + }, + "com.amazonaws.apptest#String100": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\S{1,100}$" + } + }, + "com.amazonaws.apptest#String50": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\S{1,50}$" + } + }, + "com.amazonaws.apptest#TN3270": { + "type": "structure", + "members": { + "script": { + "target": "com.amazonaws.apptest#Script", + "traits": { + "smithy.api#documentation": "

The script of the TN3270 protocol.

", + "smithy.api#required": {} + } + }, + "exportDataSetNames": { + "target": "com.amazonaws.apptest#ExportDataSetNames", + "traits": { + "smithy.api#documentation": "

The data set names of the TN3270 protocol.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the TN3270 protocol.

" + } + }, + "com.amazonaws.apptest#TN3270StepInput": { + "type": "structure", + "members": { + "resource": { + "target": "com.amazonaws.apptest#MainframeResourceSummary", + "traits": { + "smithy.api#documentation": "

The resource of the TN3270 step input.

", + "smithy.api#required": {} + } + }, + "script": { + "target": "com.amazonaws.apptest#ScriptSummary", + "traits": { + "smithy.api#documentation": "

The script of the TN3270 step input.

", + "smithy.api#required": {} + } + }, + "exportDataSetNames": { + "target": "com.amazonaws.apptest#ExportDataSetNames", + "traits": { + "smithy.api#documentation": "

The export data set names of the TN3270 step input.

" + } + }, + "properties": { + "target": "com.amazonaws.apptest#MainframeActionProperties", + "traits": { + "smithy.api#documentation": "

The properties of the TN3270 step input.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a TN3270 step input.

" + } + }, + "com.amazonaws.apptest#TN3270StepOutput": { + "type": "structure", + "members": { + "dataSetExportLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The data set export location of the TN3270 step output.

" + } + }, + "dmsOutputLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The DMS output location of the TN3270 step output.

" + } + }, + "dataSetDetails": { + "target": "com.amazonaws.apptest#DataSetList", + "traits": { + "smithy.api#documentation": "

The data set details of the TN3270 step output.

" + } + }, + "scriptOutputLocation": { + "target": "com.amazonaws.apptest#S3Uri", + "traits": { + "smithy.api#documentation": "

The script output location of the TN3270 step output.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a TN3270 step output.

" + } + }, + "com.amazonaws.apptest#TN3270Summary": { + "type": "structure", + "members": { + "stepInput": { + "target": "com.amazonaws.apptest#TN3270StepInput", + "traits": { + "smithy.api#documentation": "

The step input of the TN3270 summary.

", + "smithy.api#required": {} + } + }, + "stepOutput": { + "target": "com.amazonaws.apptest#TN3270StepOutput", + "traits": { + "smithy.api#documentation": "

The step output of the TN3270 summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a TN3270 summary.

" + } + }, + "com.amazonaws.apptest#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:).+$" + } + }, + "com.amazonaws.apptest#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TagKey" + } + }, + "com.amazonaws.apptest#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.apptest#TagKey" + }, + "value": { + "target": "com.amazonaws.apptest#TagValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.apptest#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.apptest#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:TagKeys" + ], + "aws.iam#iamAction": { + "name": "TagResource", + "documentation": "Grants permission to tag a resource", + "requiredActions": [], + "resources": { + "required": {}, + "optional": {} + } + }, + "smithy.api#documentation": "

Specifies tags of a resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{resourceArn}", + "code": 200 + } + } + }, + "com.amazonaws.apptest#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the tag resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.apptest#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.api#data": "tagging", + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.apptest#TestCase", + "ids": { + "testCaseId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.apptest#TestSuite", + "ids": { + "testSuiteId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.apptest#TestConfiguration", + "ids": { + "testConfigurationId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.apptest#TestRun", + "ids": { + "testRunId": "resourceArn" + } + } + ] + } + }, + "com.amazonaws.apptest#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.apptest#TargetDatabase": { + "type": "enum", + "members": { + "POSTGRESQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PostgreSQL" + } + } + } + }, + "com.amazonaws.apptest#TargetDatabaseMetadata": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.apptest#TargetDatabase", + "traits": { + "smithy.api#documentation": "

The type of the target database metadata.

", + "smithy.api#required": {} + } + }, + "captureTool": { + "target": "com.amazonaws.apptest#CaptureTool", + "traits": { + "smithy.api#documentation": "

The capture tool of the target database metadata.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a target database metadata.

" + } + }, + "com.amazonaws.apptest#TestCase": { + "type": "resource", + "identifiers": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "create": { + "target": "com.amazonaws.apptest#CreateTestCase" + }, + "read": { + "target": "com.amazonaws.apptest#GetTestCase" + }, + "update": { + "target": "com.amazonaws.apptest#UpdateTestCase" + }, + "delete": { + "target": "com.amazonaws.apptest#DeleteTestCase" + }, + "list": { + "target": "com.amazonaws.apptest#ListTestCases" + }, + "traits": { + "aws.api#arn": { + "template": "testcase/{testCaseId}" + }, + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}" + ], + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.apptest#TestCaseIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "com.amazonaws.apptest#TestCaseLatestVersion": { + "type": "structure", + "members": { + "version": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The version of the test case latest version.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestCaseLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test case latest version.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test case latest version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the latest version of a test case.

" + } + }, + "com.amazonaws.apptest#TestCaseLifecycle": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" + } + } + } + }, + "com.amazonaws.apptest#TestCaseList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "com.amazonaws.apptest#TestCaseRunStatus": { + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" + } + }, + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Running" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + } + } + }, + "com.amazonaws.apptest#TestCaseRunSummary": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test case run summary.

", + "smithy.api#required": {} + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test case version of the test case run summary.

", + "smithy.api#required": {} + } + }, + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test case run summary.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestCaseRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the test case run summary.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test case run summary.

" + } + }, + "runStartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run start time of the test case run summary.

", + "smithy.api#required": {} + } + }, + "runEndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run end time of the test case run summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the test case run summary.

" + } + }, + "com.amazonaws.apptest#TestCaseRunSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TestCaseRunSummary" + } + }, + "com.amazonaws.apptest#TestCaseSummary": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test case summary.

", + "smithy.api#required": {} + } + }, + "testCaseArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The test case Amazon Resource Name (ARN) of the test case summary.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test case summary.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test case summary.

" + } + }, + "latestVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The latest version of the test case summary.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestCaseLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test case summary.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the test case summary.

", + "smithy.api#required": {} + } + }, + "lastUpdateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last update time of the test case summary.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a test case summary.

" + } + }, + "com.amazonaws.apptest#TestCaseSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TestCaseSummary" + } + }, + "com.amazonaws.apptest#TestCases": { + "type": "union", + "members": { + "sequential": { + "target": "com.amazonaws.apptest#TestCaseList", + "traits": { + "smithy.api#documentation": "

The sequential list of test cases.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies test cases.

" + } + }, + "com.amazonaws.apptest#TestConfiguration": { + "type": "resource", + "identifiers": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "create": { + "target": "com.amazonaws.apptest#CreateTestConfiguration" + }, + "read": { + "target": "com.amazonaws.apptest#GetTestConfiguration" + }, + "update": { + "target": "com.amazonaws.apptest#UpdateTestConfiguration" + }, + "delete": { + "target": "com.amazonaws.apptest#DeleteTestConfiguration" + }, + "list": { + "target": "com.amazonaws.apptest#ListTestConfigurations" + }, + "traits": { + "aws.api#arn": { + "template": "testconfiguration/{testConfigurationId}" + }, + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}" + ], + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.apptest#TestConfigurationIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "com.amazonaws.apptest#TestConfigurationLatestVersion": { + "type": "structure", + "members": { + "version": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The version of the test configuration latest version.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestConfigurationLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test configuration latest version.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test configuration latest version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the latest version of the test configuration.

" + } + }, + "com.amazonaws.apptest#TestConfigurationLifecycle": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" + } + } + } + }, + "com.amazonaws.apptest#TestConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TestConfigurationSummary" + } + }, + "com.amazonaws.apptest#TestConfigurationSummary": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test configuration ID of the test configuration summary.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test configuration summary.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test configuration summary.

" + } + }, + "latestVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The latest version of the test configuration summary.

", + "smithy.api#required": {} + } + }, + "testConfigurationArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The test configuration ARN of the test configuration summary.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestConfigurationLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test configuration summary.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the test configuration summary.

", + "smithy.api#required": {} + } + }, + "lastUpdateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last update time of the test configuration summary.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a test configuration summary.

" + } + }, + "com.amazonaws.apptest#TestRun": { + "type": "resource", + "identifiers": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "create": { + "target": "com.amazonaws.apptest#StartTestRun" + }, + "delete": { + "target": "com.amazonaws.apptest#DeleteTestRun" + }, + "list": { + "target": "com.amazonaws.apptest#ListTestRuns" + }, + "operations": [ + { + "target": "com.amazonaws.apptest#GetTestRunStep" + }, + { + "target": "com.amazonaws.apptest#ListTestRunSteps" + }, + { + "target": "com.amazonaws.apptest#ListTestRunTestCases" + } + ], + "traits": { + "aws.api#arn": { + "template": "testrun/{testRunId}" + }, + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}" + ], + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.apptest#TestRunIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "com.amazonaws.apptest#TestRunStatus": { + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" + } + }, + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Running" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" + } + } + } + }, + "com.amazonaws.apptest#TestRunStepSummary": { + "type": "structure", + "members": { + "stepName": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The step name of the test run step summary.

", + "smithy.api#required": {} + } + }, + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test run step summary.

", + "smithy.api#required": {} + } + }, + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test run step summary.

" + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test case version of the test run step summary.

" + } + }, + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test run step summary.

" + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test suite version of the test run step summary.

" + } + }, + "beforeStep": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The before step of the test run step summary.

" + } + }, + "afterStep": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The after step of the test run step summary.

" + } + }, + "status": { + "target": "com.amazonaws.apptest#StepRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the test run step summary.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test run step summary.

" + } + }, + "runStartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run start time of the test run step summary.

", + "smithy.api#required": {} + } + }, + "runEndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run end time of the test run step summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a test run step summary.

" + } + }, + "com.amazonaws.apptest#TestRunStepSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TestRunStepSummary" + } + }, + "com.amazonaws.apptest#TestRunSummary": { + "type": "structure", + "members": { + "testRunId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test run ID of the test run summary.

", + "smithy.api#required": {} + } + }, + "testRunArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The test run ARN of the test run summary.

", + "smithy.api#required": {} + } + }, + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test run summary.

", + "smithy.api#required": {} + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test suite version of the test run summary.

", + "smithy.api#required": {} + } + }, + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test configuration ID of the test run summary.

" + } + }, + "testConfigurationVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test configuration version of the test run summary.

" + } + }, + "status": { + "target": "com.amazonaws.apptest#TestRunStatus", + "traits": { + "smithy.api#documentation": "

The status of the test run summary.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test run summary.

" + } + }, + "runStartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run start time of the test run summary.

", + "smithy.api#required": {} + } + }, + "runEndTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The run end time of the test run summary.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a test run summary.

" + } + }, + "com.amazonaws.apptest#TestRunSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TestRunSummary" + } + }, + "com.amazonaws.apptest#TestSuite": { + "type": "resource", + "identifiers": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "create": { + "target": "com.amazonaws.apptest#CreateTestSuite" + }, + "read": { + "target": "com.amazonaws.apptest#GetTestSuite" + }, + "update": { + "target": "com.amazonaws.apptest#UpdateTestSuite" + }, + "delete": { + "target": "com.amazonaws.apptest#DeleteTestSuite" + }, + "list": { + "target": "com.amazonaws.apptest#ListTestSuites" + }, + "traits": { + "aws.api#arn": { + "template": "testsuite/{testSuiteId}" + }, + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}" + ], + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.apptest#TestSuiteIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#Identifier" + } + }, + "com.amazonaws.apptest#TestSuiteLatestVersion": { + "type": "structure", + "members": { + "version": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The version of the test suite latest version.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestSuiteLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test suite latest version.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test suite latest version.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the latest version of a test suite.

" + } + }, + "com.amazonaws.apptest#TestSuiteLifecycle": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Creating" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Updating" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" + } + } + } + }, + "com.amazonaws.apptest#TestSuiteList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#TestSuiteSummary" + } + }, + "com.amazonaws.apptest#TestSuiteSummary": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test suite summary.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.apptest#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the test suite summary.

", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status reason of the test suite summary.

" + } + }, + "latestVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The latest version of the test suite summary.

", + "smithy.api#required": {} + } + }, + "testSuiteArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The test suite Amazon Resource Name (ARN) of the test suite summary.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.apptest#TestSuiteLifecycle", + "traits": { + "smithy.api#documentation": "

The status of the test suite summary.

", + "smithy.api#required": {} + } + }, + "creationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the test suite summary.

", + "smithy.api#required": {} + } + }, + "lastUpdateTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last update time of the test suite summary.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the test suite summary.

" + } + }, + "com.amazonaws.apptest#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The service code of requests that exceed the limit.

" + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The quota code of requests that exceed the limit.

" + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of seconds to wait before retrying, for requests that exceed the limit.

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

The number of requests made exceeds the limit.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.apptest#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.apptest#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:TagKeys" + ], + "aws.iam#iamAction": { + "documentation": "Grants permission to untag a resource" + }, + "smithy.api#documentation": "

Untags a resource.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.apptest#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.apptest#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.apptest#TagKeyList", + "traits": { + "smithy.api#documentation": "

The tag keys of the resource.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "aws.api#data": "tagging", + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.apptest#TestCase", + "ids": { + "testCaseId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.apptest#TestSuite", + "ids": { + "testSuiteId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.apptest#TestConfiguration", + "ids": { + "testConfigurationId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.apptest#TestRun", + "ids": { + "testRunId": "resourceArn" + } + } + ] + } + }, + "com.amazonaws.apptest#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#UpdateTestCase": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#UpdateTestCaseRequest" + }, + "output": { + "target": "com.amazonaws.apptest#UpdateTestCaseResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "UpdateTestCase", + "documentation": "Grants permission to update a test case" + }, + "smithy.api#documentation": "

Updates a test case.

", + "smithy.api#http": { + "method": "PATCH", + "uri": "/testcases/{testCaseId}", + "code": 200 + } + } + }, + "com.amazonaws.apptest#UpdateTestCaseRequest": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test case.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test case.

" + } + }, + "steps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The steps of the test case.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#UpdateTestCaseResponse": { + "type": "structure", + "members": { + "testCaseId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test case ID of the test case.

", + "smithy.api#required": {} + } + }, + "testCaseVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test case version of the test case.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#UpdateTestConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#UpdateTestConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.apptest#UpdateTestConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "UpdateTestConfiguration", + "documentation": "Grants permission to update a test configuration" + }, + "smithy.api#documentation": "

Updates a test configuration.

", + "smithy.api#http": { + "method": "PATCH", + "uri": "/testconfigurations/{testConfigurationId}", + "code": 200 + } + } + }, + "com.amazonaws.apptest#UpdateTestConfigurationRequest": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test configuration ID of the test configuration.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test configuration.

" + } + }, + "resources": { + "target": "com.amazonaws.apptest#ResourceList", + "traits": { + "smithy.api#documentation": "

The resources of the test configuration.

" + } + }, + "properties": { + "target": "com.amazonaws.apptest#Properties", + "traits": { + "smithy.api#documentation": "

The properties of the test configuration.

" + } + }, + "serviceSettings": { + "target": "com.amazonaws.apptest#ServiceSettings", + "traits": { + "smithy.api#documentation": "

The service settings of the test configuration.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#UpdateTestConfigurationResponse": { + "type": "structure", + "members": { + "testConfigurationId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The configuration ID of the test configuration.

", + "smithy.api#required": {} + } + }, + "testConfigurationVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The configuration version of the test configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#UpdateTestSuite": { + "type": "operation", + "input": { + "target": "com.amazonaws.apptest#UpdateTestSuiteRequest" + }, + "output": { + "target": "com.amazonaws.apptest#UpdateTestSuiteResponse" + }, + "errors": [ + { + "target": "com.amazonaws.apptest#AccessDeniedException" + }, + { + "target": "com.amazonaws.apptest#ConflictException" + }, + { + "target": "com.amazonaws.apptest#InternalServerException" + }, + { + "target": "com.amazonaws.apptest#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.apptest#ThrottlingException" + }, + { + "target": "com.amazonaws.apptest#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "UpdateTestSuite", + "documentation": "Grants permission to update a test suite" + }, + "smithy.api#documentation": "

Updates a test suite.

", + "smithy.api#http": { + "method": "PATCH", + "uri": "/testsuites/{testSuiteId}", + "code": 200 + } + } + }, + "com.amazonaws.apptest#UpdateTestSuiteRequest": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test suite.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.apptest#ResourceDescription", + "traits": { + "smithy.api#documentation": "

The description of the test suite.

" + } + }, + "beforeSteps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The before steps for the test suite.

" + } + }, + "afterSteps": { + "target": "com.amazonaws.apptest#StepList", + "traits": { + "smithy.api#documentation": "

The after steps of the test suite.

" + } + }, + "testCases": { + "target": "com.amazonaws.apptest#TestCases", + "traits": { + "smithy.api#documentation": "

The test cases in the test suite.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.apptest#UpdateTestSuiteResponse": { + "type": "structure", + "members": { + "testSuiteId": { + "target": "com.amazonaws.apptest#Identifier", + "traits": { + "smithy.api#documentation": "

The test suite ID of the test suite.

", + "smithy.api#required": {} + } + }, + "testSuiteVersion": { + "target": "com.amazonaws.apptest#Version", + "traits": { + "smithy.api#documentation": "

The test suite version of the test suite.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.apptest#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "reason": { + "target": "com.amazonaws.apptest#ValidationExceptionReason", + "traits": { + "smithy.api#documentation": "

The reason for the validation exception.

" + } + }, + "fieldList": { + "target": "com.amazonaws.apptest#ValidationExceptionFieldList", + "traits": { + "smithy.api#documentation": "

The field list of the validation exception.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

One or more parameters provided in the request are not valid.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.apptest#ValidationExceptionField": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the validation exception field.

", + "smithy.api#required": {} + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The message that states why the service validation failed.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a validation exception field.

" + } + }, + "com.amazonaws.apptest#ValidationExceptionFieldList": { + "type": "list", + "member": { + "target": "com.amazonaws.apptest#ValidationExceptionField" + } + }, + "com.amazonaws.apptest#ValidationExceptionReason": { + "type": "enum", + "members": { + "UNKNOWN_OPERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "unknownOperation" + } + }, + "CANNOT_PARSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "cannotParse" + } + }, + "FIELD_VALIDATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "fieldValidationFailed" + } + }, + "OTHER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "other" + } + } + } + }, + "com.amazonaws.apptest#Variable": { + "type": "string", + "traits": { + "smithy.api#pattern": "^\\S{1,1000}$" + } + }, + "com.amazonaws.apptest#Version": { + "type": "integer" + } + } +} \ No newline at end of file diff --git a/models/artifact.json b/models/artifact.json index cd5db0c4fb..2cbf6b493b 100644 --- a/models/artifact.json +++ b/models/artifact.json @@ -1678,6 +1678,12 @@ "traits": { "smithy.api#documentation": "

The message associated with the current upload state.

" } + }, + "acceptanceType": { + "target": "com.amazonaws.artifact#AcceptanceType", + "traits": { + "smithy.api#documentation": "

Acceptance type for report.

" + } } }, "traits": { diff --git a/models/athena.json b/models/athena.json index e4d668b23c..eaee8c9732 100644 --- a/models/athena.json +++ b/models/athena.json @@ -4188,7 +4188,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns query execution runtime statistics related to a single execution of a query if\n you have access to the workgroup in which the query ran. Query execution runtime\n statistics are returned only when QueryExecutionStatus$State is in a\n SUCCEEDED or FAILED state. Stage-level input and output row count and data size\n statistics are not shown when a query has row-level filters defined in Lake\n Formation.

" + "smithy.api#documentation": "

Returns query execution runtime statistics related to a single execution of a query if\n you have access to the workgroup in which the query ran. Statistics from the\n Timeline section of the response object are available as soon as QueryExecutionStatus$State is in a SUCCEEDED or FAILED state. The\n remaining non-timeline statistics in the response (like stage-level input and output row\n count and data size) are updated asynchronously and may not be available immediately\n after a query completes. The non-timeline statistics are also not included when a query\n has row-level filters defined in Lake Formation.
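The timeline/non-timeline distinction above matters when reading the response from client code. A minimal Soto sketch (assuming the generated SotoAthena service and a placeholder query execution ID; neither appears in this patch):

```swift
import SotoAthena

// Minimal sketch: timeline statistics are available once the query reaches a
// SUCCEEDED or FAILED state, while row/stage statistics may still be nil
// immediately afterwards because they are populated asynchronously.
func printRuntimeStatistics(athena: Athena, queryExecutionId: String) async throws {
    let response = try await athena.getQueryRuntimeStatistics(
        .init(queryExecutionId: queryExecutionId)
    )
    if let timeline = response.queryRuntimeStatistics?.timeline {
        print("Engine execution time (ms): \(timeline.engineExecutionTimeInMillis ?? 0)")
    }
    if let rows = response.queryRuntimeStatistics?.rows {
        print("Input rows: \(rows.inputRows ?? 0), output rows: \(rows.outputRows ?? 0)")
    } else {
        print("Row and stage statistics not populated yet; retry later.")
    }
}
```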

" } }, "com.amazonaws.athena#GetQueryRuntimeStatisticsInput": { @@ -4568,7 +4568,7 @@ } ], "traits": { - "smithy.api#documentation": "

Imports a single ipynb file to a Spark enabled workgroup. To import the\n notebook, the request must specify a value for either Payload or NoteBookS3LocationUri. If neither is specified or both are specified, an\n InvalidRequestException occurs. The maximum file size that can be imported is 10\n megabytes. If an ipynb file with the same name already exists in the\n workgroup, throws an error.

" + "smithy.api#documentation": "

Imports a single ipynb file to a Spark enabled workgroup. To import the\n notebook, the request must specify a value for either Payload or\n NoteBookS3LocationUri. If neither is specified or both are specified,\n an InvalidRequestException occurs. The maximum file size that can be\n imported is 10 megabytes. If an ipynb file with the same name already\n exists in the workgroup, throws an error.
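Because exactly one of Payload and NotebookS3LocationUri must be provided, a caller has to choose one up front. A minimal Soto sketch (assuming the generated SotoAthena service and Soto 6-style client setup; the bucket, notebook name, and workgroup are placeholders):

```swift
import SotoAthena

// Minimal sketch: import a notebook from S3. Passing both `payload` and
// `notebookS3LocationUri` (or neither) results in an InvalidRequestException.
func importNotebookFromS3() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let athena = Athena(client: client, region: .uswest2)

    let response = try await athena.importNotebook(.init(
        name: "example-notebook",                                   // placeholder
        notebookS3LocationUri: "s3://example-bucket/example.ipynb", // placeholder
        type: .ipynb,
        workGroup: "example-spark-workgroup"                        // placeholder
    ))
    print("Imported notebook: \(response.notebookId ?? "unknown")")
}
```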

" } }, "com.amazonaws.athena#ImportNotebookInput": { @@ -4591,7 +4591,7 @@ "Payload": { "target": "com.amazonaws.athena#Payload", "traits": { - "smithy.api#documentation": "

The notebook content to be imported. The payload must be in ipynb format.

" + "smithy.api#documentation": "

The notebook content to be imported. The payload must be in ipynb\n format.

" } }, "Type": { @@ -4604,7 +4604,7 @@ "NotebookS3LocationUri": { "target": "com.amazonaws.athena#S3Uri", "traits": { - "smithy.api#documentation": "

A URI that specifies the Amazon S3 location of a notebook file in ipynb format.

" + "smithy.api#documentation": "

A URI that specifies the Amazon S3 location of a notebook file in\n ipynb format.

" } }, "ClientRequestToken": { @@ -5229,7 +5229,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListNamedQueriesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.athena#ListNamedQueriesInput": { @@ -6212,7 +6225,7 @@ "min": 1, "max": 255 }, - "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$" + "smithy.api#pattern": "^(?!.*[/:\\\\])[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]+$" } }, "com.amazonaws.athena#NotebookSessionSummary": { @@ -6950,7 +6963,7 @@ "OutputLocation": { "target": "com.amazonaws.athena#ResultOutputLocation", "traits": { - "smithy.api#documentation": "

The location in Amazon S3 where your query and calculation results are stored,\n such as s3://path/to/query/bucket/. To run the query, you must specify the\n query results location using one of the ways: either for individual queries using either\n this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena\n issues an error that no output location is provided. For more information, see Working with query\n results, recent queries, and output files. If workgroup settings override\n client-side settings, then the query uses the settings specified for the workgroup. See\n WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "smithy.api#documentation": "

The location in Amazon S3 where your query and calculation results are stored,\n such as s3://path/to/query/bucket/. To run the query, you must specify the\n query results location using one of the ways: either for individual queries using either\n this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none of them is set, Athena\n issues an error that no output location is provided. If workgroup settings override\n client-side settings, then the query uses the settings specified for the workgroup. See\n WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" } }, "EncryptionConfiguration": { @@ -6982,7 +6995,7 @@ "OutputLocation": { "target": "com.amazonaws.athena#ResultOutputLocation", "traits": { - "smithy.api#documentation": "

The location in Amazon S3 where your query and calculation results are stored,\n such as s3://path/to/query/bucket/. For more information, see Working with query\n results, recent queries, and output files. If workgroup settings override\n client-side settings, then the query uses the location for the query results and the\n encryption configuration that are specified for the workgroup. The \"workgroup settings\n override\" is specified in EnforceWorkGroupConfiguration (true/false) in the\n WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" + "smithy.api#documentation": "

The location in Amazon S3 where your query and calculation results are stored,\n such as s3://path/to/query/bucket/. If workgroup settings override\n client-side settings, then the query uses the location for the query results and the\n encryption configuration that are specified for the workgroup. The \"workgroup settings\n override\" is specified in EnforceWorkGroupConfiguration (true/false) in the\n WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.

" } }, "RemoveOutputLocation": { @@ -8720,7 +8733,7 @@ "ResultConfiguration": { "target": "com.amazonaws.athena#ResultConfiguration", "traits": { - "smithy.api#documentation": "

The configuration for the workgroup, which includes the location in Amazon S3\n where query and calculation results are stored and the encryption option, if any, used\n for query and calculation results. To run the query, you must specify the query results\n location using one of the ways: either in the workgroup using this setting, or for\n individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided. For more\n information, see Working with query results, recent queries, and output files.

" + "smithy.api#documentation": "

The configuration for the workgroup, which includes the location in Amazon S3\n where query and calculation results are stored and the encryption option, if any, used\n for query and calculation results. To run the query, you must specify the query results\n location using one of the ways: either in the workgroup using this setting, or for\n individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them is set, Athena issues an error that no output location is provided.

" } }, "EnforceWorkGroupConfiguration": { diff --git a/models/auditmanager.json b/models/auditmanager.json index 8a990bb257..003dfcb8a5 100644 --- a/models/auditmanager.json +++ b/models/auditmanager.json @@ -1977,7 +1977,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2020,7 +2019,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2033,7 +2033,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2047,7 +2046,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2070,7 +2068,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2105,7 +2102,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -2116,14 +2112,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2137,14 +2135,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -2153,11 +2149,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -2168,14 +2164,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -2189,7 +2187,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -2209,7 +2206,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -2220,14 +2216,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -2238,9 +2236,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -2868,12 +2868,28 @@ "traits": { "smithy.api#documentation": "

The tags associated with the control.

" } + }, + "state": { + "target": "com.amazonaws.auditmanager#ControlState", + "traits": { + "smithy.api#documentation": "

The state of the control. The END_OF_SUPPORT state is applicable to\n standard controls only. This state indicates that the standard control can still be used to\n collect evidence, but Audit Manager is no longer updating or maintaining that\n control.

" + } } }, "traits": { "smithy.api#documentation": "

A control in Audit Manager.

" } }, + "com.amazonaws.auditmanager#ControlCatalogId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 13, + "max": 2048 + }, + "smithy.api#pattern": "^arn:.*:controlcatalog:.*|UNCATEGORIZED$" + } + }, "com.amazonaws.auditmanager#ControlComment": { "type": "structure", "members": { @@ -2924,22 +2940,33 @@ "min": 0, "max": 1000 }, - "smithy.api#pattern": "^[\\w\\W\\s\\S]*$" + "smithy.api#pattern": "^[\\w\\W\\s\\S]*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.auditmanager#ControlDomainId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 13, + "max": 2048 + }, + "smithy.api#pattern": "^arn:.*:controlcatalog:.*:.*:domain/.*|UNCATEGORIZED|^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" } }, "com.amazonaws.auditmanager#ControlDomainInsights": { "type": "structure", "members": { "name": { - "target": "com.amazonaws.auditmanager#NonEmptyString", + "target": "com.amazonaws.auditmanager#String", "traits": { "smithy.api#documentation": "

The name of the control domain.

" } }, "id": { - "target": "com.amazonaws.auditmanager#UUID", + "target": "com.amazonaws.auditmanager#ControlDomainId", "traits": { - "smithy.api#documentation": "

The unique identifier for the control domain.

" + "smithy.api#documentation": "

The unique identifier for the control domain. Audit Manager supports the control\n domains that are provided by Amazon Web Services Control Catalog. For information about how\n to find a list of available control domains, see \n ListDomains\n in the Amazon Web Services Control Catalog API\n Reference.

" } }, "controlsCountByNoncompliantEvidence": { @@ -2993,13 +3020,13 @@ "type": "structure", "members": { "name": { - "target": "com.amazonaws.auditmanager#NonEmptyString", + "target": "com.amazonaws.auditmanager#String", "traits": { "smithy.api#documentation": "

The name of the assessment control.

" } }, "id": { - "target": "com.amazonaws.auditmanager#UUID", + "target": "com.amazonaws.auditmanager#ControlDomainId", "traits": { "smithy.api#documentation": "

The unique identifier for the assessment control.

" } @@ -3031,13 +3058,13 @@ "type": "structure", "members": { "name": { - "target": "com.amazonaws.auditmanager#NonEmptyString", + "target": "com.amazonaws.auditmanager#String", "traits": { "smithy.api#documentation": "

The name of the control.

" } }, "id": { - "target": "com.amazonaws.auditmanager#UUID", + "target": "com.amazonaws.auditmanager#ControlDomainId", "traits": { "smithy.api#documentation": "

The unique identifier for the control.

" } @@ -3083,13 +3110,13 @@ "sourceSetUpOption": { "target": "com.amazonaws.auditmanager#SourceSetUpOption", "traits": { - "smithy.api#documentation": "

The setup option for the data source. This option reflects if the evidence collection\n is automated or manual.

" + "smithy.api#documentation": "

The setup option for the data source. This option reflects if the evidence collection\n method is automated or manual. If you don’t provide a value for\n sourceSetUpOption, Audit Manager automatically infers and populates\n the correct value based on the sourceType that you specify.

" } }, "sourceType": { "target": "com.amazonaws.auditmanager#SourceType", "traits": { - "smithy.api#documentation": "

Specifies one of the five data source types for evidence collection.

" + "smithy.api#documentation": "

Specifies which type of data source is used to collect evidence.

  • The source can be an individual data source type, such as\n AWS_Cloudtrail, AWS_Config,\n AWS_Security_Hub, AWS_API_Call, or MANUAL.
  • The source can also be a managed grouping of data sources, such as a\n Core_Control or a Common_Control.

" } }, "sourceKeyword": { @@ -3309,6 +3336,23 @@ "smithy.api#pattern": "^[a-zA-Z_0-9-\\s.,]+$" } }, + "com.amazonaws.auditmanager#ControlState": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "END_OF_SUPPORT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "END_OF_SUPPORT" + } + } + } + }, "com.amazonaws.auditmanager#ControlStatus": { "type": "enum", "members": { @@ -3346,6 +3390,12 @@ "traits": { "smithy.api#enumValue": "Custom" } + }, + "CORE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Core" + } } } }, @@ -3387,6 +3437,9 @@ { "target": "com.amazonaws.auditmanager#ServiceQuotaExceededException" }, + { + "target": "com.amazonaws.auditmanager#ThrottlingException" + }, { "target": "com.amazonaws.auditmanager#ValidationException" } @@ -3743,13 +3796,13 @@ "sourceSetUpOption": { "target": "com.amazonaws.auditmanager#SourceSetUpOption", "traits": { - "smithy.api#documentation": "

The setup option for the data source, which reflects if the evidence collection is\n automated or manual.

" + "smithy.api#documentation": "

The setup option for the data source. This option reflects if the evidence collection\n method is automated or manual. If you don’t provide a value for\n sourceSetUpOption, Audit Manager automatically infers and populates\n the correct value based on the sourceType that you specify.

" } }, "sourceType": { "target": "com.amazonaws.auditmanager#SourceType", "traits": { - "smithy.api#documentation": "

Specifies one of the five types of data sources for evidence collection.

" + "smithy.api#documentation": "

Specifies which type of data source is used to collect evidence.

  • The source can be an individual data source type, such as\n AWS_Cloudtrail, AWS_Config,\n AWS_Security_Hub, AWS_API_Call, or MANUAL.
  • The source can also be a managed grouping of data sources, such as a\n Core_Control or a Common_Control.

" } }, "sourceKeyword": { @@ -3769,7 +3822,7 @@ } }, "traits": { - "smithy.api#documentation": "

The control mapping fields that represent the source for evidence collection, along\n with related parameters and metadata. This doesn't contain mappingID.

" + "smithy.api#documentation": "

The mapping attributes that determine the evidence source for a given control, along\n with related parameters and metadata. This doesn't contain mappingID.

" } }, "com.amazonaws.auditmanager#CreateControlMappingSources": { @@ -3905,6 +3958,41 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.auditmanager#DataSourceType": { + "type": "enum", + "members": { + "AWS_CLOUDTRAIL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_Cloudtrail" + } + }, + "AWS_CONFIG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_Config" + } + }, + "AWS_SECURITY_HUB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_Security_Hub" + } + }, + "AWS_API_CALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_API_Call" + } + }, + "MANUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MANUAL" + } + } + } + }, "com.amazonaws.auditmanager#DefaultExportDestination": { "type": "structure", "members": { @@ -6262,7 +6350,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a list of all of the Amazon Web Services that you can choose to include in\n your assessment. When you create an assessment, specify which of these services you want to include to\n narrow the assessment's scope.

", + "smithy.api#documentation": "

Gets a list of the Amazon Web Services from which Audit Manager can collect\n evidence.

\n

Audit Manager defines which Amazon Web Services are in scope for an\n assessment. Audit Manager infers this scope by examining the assessment’s controls and\n their data sources, and then mapping this information to one or more of the corresponding\n Amazon Web Services that are in this list.

\n \n

For information about why it's no longer possible to specify services in scope manually, see\n I can't edit the services in scope for my assessment in\n the Troubleshooting section of the Audit Manager user\n guide.

\n
", "smithy.api#http": { "method": "GET", "uri": "/services", @@ -6512,7 +6600,7 @@ "min": 1, "max": 100 }, - "smithy.api#pattern": "^[a-zA-Z_0-9-\\s().]+$" + "smithy.api#pattern": "^[a-zA-Z_0-9-\\s().:\\/]+$" } }, "com.amazonaws.auditmanager#Keywords": { @@ -6582,9 +6670,9 @@ "type": "structure", "members": { "controlDomainId": { - "target": "com.amazonaws.auditmanager#UUID", + "target": "com.amazonaws.auditmanager#ControlDomainId", "traits": { - "smithy.api#documentation": "

The unique identifier for the control domain.

", + "smithy.api#documentation": "

The unique identifier for the control domain.

\n

Audit Manager supports the control domains that are provided by Amazon Web Services\n Control Catalog. For information about how to find a list of available control domains, see\n \n ListDomains\n in the Amazon Web Services Control\n Catalog API Reference.

", "smithy.api#httpQuery": "controlDomainId", "smithy.api#required": {} } @@ -6988,7 +7076,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the latest analytics data for control domains across all of your active\n assessments.

\n \n

A control domain is listed only if at least one of the controls within that domain\n collected evidence on the lastUpdated date of\n controlDomainInsights. If this condition isn’t met, no data is listed\n for that control domain.

\n
", + "smithy.api#documentation": "

Lists the latest analytics data for control domains across all of your active\n assessments.

\n

Audit Manager supports the control domains that are provided by Amazon Web Services\n Control Catalog. For information about how to find a list of available control domains, see\n \n ListDomains\n in the Amazon Web Services Control\n Catalog API Reference.

\n \n

A control domain is listed only if at least one of the controls within that domain\n collected evidence on the lastUpdated date of\n controlDomainInsights. If this condition isn’t met, no data is listed\n for that control domain.

\n
", "smithy.api#http": { "method": "GET", "uri": "/insights/control-domains", @@ -7024,7 +7112,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists analytics data for control domains within a specified active assessment.

\n \n

A control domain is listed only if at least one of the controls within that domain\n collected evidence on the lastUpdated date of\n controlDomainInsights. If this condition isn’t met, no data is listed\n for that domain.

\n
", + "smithy.api#documentation": "

Lists analytics data for control domains within a specified active assessment.

\n

Audit Manager supports the control domains that are provided by Amazon Web Services\n Control Catalog. For information about how to find a list of available control domains, see\n \n ListDomains\n in the Amazon Web Services Control\n Catalog API Reference.

\n \n

A control domain is listed only if at least one of the controls within that domain\n collected evidence on the lastUpdated date of\n controlDomainInsights. If this condition isn’t met, no data is listed\n for that domain.

\n
", "smithy.api#http": { "method": "GET", "uri": "/insights/control-domains-by-assessment", @@ -7169,9 +7257,9 @@ "type": "structure", "members": { "controlDomainId": { - "target": "com.amazonaws.auditmanager#UUID", + "target": "com.amazonaws.auditmanager#ControlDomainId", "traits": { - "smithy.api#documentation": "

The unique identifier for the control domain.

", + "smithy.api#documentation": "

The unique identifier for the control domain.

\n

Audit Manager supports the control domains that are provided by Amazon Web Services\n Control Catalog. For information about how to find a list of available control domains, see\n \n ListDomains\n in the Amazon Web Services Control\n Catalog API Reference.

", "smithy.api#httpQuery": "controlDomainId", "smithy.api#required": {} } @@ -7254,7 +7342,7 @@ "controlType": { "target": "com.amazonaws.auditmanager#ControlType", "traits": { - "smithy.api#documentation": "

The type of control, such as a standard control or a custom control.

", + "smithy.api#documentation": "

A filter that narrows the list of controls to a specific type.

", "smithy.api#httpQuery": "controlType", "smithy.api#required": {} } @@ -7262,16 +7350,23 @@ "nextToken": { "target": "com.amazonaws.auditmanager#Token", "traits": { - "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

", + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

", "smithy.api#httpQuery": "nextToken" } }, "maxResults": { "target": "com.amazonaws.auditmanager#MaxResults", "traits": { - "smithy.api#documentation": "

Represents the maximum number of results on a page or for an API request call.

", + "smithy.api#documentation": "

The maximum number of results on a page or for an API request call.

", "smithy.api#httpQuery": "maxResults" } + }, + "controlCatalogId": { + "target": "com.amazonaws.auditmanager#ControlCatalogId", + "traits": { + "smithy.api#documentation": "

A filter that narrows the list of controls to a specific resource from the Amazon Web Services \n Control Catalog.

\n

To use this parameter, specify the ARN of the Control Catalog resource. You can specify either \n a control domain, a control objective, or a common control. For information about how to find the ARNs\n for these resources, see \n ListDomains\n , \n ListObjectives\n , and \n ListCommonControls\n .

\n \n

You can only filter by one Control Catalog resource at a time. \n Specifying multiple resource ARNs isn’t currently supported. If you want to filter by more \n than one ARN, we recommend that you run the ListControls operation separately \n for each ARN.

\n
\n

Alternatively, specify UNCATEGORIZED to list controls that aren't\n mapped to a Control Catalog resource. For example, this operation might return a list of \n custom controls that don't belong to any control domain or control objective.
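Since only one Control Catalog resource ARN can be supplied per request, listing controls for several resources means one ListControls call per ARN, as the text above suggests. A minimal Soto sketch (assuming the generated SotoAuditManager service; the ARNs and the control type used are placeholders):

```swift
import SotoAuditManager

// Minimal sketch: run ListControls once per Control Catalog resource ARN,
// because filtering by multiple ARNs in a single request isn't supported.
func listControls(auditManager: AuditManager, catalogArns: [String]) async throws {
    for arn in catalogArns {
        let response = try await auditManager.listControls(
            .init(controlCatalogId: arn, controlType: .standard)  // placeholder type
        )
        print("\(arn): \(response.controlMetadataList?.count ?? 0) controls")
    }
}
```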

", + "smithy.api#httpQuery": "controlCatalogId" + } } }, "traits": { @@ -7290,7 +7385,7 @@ "nextToken": { "target": "com.amazonaws.auditmanager#Token", "traits": { - "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

" + "smithy.api#documentation": "

The pagination token that's used to fetch the next set of results.

" } } }, @@ -7318,7 +7413,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of keywords that are pre-mapped to the specified control data source.\n

", + "smithy.api#documentation": "

Returns a list of keywords that are pre-mapped to the specified control data\n source.

", "smithy.api#http": { "method": "GET", "uri": "/dataSourceKeywords", @@ -7335,9 +7430,9 @@ "type": "structure", "members": { "source": { - "target": "com.amazonaws.auditmanager#SourceType", + "target": "com.amazonaws.auditmanager#DataSourceType", "traits": { - "smithy.api#documentation": "

The control mapping data source that the keywords apply to.

", + "smithy.api#documentation": "

The control mapping data source that the keywords apply to.

", "smithy.api#httpQuery": "source", "smithy.api#required": {} } @@ -7367,7 +7462,7 @@ "keywords": { "target": "com.amazonaws.auditmanager#Keywords", "traits": { - "smithy.api#documentation": "

The list of keywords for the event mapping source.

" + "smithy.api#documentation": "

The list of keywords for the control mapping source.

" } }, "nextToken": { @@ -7987,12 +8082,15 @@ "awsServices": { "target": "com.amazonaws.auditmanager#AWSServices", "traits": { - "smithy.api#documentation": "

The Amazon Web Services services that are included in the scope of the assessment.\n

" + "smithy.api#deprecated": { + "message": "You can't specify services in scope when creating/updating an assessment. If you use the parameter to specify one or more AWS services, Audit Manager ignores the input. Instead the value of the parameter will show as empty indicating that the services are defined and managed by Audit Manager." + }, + "smithy.api#documentation": "

The Amazon Web Services services that are included in the scope of the assessment.\n

\n \n

This API parameter is no longer supported. If you use this parameter to specify one\n or more Amazon Web Services, Audit Manager ignores this input. Instead, the\n value for awsServices will show as empty.

\n
" } } }, "traits": { - "smithy.api#documentation": "

The wrapper that contains the Amazon Web Services accounts and services that are in\n scope for the assessment.

", + "smithy.api#documentation": "

The wrapper that contains the Amazon Web Services accounts that are in\n scope for the assessment.

\n \n

You no longer need to specify which Amazon Web Services are in scope when you\n create or update an assessment. Audit Manager infers the services in scope by\n examining your assessment controls and their data sources, and then mapping this\n information to the relevant Amazon Web Services.

\n

If an underlying data source changes for your assessment, we automatically update the\n services scope as needed to reflect the correct Amazon Web Services. This\n ensures that your assessment collects accurate and comprehensive evidence about all of\n the relevant services in your AWS environment.
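In client code this means a Scope only needs the accounts; anything passed in awsServices is ignored. A minimal Soto sketch of building such a scope (assuming the generated SotoAuditManager shapes; the account ID is a placeholder):

```swift
import SotoAuditManager

// Minimal sketch: only accounts are specified; Audit Manager infers the
// services in scope from the assessment's controls and data sources.
let scope = AuditManager.Scope(
    awsAccounts: [.init(id: "111122223333")]  // placeholder account ID
)
```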

\n
", "smithy.api#sensitive": {} } }, @@ -8330,7 +8428,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 100 + "max": 300 } } }, @@ -8383,6 +8481,18 @@ "traits": { "smithy.api#enumValue": "MANUAL" } + }, + "COMMON_CONTROL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Common_Control" + } + }, + "CORE_CONTROL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Core_Control" + } } } }, @@ -8606,7 +8716,7 @@ "traits": { "smithy.api#documentation": "

The request was denied due to request throttling.

", "smithy.api#error": "client", - "smithy.api#httpError": 400 + "smithy.api#httpError": 429 } }, "com.amazonaws.auditmanager#Timestamp": { @@ -8750,6 +8860,9 @@ { "target": "com.amazonaws.auditmanager#ResourceNotFoundException" }, + { + "target": "com.amazonaws.auditmanager#ThrottlingException" + }, { "target": "com.amazonaws.auditmanager#ValidationException" } diff --git a/models/auto-scaling.json b/models/auto-scaling.json index 8786398bda..e9c3eaaa56 100644 --- a/models/auto-scaling.json +++ b/models/auto-scaling.json @@ -483,7 +483,7 @@ } ], "traits": { - "smithy.api#documentation": "

Attaches one or more EC2 instances to the specified Auto Scaling group.

\n

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the\n number of instances being attached. If the number of instances being attached plus the\n desired capacity of the group exceeds the maximum size of the group, the operation\n fails.

\n

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are\n also registered with the load balancer. If there are target groups attached to your Auto Scaling\n group, the instances are also registered with the target groups.

\n

For more information, see Attach EC2 instances to\n your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Attaches one or more EC2 instances to the specified Auto Scaling group.

\n

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the\n number of instances being attached. If the number of instances being attached plus the\n desired capacity of the group exceeds the maximum size of the group, the operation\n fails.

\n

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are\n also registered with the load balancer. If there are target groups attached to your Auto Scaling\n group, the instances are also registered with the target groups.

\n

For more information, see Detach\n or attach instances in the Amazon EC2 Auto Scaling User Guide.
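The behavior described above (desired capacity grows by the number of attached instances, and the call fails if that would exceed the group's maximum size) is a single call from the SDK. A minimal Soto sketch (assuming the generated SotoAutoScaling service; the group name and instance ID are placeholders):

```swift
import SotoAutoScaling

// Minimal sketch: attach an existing EC2 instance to an Auto Scaling group.
// Desired capacity increases by the number of instances attached; the call
// fails if that would exceed the group's maximum size.
func attachInstance(autoScaling: AutoScaling) async throws {
    try await autoScaling.attachInstances(.init(
        autoScalingGroupName: "my-auto-scaling-group",   // placeholder
        instanceIds: ["i-0123456789abcdef0"]             // placeholder
    ))
}
```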

", "smithy.api#examples": [ { "title": "To attach an instance to an Auto Scaling group", @@ -1046,7 +1046,7 @@ "target": "com.amazonaws.autoscaling#XmlStringMaxLen32", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The lifecycle state for the instance. The Quarantined state is not used.\n For information about lifecycle states, see Instance\n lifecycle in the Amazon EC2 Auto Scaling User Guide.

\n

Valid values: Pending | Pending:Wait |\n Pending:Proceed | Quarantined | InService |\n Terminating | Terminating:Wait |\n Terminating:Proceed | Terminated | Detaching\n | Detached | EnteringStandby | Standby |\n Warmed:Pending | Warmed:Pending:Wait |\n Warmed:Pending:Proceed | Warmed:Terminating |\n Warmed:Terminating:Wait | Warmed:Terminating:Proceed |\n Warmed:Terminated | Warmed:Stopped |\n Warmed:Running\n

", + "smithy.api#documentation": "

The lifecycle state for the instance. The Quarantined state is not used.\n For more information, see Amazon EC2 Auto Scaling instance\n lifecycle in the Amazon EC2 Auto Scaling User Guide.

\n

Valid values: Pending | Pending:Wait |\n Pending:Proceed | Quarantined | InService |\n Terminating | Terminating:Wait |\n Terminating:Proceed | Terminated | Detaching\n | Detached | EnteringStandby | Standby |\n Warmed:Pending | Warmed:Pending:Wait |\n Warmed:Pending:Proceed | Warmed:Terminating |\n Warmed:Terminating:Wait | Warmed:Terminating:Proceed |\n Warmed:Terminated | Warmed:Stopped |\n Warmed:Running\n

", "smithy.api#required": {} } }, @@ -1331,7 +1331,7 @@ "name": "autoscaling" }, "aws.protocols#awsQuery": {}, - "smithy.api#documentation": "Amazon EC2 Auto Scaling\n

Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances\n based on user-defined scaling policies, scheduled actions, and health checks.

\n

For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference.

", + "smithy.api#documentation": "Amazon EC2 Auto Scaling\n

Amazon EC2 Auto Scaling is designed to automatically launch and terminate EC2 instances\n based on user-defined scaling policies, scheduled actions, and health checks.

\n

For more information, see the Amazon EC2 Auto Scaling User Guide and the Amazon EC2 Auto Scaling API Reference.

", "smithy.api#title": "Auto Scaling", "smithy.api#xmlNamespace": { "uri": "http://autoscaling.amazonaws.com/doc/2011-01-01/" @@ -2504,14 +2504,14 @@ "VirtualName": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The name of the instance store volume (virtual device) to attach to an instance at\n launch. The name must be in the form ephemeralX where\n X is a number starting from zero (0), for example,\n ephemeral0.

" + "smithy.api#documentation": "

The name of the instance store volume (virtual device) to attach to an instance at\n launch. The name must be in the form ephemeralX where\n X is a number starting from zero (0), for example,\n ephemeral0.

" } }, "DeviceName": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The device name assigned to the volume (for example, /dev/sdh or\n xvdh). For more information, see Device naming on Linux\n instances in the Amazon EC2 User Guide for Linux Instances.

\n \n

To define a block device mapping, set the device name and exactly one of the\n following properties: Ebs, NoDevice, or\n VirtualName.

\n
", + "smithy.api#documentation": "

The device name assigned to the volume (for example, /dev/sdh or\n xvdh). For more information, see Device naming on Linux\n instances in the Amazon EC2 User Guide for Linux Instances.

\n \n

To define a block device mapping, set the device name and exactly one of the\n following properties: Ebs, NoDevice, or\n VirtualName.

\n
", "smithy.api#required": {} } }, @@ -2802,22 +2802,6 @@ "traits": { "smithy.api#documentation": "

\n We strongly recommend using a launch template when calling this operation to ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2.\n

\n

Creates an Auto Scaling group with the specified name and attributes.

\n

If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit,\n call the DescribeAccountLimits API. For information about updating\n this limit, see Quotas for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

If you're new to Amazon EC2 Auto Scaling, see the introductory tutorials in Get started\n with Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

Every Auto Scaling group has three size properties (DesiredCapacity,\n MaxSize, and MinSize). Usually, you set these sizes based\n on a specific number of instances. However, if you configure a mixed instances policy\n that defines weights for the instance types, you must specify these sizes with the same\n units that you use for weighting instances.

", "smithy.api#examples": [ - { - "title": "To create an Auto Scaling group", - "documentation": "This example creates an Auto Scaling group.", - "input": { - "AutoScalingGroupName": "my-auto-scaling-group", - "LaunchTemplate": { - "LaunchTemplateName": "my-template-for-auto-scaling", - "Version": "$Default" - }, - "MinSize": 1, - "MaxSize": 3, - "MaxInstanceLifetime": 2592000, - "DefaultInstanceWarmup": 120, - "VPCZoneIdentifier": "subnet-057fa0918fEXAMPLE" - } - }, { "title": "To create an Auto Scaling group with an attached target group", "documentation": "This example creates an Auto Scaling group and attaches the specified target group.", @@ -2915,6 +2899,22 @@ "DesiredCapacityType": "units", "VPCZoneIdentifier": "subnet-057fa0918fEXAMPLE, subnet-610acd08EXAMPLE" } + }, + { + "title": "To create an Auto Scaling group", + "documentation": "This example creates an Auto Scaling group.", + "input": { + "AutoScalingGroupName": "my-auto-scaling-group", + "LaunchTemplate": { + "LaunchTemplateName": "my-template-for-auto-scaling", + "Version": "$Default" + }, + "MinSize": 1, + "MaxSize": 3, + "MaxInstanceLifetime": 2592000, + "DefaultInstanceWarmup": 120, + "VPCZoneIdentifier": "subnet-057fa0918fEXAMPLE" + } } ] } @@ -2939,7 +2939,7 @@ "LaunchTemplate": { "target": "com.amazonaws.autoscaling#LaunchTemplateSpecification", "traits": { - "smithy.api#documentation": "

Information used to specify the launch template and version to use to launch\n instances.

\n

Conditional: You must specify either a launch template (LaunchTemplate or\n MixedInstancesPolicy) or a launch configuration\n (LaunchConfigurationName or InstanceId).

\n \n

The launch template that is specified must be configured for use with an Auto Scaling\n group. For more information, see Creating a launch\n template for an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

\n
" + "smithy.api#documentation": "

Information used to specify the launch template and version to use to launch\n instances.

\n

Conditional: You must specify either a launch template (LaunchTemplate or\n MixedInstancesPolicy) or a launch configuration\n (LaunchConfigurationName or InstanceId).

\n \n

The launch template that is specified must be configured for use with an Auto Scaling\n group. For more information, see Create a launch\n template for an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

\n
" } }, "MixedInstancesPolicy": { @@ -2951,7 +2951,7 @@ "InstanceId": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen19", "traits": { - "smithy.api#documentation": "

The ID of the instance used to base the launch configuration on. If specified, Amazon\n EC2 Auto Scaling uses the configuration values from the specified instance to create a\n new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. For more information, see Creating an Auto Scaling group using an EC2 instance in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The ID of the instance used to base the launch configuration on. If specified, Amazon\n EC2 Auto Scaling uses the configuration values from the specified instance to create a\n new launch configuration. To get the instance ID, use the Amazon EC2 DescribeInstances API operation. For more information, see Create an Auto Scaling group using parameters from an existing instance in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "MinSize": { @@ -2979,7 +2979,7 @@ "DefaultCooldown": { "target": "com.amazonaws.autoscaling#Cooldown", "traits": { - "smithy.api#documentation": "

\n Only needed if you use simple scaling policies.\n

\n

The amount of time, in seconds, between one scaling activity ending and another one\n starting due to simple scaling policies. For more information, see Scaling cooldowns\n for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

Default: 300 seconds

" + "smithy.api#documentation": "

\n Only needed if you use simple scaling policies.\n

\n

The amount of time, in seconds, between one scaling activity ending and another one\n starting due to simple scaling policies. For more information, see Scaling\n cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

Default: 300 seconds

" } }, "AvailabilityZones": { @@ -3003,7 +3003,7 @@ "HealthCheckType": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen32", "traits": { - "smithy.api#documentation": "

A comma-separated value string of one or more health check types.

\n

The valid values are EC2, ELB, and VPC_LATTICE.\n EC2 is the default health check and cannot be disabled. For more\n information, see Health checks for Auto Scaling\n instances in the Amazon EC2 Auto Scaling User Guide.

\n

Only specify EC2 if you must clear a value that was previously\n set.

" + "smithy.api#documentation": "

A comma-separated value string of one or more health check types.

\n

The valid values are EC2, ELB, and VPC_LATTICE.\n EC2 is the default health check and cannot be disabled. For more\n information, see Health checks\n for instances in an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

\n

Only specify EC2 if you must clear a value that was previously\n set.

" } }, "HealthCheckGracePeriod": { @@ -3027,13 +3027,13 @@ "TerminationPolicies": { "target": "com.amazonaws.autoscaling#TerminationPolicies", "traits": { - "smithy.api#documentation": "

A policy or a list of policies that are used to select the instance to terminate.\n These policies are executed in the order that you list them. For more information, see\n Work with\n Amazon EC2 Auto Scaling termination policies in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid values: Default | AllocationStrategy |\n ClosestToNextInstanceHour | NewestInstance |\n OldestInstance | OldestLaunchConfiguration |\n OldestLaunchTemplate |\n arn:aws:lambda:region:account-id:function:my-function:my-alias\n

" + "smithy.api#documentation": "

A policy or a list of policies that are used to select the instance to terminate.\n These policies are executed in the order that you list them. For more information, see\n Configure\n termination policies for Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid values: Default | AllocationStrategy |\n ClosestToNextInstanceHour | NewestInstance |\n OldestInstance | OldestLaunchConfiguration |\n OldestLaunchTemplate |\n arn:aws:lambda:region:account-id:function:my-function:my-alias\n

" } }, "NewInstancesProtectedFromScaleIn": { "target": "com.amazonaws.autoscaling#InstanceProtected", "traits": { - "smithy.api#documentation": "

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling\n when scaling in. For more information about preventing instances from terminating on\n scale in, see Using\n instance scale-in protection in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling\n when scaling in. For more information about preventing instances from terminating on\n scale in, see Use\n instance scale-in protection in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "CapacityRebalance": { @@ -3063,7 +3063,7 @@ "MaxInstanceLifetime": { "target": "com.amazonaws.autoscaling#MaxInstanceLifetime", "traits": { - "smithy.api#documentation": "

The maximum amount of time, in seconds, that an instance can be in service. The\n default is null. If specified, the value must be either 0 or a number equal to or\n greater than 86,400 seconds (1 day). For more information, see Replacing Auto Scaling instances based on maximum instance lifetime in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The maximum amount of time, in seconds, that an instance can be in service. The\n default is null. If specified, the value must be either 0 or a number equal to or\n greater than 86,400 seconds (1 day). For more information, see Replace Auto Scaling instances based on maximum instance lifetime in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "Context": { @@ -3075,7 +3075,7 @@ "DesiredCapacityType": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling\n supports DesiredCapacityType for attribute-based instance type selection\n only. For more information, see Creating\n an Auto Scaling group using attribute-based instance type selection in the\n Amazon EC2 Auto Scaling User Guide.

\n

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of\n instances.

\n

Valid values: units | vcpu | memory-mib\n

" + "smithy.api#documentation": "

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling\n supports DesiredCapacityType for attribute-based instance type selection\n only. For more information, see Create a mixed instances group using attribute-based instance type\n selection in the Amazon EC2 Auto Scaling User Guide.

\n

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of\n instances.

\n

Valid values: units | vcpu | memory-mib\n

" } }, "DefaultInstanceWarmup": { @@ -3118,7 +3118,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a launch configuration.

\n

If you exceed your maximum limit of launch configurations, the call fails. To query\n this limit, call the DescribeAccountLimits API. For information about\n updating this limit, see Quotas for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

For more information, see Launch\n configurations in the Amazon EC2 Auto Scaling User Guide.

\n \n

Amazon EC2 Auto Scaling configures instances launched as part of an Auto Scaling group using either a\n launch template or a launch configuration. We strongly recommend that you do not use\n launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2.\n For information about using launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

\n
", + "smithy.api#documentation": "

Creates a launch configuration.

\n

If you exceed your maximum limit of launch configurations, the call fails. To query\n this limit, call the DescribeAccountLimits API. For information about\n updating this limit, see Quotas for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

For more information, see Launch\n configurations in the Amazon EC2 Auto Scaling User Guide.

\n \n

Amazon EC2 Auto Scaling configures instances launched as part of an Auto Scaling group using either a\n launch template or a launch configuration. We strongly recommend that you do not use\n launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2.\n For information about using launch templates, see Launch templates in the Amazon EC2 Auto Scaling User Guide.

\n
", "smithy.api#examples": [ { "title": "To create a launch configuration", @@ -3150,19 +3150,19 @@ "ImageId": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Machine Image (AMI) that was assigned during registration. For\n more information, see Finding a Linux AMI in the\n Amazon EC2 User Guide for Linux Instances.

\n

If you specify InstanceId, an ImageId is not\n required.

" + "smithy.api#documentation": "

The ID of the Amazon Machine Image (AMI) that was assigned during registration. For\n more information, see Find a Linux AMI in the\n Amazon EC2 User Guide for Linux Instances.

\n

If you specify InstanceId, an ImageId is not\n required.

" } }, "KeyName": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The name of the key pair. For more information, see Amazon EC2 key pairs and Linux\n instances in the Amazon EC2 User Guide for Linux Instances.

" + "smithy.api#documentation": "

The name of the key pair. For more information, see Amazon EC2 key pairs and Amazon EC2\n instances in the Amazon EC2 User Guide for Linux Instances.

" } }, "SecurityGroups": { "target": "com.amazonaws.autoscaling#SecurityGroups", "traits": { - "smithy.api#documentation": "

A list that contains the security group IDs to assign to the instances in the Auto Scaling\n group. For more information, see Control traffic to\n resources using security groups in the Amazon Virtual Private\n Cloud User Guide.

" + "smithy.api#documentation": "

A list that contains the security group IDs to assign to the instances in the Auto Scaling\n group. For more information, see Control traffic to your Amazon Web Services\n resources using security groups in the Amazon Virtual Private\n Cloud User Guide.

" } }, "ClassicLinkVPCId": { @@ -3180,13 +3180,13 @@ "UserData": { "target": "com.amazonaws.autoscaling#XmlStringUserData", "traits": { - "smithy.api#documentation": "

The user data to make available to the launched EC2 instances. For more information,\n see Instance metadata and user data (Linux) and Instance metadata and\n user data (Windows). If you are using a command line tool, base64-encoding\n is performed for you, and you can load the text from a file. Otherwise, you must provide\n base64-encoded text. User data is limited to 16 KB.

" + "smithy.api#documentation": "

The user data to make available to the launched EC2 instances. For more information,\n see Instance metadata and user data (Linux) and Instance metadata and\n user data (Windows). If you are using a command line tool, base64-encoding\n is performed for you, and you can load the text from a file. Otherwise, you must provide\n base64-encoded text. User data is limited to 16 KB.

" } }, "InstanceId": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen19", "traits": { - "smithy.api#documentation": "

The ID of the instance to use to create the launch configuration. The new launch\n configuration derives attributes from the instance, except for the block device\n mapping.

\n

To create a launch configuration with a block device mapping or override any other\n instance attributes, specify them as part of the same request.

\n

For more information, see Creating a launch\n configuration using an EC2 instance in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The ID of the instance to use to create the launch configuration. The new launch\n configuration derives attributes from the instance, except for the block device\n mapping.

\n

To create a launch configuration with a block device mapping or override any other\n instance attributes, specify them as part of the same request.

\n

For more information, see Create a launch\n configuration in the Amazon EC2 Auto Scaling User Guide.

" } }, "InstanceType": { @@ -3198,13 +3198,13 @@ "KernelId": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The ID of the kernel associated with the AMI.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon EC2 User Guide for Linux\n Instances.

\n
" + "smithy.api#documentation": "

The ID of the kernel associated with the AMI.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon EC2 User Guide for Linux Instances.

\n
" } }, "RamdiskId": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The ID of the RAM disk to select.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon EC2 User Guide for Linux\n Instances.

\n
" + "smithy.api#documentation": "

The ID of the RAM disk to select.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon EC2 User Guide for Linux Instances.

\n
" } }, "BlockDeviceMappings": { @@ -3216,13 +3216,13 @@ "InstanceMonitoring": { "target": "com.amazonaws.autoscaling#InstanceMonitoring", "traits": { - "smithy.api#documentation": "

Controls whether instances in this group are launched with detailed\n (true) or basic (false) monitoring.

\n

The default value is true (enabled).

\n \n

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and\n your account is charged a fee. When you disable detailed monitoring, CloudWatch generates\n metrics every 5 minutes. For more information, see Configure\n Monitoring for Auto Scaling Instances in the\n Amazon EC2 Auto Scaling User Guide.

\n
" + "smithy.api#documentation": "

Controls whether instances in this group are launched with detailed\n (true) or basic (false) monitoring.

\n

The default value is true (enabled).

\n \n

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and\n your account is charged a fee. When you disable detailed monitoring, CloudWatch generates\n metrics every 5 minutes. For more information, see Configure\n monitoring for Auto Scaling instances in the\n Amazon EC2 Auto Scaling User Guide.

\n
" } }, "SpotPrice": { "target": "com.amazonaws.autoscaling#SpotPrice", "traits": { - "smithy.api#documentation": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the\n request. Spot Instances are launched when the price you specify exceeds the current Spot\n price. For more information, see Request Spot\n Instances for fault-tolerant and flexible applications in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid Range: Minimum value of 0.001

\n \n

When you change your maximum price by creating a new launch configuration, running\n instances will continue to run as long as the maximum price for those running\n instances is higher than the current Spot price.

\n
" + "smithy.api#documentation": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the\n request. Spot Instances are launched when the price you specify exceeds the current Spot\n price. For more information, see Request Spot\n Instances for fault-tolerant and flexible applications in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid Range: Minimum value of 0.001

\n \n

When you change your maximum price by creating a new launch configuration, running\n instances will continue to run as long as the maximum price for those running\n instances is higher than the current Spot price.

\n
" } }, "IamInstanceProfile": { @@ -3234,25 +3234,25 @@ "EbsOptimized": { "target": "com.amazonaws.autoscaling#EbsOptimized", "traits": { - "smithy.api#documentation": "

Specifies whether the launch configuration is optimized for EBS I/O\n (true) or not (false). The optimization provides dedicated\n throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O\n performance. This optimization is not available with all instance types. Additional fees\n are incurred when you enable EBS optimization for an instance type that is not\n EBS-optimized by default. For more information, see Amazon EBS-optimized instances in\n the Amazon EC2 User Guide for Linux Instances.

\n

The default value is false.

" + "smithy.api#documentation": "

Specifies whether the launch configuration is optimized for EBS I/O\n (true) or not (false). The optimization provides dedicated\n throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O\n performance. This optimization is not available with all instance types. Additional fees\n are incurred when you enable EBS optimization for an instance type that is not\n EBS-optimized by default. For more information, see Amazon EBS-optimized instances\n in the Amazon EC2 User Guide for Linux Instances.

\n

The default value is false.

" } }, "AssociatePublicIpAddress": { "target": "com.amazonaws.autoscaling#AssociatePublicIpAddress", "traits": { - "smithy.api#documentation": "

Specifies whether to assign a public IPv4 address to the group's instances. If the\n instance is launched into a default subnet, the default is to assign a public IPv4\n address, unless you disabled the option to assign a public IPv4 address on the subnet.\n If the instance is launched into a nondefault subnet, the default is not to assign a\n public IPv4 address, unless you enabled the option to assign a public IPv4 address on\n the subnet.

\n

If you specify true, each instance in the Auto Scaling group receives a unique\n public IPv4 address. For more information, see Launching Auto Scaling instances in a\n VPC in the Amazon EC2 Auto Scaling User Guide.

\n

If you specify this property, you must specify at least one subnet for\n VPCZoneIdentifier when you create your group.

" + "smithy.api#documentation": "

Specifies whether to assign a public IPv4 address to the group's instances. If the\n instance is launched into a default subnet, the default is to assign a public IPv4\n address, unless you disabled the option to assign a public IPv4 address on the subnet.\n If the instance is launched into a nondefault subnet, the default is not to assign a\n public IPv4 address, unless you enabled the option to assign a public IPv4 address on\n the subnet.

\n

If you specify true, each instance in the Auto Scaling group receives a unique\n public IPv4 address. For more information, see Provide network connectivity for\n your Auto Scaling instances using Amazon VPC in the\n Amazon EC2 Auto Scaling User Guide.

\n

If you specify this property, you must specify at least one subnet for\n VPCZoneIdentifier when you create your group.

" } }, "PlacementTenancy": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen64", "traits": { - "smithy.api#documentation": "

The tenancy of the instance, either default or dedicated. An\n instance with dedicated tenancy runs on isolated, single-tenant hardware\n and can only be launched into a VPC. To launch dedicated instances into a shared tenancy\n VPC (a VPC with the instance placement tenancy attribute set to default),\n you must set the value of this property to dedicated. For more information,\n see Configuring\n instance tenancy with Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide.

\n

If you specify PlacementTenancy, you must specify at least one subnet for\n VPCZoneIdentifier when you create your group.

\n

Valid values: default | dedicated\n

" + "smithy.api#documentation": "

The tenancy of the instance, either default or dedicated. An\n instance with dedicated tenancy runs on isolated, single-tenant hardware\n and can only be launched into a VPC. To launch dedicated instances into a shared tenancy\n VPC (a VPC with the instance placement tenancy attribute set to default),\n you must set the value of this property to dedicated.

\n

If you specify PlacementTenancy, you must specify at least one subnet for\n VPCZoneIdentifier when you create your group.

\n

Valid values: default | dedicated\n

" } }, "MetadataOptions": { "target": "com.amazonaws.autoscaling#InstanceMetadataOptions", "traits": { - "smithy.api#documentation": "

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The metadata options for the instances. For more information, see Configure the instance metadata options in the\n Amazon EC2 Auto Scaling User Guide.

" } } } @@ -3572,7 +3572,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified scaling policy.

\n

Deleting either a step scaling policy or a simple scaling policy deletes the\n underlying alarm action, but does not delete the alarm, even if it no longer has an\n associated action.

\n

For more information, see Deleting a scaling\n policy in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Deletes the specified scaling policy.

\n

Deleting either a step scaling policy or a simple scaling policy deletes the\n underlying alarm action, but does not delete the alarm, even if it no longer has an\n associated action.

\n

For more information, see Delete a scaling\n policy in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To delete an Auto Scaling policy", @@ -4837,7 +4837,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about the scaling activities in the account and Region.

\n

When scaling events occur, you see a record of the scaling activity in the scaling\n activities. For more information, see Verifying a scaling\n activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

\n

If the scaling event succeeds, the value of the StatusCode element in the\n response is Successful. If an attempt to launch instances failed, the\n StatusCode value is Failed or Cancelled and\n the StatusMessage element in the response indicates the cause of the\n failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Gets information about the scaling activities in the account and Region.

\n

When scaling events occur, you see a record of the scaling activity in the scaling\n activities. For more information, see Verify a scaling\n activity for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

\n

If the scaling event succeeds, the value of the StatusCode element in the\n response is Successful. If an attempt to launch instances failed, the\n StatusCode value is Failed or Cancelled and\n the StatusMessage element in the response indicates the cause of the\n failure. For help interpreting the StatusMessage, see Troubleshooting Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To describe the scaling activities for an Auto Scaling group", @@ -5145,7 +5145,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the termination policies supported by Amazon EC2 Auto Scaling.

\n

For more information, see Work with\n Amazon EC2 Auto Scaling termination policies in the\n Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Describes the termination policies supported by Amazon EC2 Auto Scaling.

\n

For more information, see Configure\n termination policies for Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To describe termination policy types", @@ -5355,7 +5355,7 @@ "LaunchTemplate": { "target": "com.amazonaws.autoscaling#LaunchTemplateSpecification", "traits": { - "smithy.api#documentation": "

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling\n uses to launch Amazon EC2 instances. For more information about launch templates, see Launch\n templates in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling\n uses to launch Amazon EC2 instances. For more information about launch templates, see Launch\n templates in the Amazon EC2 Auto Scaling User Guide.

" } }, "MixedInstancesPolicy": { @@ -5383,7 +5383,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes one or more instances from the specified Auto Scaling group.

\n

After the instances are detached, you can manage them independent of the Auto Scaling\n group.

\n

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches\n instances to replace the ones that are detached.

\n

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are\n deregistered from the load balancer. If there are target groups attached to the Auto Scaling\n group, the instances are deregistered from the target groups.

\n

For more information, see Detach EC2 instances from\n your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Removes one or more instances from the specified Auto Scaling group.

\n

After the instances are detached, you can manage them independently of the Auto Scaling\n group.

\n

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches\n instances to replace the ones that are detached.

\n

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are\n deregistered from the load balancer. If there are target groups attached to the Auto Scaling\n group, the instances are deregistered from the target groups.

\n

For more information, see Detach\n or attach instances in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To detach an instance from an Auto Scaling group", @@ -5659,7 +5659,7 @@ "Metrics": { "target": "com.amazonaws.autoscaling#Metrics", "traits": { - "smithy.api#documentation": "

Identifies the metrics to disable.

\n

You can specify one or more of the following metrics:

\n
    \n
  • \n

    \n GroupMinSize\n

    \n
  • \n
  • \n

    \n GroupMaxSize\n

    \n
  • \n
  • \n

    \n GroupDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupInServiceInstances\n

    \n
  • \n
  • \n

    \n GroupPendingInstances\n

    \n
  • \n
  • \n

    \n GroupStandbyInstances\n

    \n
  • \n
  • \n

    \n GroupTerminatingInstances\n

    \n
  • \n
  • \n

    \n GroupTotalInstances\n

    \n
  • \n
  • \n

    \n GroupInServiceCapacity\n

    \n
  • \n
  • \n

    \n GroupPendingCapacity\n

    \n
  • \n
  • \n

    \n GroupStandbyCapacity\n

    \n
  • \n
  • \n

    \n GroupTerminatingCapacity\n

    \n
  • \n
  • \n

    \n GroupTotalCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolWarmedCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolPendingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTerminatingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTotalCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolTotalCapacity\n

    \n
  • \n
\n

If you omit this property, all metrics are disabled.

\n

For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Identifies the metrics to disable.

\n

You can specify one or more of the following metrics:

\n
    \n
  • \n

    \n GroupMinSize\n

    \n
  • \n
  • \n

    \n GroupMaxSize\n

    \n
  • \n
  • \n

    \n GroupDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupInServiceInstances\n

    \n
  • \n
  • \n

    \n GroupPendingInstances\n

    \n
  • \n
  • \n

    \n GroupStandbyInstances\n

    \n
  • \n
  • \n

    \n GroupTerminatingInstances\n

    \n
  • \n
  • \n

    \n GroupTotalInstances\n

    \n
  • \n
  • \n

    \n GroupInServiceCapacity\n

    \n
  • \n
  • \n

    \n GroupPendingCapacity\n

    \n
  • \n
  • \n

    \n GroupStandbyCapacity\n

    \n
  • \n
  • \n

    \n GroupTerminatingCapacity\n

    \n
  • \n
  • \n

    \n GroupTotalCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolWarmedCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolPendingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTerminatingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTotalCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolTotalCapacity\n

    \n
  • \n
\n

If you omit this property, all metrics are disabled.

\n

For more information, see Amazon CloudWatch metrics for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" } } } @@ -5685,7 +5685,7 @@ "VolumeType": { "target": "com.amazonaws.autoscaling#BlockDeviceEbsVolumeType", "traits": { - "smithy.api#documentation": "

The volume type. For more information, see Amazon EBS volume types in the\n Amazon EC2 User Guide for Linux Instances.

\n

Valid values: standard | io1 | gp2 |\n st1 | sc1 | gp3\n

" + "smithy.api#documentation": "

The volume type. For more information, see Amazon EBS volume types in the\n Amazon EBS User Guide.

\n

Valid values: standard | io1 | gp2 |\n st1 | sc1 | gp3\n

" } }, "DeleteOnTermination": { @@ -5697,13 +5697,13 @@ "Iops": { "target": "com.amazonaws.autoscaling#BlockDeviceEbsIops", "traits": { - "smithy.api#documentation": "

The number of input/output (I/O) operations per second (IOPS) to provision for the\n volume. For gp3 and io1 volumes, this represents the number of\n IOPS that are provisioned for the volume. For gp2 volumes, this represents\n the baseline performance of the volume and the rate at which the volume accumulates I/O\n credits for bursting.

\n

The following are the supported values for each volume type:

\n
    \n
  • \n

    \n gp3: 3,000-16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100-64,000 IOPS

    \n
  • \n
\n

For io1 volumes, we guarantee 64,000 IOPS only for Instances\n built on the Nitro System. Other instance families guarantee performance up\n to 32,000 IOPS.

\n

\n Iops is supported when the volume type is gp3 or\n io1 and required only when the volume type is io1. (Not\n used with standard, gp2, st1, or sc1\n volumes.)

" + "smithy.api#documentation": "

The number of input/output (I/O) operations per second (IOPS) to provision for the\n volume. For gp3 and io1 volumes, this represents the number of\n IOPS that are provisioned for the volume. For gp2 volumes, this represents\n the baseline performance of the volume and the rate at which the volume accumulates I/O\n credits for bursting.

\n

The following are the supported values for each volume type:

\n
    \n
  • \n

    \n gp3: 3,000-16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100-64,000 IOPS

    \n
  • \n
\n

For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the Amazon Web Services Nitro System. Other instance families\n guarantee performance up to 32,000 IOPS.

\n

\n Iops is supported when the volume type is gp3 or\n io1 and required only when the volume type is io1. (Not\n used with standard, gp2, st1, or sc1\n volumes.)

" } }, "Encrypted": { "target": "com.amazonaws.autoscaling#BlockDeviceEbsEncrypted", "traits": { - "smithy.api#documentation": "

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be\n attached to instances that support Amazon EBS encryption. For more information, see Supported instance types. If your AMI uses encrypted volumes, you can also\n only launch it on supported instance types.

\n \n

If you are creating a volume from a snapshot, you cannot create an unencrypted\n volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using\n a launch configuration.

\n

If you enable encryption by default, the EBS volumes that you create are always\n encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key,\n regardless of whether the snapshot was encrypted.

\n

For more information, see Use Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the\n Amazon EC2 Auto Scaling User Guide.

\n
" + "smithy.api#documentation": "

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be\n attached to instances that support Amazon EBS encryption. For more information, see Requirements for Amazon EBS encryption in the Amazon EBS User Guide. If your AMI uses encrypted volumes, you\n can also only launch it on supported instance types.

\n \n

If you are creating a volume from a snapshot, you cannot create an unencrypted\n volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using\n a launch configuration.

\n

If you enable encryption by default, the EBS volumes that you create are always\n encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key,\n regardless of whether the snapshot was encrypted.

\n

For more information, see Use Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the\n Amazon EC2 Auto Scaling User Guide.

\n
" } }, "Throughput": { @@ -5761,7 +5761,7 @@ "Metrics": { "target": "com.amazonaws.autoscaling#Metrics", "traits": { - "smithy.api#documentation": "

Identifies the metrics to enable.

\n

You can specify one or more of the following metrics:

\n
    \n
  • \n

    \n GroupMinSize\n

    \n
  • \n
  • \n

    \n GroupMaxSize\n

    \n
  • \n
  • \n

    \n GroupDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupInServiceInstances\n

    \n
  • \n
  • \n

    \n GroupPendingInstances\n

    \n
  • \n
  • \n

    \n GroupStandbyInstances\n

    \n
  • \n
  • \n

    \n GroupTerminatingInstances\n

    \n
  • \n
  • \n

    \n GroupTotalInstances\n

    \n
  • \n
  • \n

    \n GroupInServiceCapacity\n

    \n
  • \n
  • \n

    \n GroupPendingCapacity\n

    \n
  • \n
  • \n

    \n GroupStandbyCapacity\n

    \n
  • \n
  • \n

    \n GroupTerminatingCapacity\n

    \n
  • \n
  • \n

    \n GroupTotalCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolWarmedCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolPendingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTerminatingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTotalCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolTotalCapacity\n

    \n
  • \n
\n

If you specify Granularity and don't specify any metrics, all metrics are\n enabled.

\n

For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Identifies the metrics to enable.

\n

You can specify one or more of the following metrics:

\n
    \n
  • \n

    \n GroupMinSize\n

    \n
  • \n
  • \n

    \n GroupMaxSize\n

    \n
  • \n
  • \n

    \n GroupDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupInServiceInstances\n

    \n
  • \n
  • \n

    \n GroupPendingInstances\n

    \n
  • \n
  • \n

    \n GroupStandbyInstances\n

    \n
  • \n
  • \n

    \n GroupTerminatingInstances\n

    \n
  • \n
  • \n

    \n GroupTotalInstances\n

    \n
  • \n
  • \n

    \n GroupInServiceCapacity\n

    \n
  • \n
  • \n

    \n GroupPendingCapacity\n

    \n
  • \n
  • \n

    \n GroupStandbyCapacity\n

    \n
  • \n
  • \n

    \n GroupTerminatingCapacity\n

    \n
  • \n
  • \n

    \n GroupTotalCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolWarmedCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolPendingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTerminatingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTotalCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolTotalCapacity\n

    \n
  • \n
\n

If you specify Granularity and don't specify any metrics, all metrics are\n enabled.

\n

For more information, see Amazon CloudWatch metrics for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" } }, "Granularity": { @@ -5780,7 +5780,7 @@ "Metric": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

One of the following metrics:

\n
    \n
  • \n

    \n GroupMinSize\n

    \n
  • \n
  • \n

    \n GroupMaxSize\n

    \n
  • \n
  • \n

    \n GroupDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupInServiceInstances\n

    \n
  • \n
  • \n

    \n GroupPendingInstances\n

    \n
  • \n
  • \n

    \n GroupStandbyInstances\n

    \n
  • \n
  • \n

    \n GroupTerminatingInstances\n

    \n
  • \n
  • \n

    \n GroupTotalInstances\n

    \n
  • \n
  • \n

    \n GroupInServiceCapacity\n

    \n
  • \n
  • \n

    \n GroupPendingCapacity\n

    \n
  • \n
  • \n

    \n GroupStandbyCapacity\n

    \n
  • \n
  • \n

    \n GroupTerminatingCapacity\n

    \n
  • \n
  • \n

    \n GroupTotalCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolWarmedCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolPendingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTerminatingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTotalCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolTotalCapacity\n

    \n
  • \n
\n

For more information, see Auto Scaling group metrics in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

One of the following metrics:

\n
    \n
  • \n

    \n GroupMinSize\n

    \n
  • \n
  • \n

    \n GroupMaxSize\n

    \n
  • \n
  • \n

    \n GroupDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupInServiceInstances\n

    \n
  • \n
  • \n

    \n GroupPendingInstances\n

    \n
  • \n
  • \n

    \n GroupStandbyInstances\n

    \n
  • \n
  • \n

    \n GroupTerminatingInstances\n

    \n
  • \n
  • \n

    \n GroupTotalInstances\n

    \n
  • \n
  • \n

    \n GroupInServiceCapacity\n

    \n
  • \n
  • \n

    \n GroupPendingCapacity\n

    \n
  • \n
  • \n

    \n GroupStandbyCapacity\n

    \n
  • \n
  • \n

    \n GroupTerminatingCapacity\n

    \n
  • \n
  • \n

    \n GroupTotalCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolWarmedCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolPendingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTerminatingCapacity\n

    \n
  • \n
  • \n

    \n WarmPoolTotalCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolDesiredCapacity\n

    \n
  • \n
  • \n

    \n GroupAndWarmPoolTotalCapacity\n

    \n
  • \n
\n

For more information, see Amazon CloudWatch metrics for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" } }, "Granularity": { @@ -5959,7 +5959,7 @@ "HonorCooldown": { "target": "com.amazonaws.autoscaling#HonorCooldown", "traits": { - "smithy.api#documentation": "

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing\n the policy.

\n

Valid only if the policy type is SimpleScaling. For more information, see\n Scaling\n cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to complete before executing\n the policy.

\n

Valid only if the policy type is SimpleScaling. For more information, see\n Scaling\n cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" } }, "MetricValue": { @@ -6239,7 +6239,7 @@ "target": "com.amazonaws.autoscaling#LifecycleState", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A description of the current lifecycle state. The Quarantined state is\n not used. For information about lifecycle states, see Instance\n lifecycle in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

A description of the current lifecycle state. The Quarantined state is\n not used. For more information, see Amazon EC2 Auto Scaling instance\n lifecycle in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#required": {} } }, @@ -6397,7 +6397,7 @@ } }, "traits": { - "smithy.api#documentation": "

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The metadata options for the instances. For more information, see Configure the instance metadata options in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "com.amazonaws.autoscaling#InstanceMonitoring": { @@ -6798,7 +6798,7 @@ } }, "traits": { - "smithy.api#documentation": "

The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your\n specified requirements to identify instance types. Then, it uses your On-Demand and Spot\n allocation strategies to launch instances from these instance types.

\n

When you specify multiple attributes, you get instance types that satisfy all of the\n specified attributes. If you specify multiple values for an attribute, you get instance\n types that satisfy any of the specified values.

\n

To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance\n types, you can use one of the following parameters, but not both in the same\n request:

\n
    \n
  • \n

    \n AllowedInstanceTypes - The instance types to include in the list.\n All other instance types are ignored, even if they match your specified\n attributes.

    \n
  • \n
  • \n

    \n ExcludedInstanceTypes - The instance types to exclude from the\n list, even if they match your specified attributes.

    \n
  • \n
\n \n

You must specify VCpuCount and MemoryMiB. All other\n attributes are optional. Any unspecified optional attribute is set to its\n default.

\n
\n

For more information, see Creating\n an Auto Scaling group using attribute-based instance type selection in the\n Amazon EC2 Auto Scaling User Guide. For help determining which instance types match\n your attributes before you apply them to your Auto Scaling group, see Preview instance types with specified attributes in the\n Amazon EC2 User Guide for Linux Instances.

" + "smithy.api#documentation": "

The attributes for the instance types for a mixed instances policy. Amazon EC2 Auto Scaling uses your\n specified requirements to identify instance types. Then, it uses your On-Demand and Spot\n allocation strategies to launch instances from these instance types.

\n

When you specify multiple attributes, you get instance types that satisfy all of the\n specified attributes. If you specify multiple values for an attribute, you get instance\n types that satisfy any of the specified values.

\n

To limit the list of instance types from which Amazon EC2 Auto Scaling can identify matching instance\n types, you can use one of the following parameters, but not both in the same\n request:

\n
    \n
  • \n

    \n AllowedInstanceTypes - The instance types to include in the list.\n All other instance types are ignored, even if they match your specified\n attributes.

    \n
  • \n
  • \n

    \n ExcludedInstanceTypes - The instance types to exclude from the\n list, even if they match your specified attributes.

    \n
  • \n
\n \n

You must specify VCpuCount and MemoryMiB. All other\n attributes are optional. Any unspecified optional attribute is set to its\n default.

\n
\n

For more information, see Create a mixed instances group using attribute-based instance type\n selection in the Amazon EC2 Auto Scaling User Guide. For help determining\n which instance types match your attributes before you apply them to your Auto Scaling group, see\n Preview instance types with specified attributes in the\n Amazon EC2 User Guide for Linux Instances.

" } }, "com.amazonaws.autoscaling#InstanceReusePolicy": { @@ -6967,20 +6967,20 @@ "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more\n information, see Find a Linux AMI in the\n Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. For more\n information, see Find a Linux AMI in the\n Amazon EC2 User Guide for Linux Instances.

", "smithy.api#required": {} } }, "KeyName": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The name of the key pair.

\n

For more information, see Amazon EC2 Key Pairs in the\n Amazon EC2 User Guide for Linux Instances.

" + "smithy.api#documentation": "

The name of the key pair.

\n

For more information, see Amazon EC2 key pairs and Amazon EC2\n instances in the Amazon EC2 User Guide for Linux Instances.

" } }, "SecurityGroups": { "target": "com.amazonaws.autoscaling#SecurityGroups", "traits": { - "smithy.api#documentation": "

A list that contains the security groups to assign to the instances in the Auto Scaling group.\n For more information, see Security Groups for Your\n VPC in the Amazon Virtual Private Cloud User\n Guide.

" + "smithy.api#documentation": "

A list that contains the security groups to assign to the instances in the Auto Scaling group.\n For more information, see Control traffic to your Amazon Web Services\n resources using security groups in the Amazon Virtual Private\n Cloud User Guide.

" } }, "ClassicLinkVPCId": { @@ -6998,7 +6998,7 @@ "UserData": { "target": "com.amazonaws.autoscaling#XmlStringUserData", "traits": { - "smithy.api#documentation": "

The user data to make available to the launched EC2 instances. For more information,\n see Instance metadata and user data (Linux) and Instance metadata and\n user data (Windows). If you are using a command line tool, base64-encoding\n is performed for you, and you can load the text from a file. Otherwise, you must provide\n base64-encoded text. User data is limited to 16 KB.

" + "smithy.api#documentation": "

The user data to make available to the launched EC2 instances. For more information,\n see Instance metadata and user data (Linux) and Instance metadata and\n user data (Windows). If you are using a command line tool, base64-encoding\n is performed for you, and you can load the text from a file. Otherwise, you must provide\n base64-encoded text. User data is limited to 16 KB.

" } }, "InstanceType": { @@ -7024,19 +7024,19 @@ "BlockDeviceMappings": { "target": "com.amazonaws.autoscaling#BlockDeviceMappings", "traits": { - "smithy.api#documentation": "

The block device mapping entries that define the block devices to attach to the\n instances at launch. By default, the block devices specified in the block device mapping\n for the AMI are used. For more information, see Block Device\n Mapping in the Amazon EC2 User Guide for Linux Instances.

" + "smithy.api#documentation": "

The block device mapping entries that define the block devices to attach to the\n instances at launch. By default, the block devices specified in the block device mapping\n for the AMI are used. For more information, see Block device\n mappings in the Amazon EC2 User Guide for Linux Instances.

" } }, "InstanceMonitoring": { "target": "com.amazonaws.autoscaling#InstanceMonitoring", "traits": { - "smithy.api#documentation": "

Controls whether instances in this group are launched with detailed\n (true) or basic (false) monitoring.

\n

For more information, see Configure\n Monitoring for Auto Scaling Instances in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Controls whether instances in this group are launched with detailed\n (true) or basic (false) monitoring.

\n

For more information, see Configure\n monitoring for Auto Scaling instances in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "SpotPrice": { "target": "com.amazonaws.autoscaling#SpotPrice", "traits": { - "smithy.api#documentation": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the\n request. Spot Instances are launched when the price you specify exceeds the current Spot\n price. For more information, see Requesting Spot\n Instances in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the\n request. Spot Instances are launched when the price you specify exceeds the current Spot\n price. For more information, see Requesting Spot\n Instances for fault-tolerant and flexible applications in the Amazon EC2 Auto Scaling User Guide.

" } }, "IamInstanceProfile": { @@ -7056,25 +7056,25 @@ "EbsOptimized": { "target": "com.amazonaws.autoscaling#EbsOptimized", "traits": { - "smithy.api#documentation": "

Specifies whether the launch configuration is optimized for EBS I/O\n (true) or not (false). For more information, see Amazon\n EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" + "smithy.api#documentation": "

Specifies whether the launch configuration is optimized for EBS I/O\n (true) or not (false). For more information, see Amazon EBS-optimized instances in the\n Amazon EC2 User Guide for Linux Instances.

" } }, "AssociatePublicIpAddress": { "target": "com.amazonaws.autoscaling#AssociatePublicIpAddress", "traits": { - "smithy.api#documentation": "

Specifies whether to assign a public IPv4 address to the group's instances. If the\n instance is launched into a default subnet, the default is to assign a public IPv4\n address, unless you disabled the option to assign a public IPv4 address on the subnet.\n If the instance is launched into a nondefault subnet, the default is not to assign a\n public IPv4 address, unless you enabled the option to assign a public IPv4 address on\n the subnet. For more information, see Launching Auto Scaling instances in a\n VPC in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Specifies whether to assign a public IPv4 address to the group's instances. If the\n instance is launched into a default subnet, the default is to assign a public IPv4\n address, unless you disabled the option to assign a public IPv4 address on the subnet.\n If the instance is launched into a nondefault subnet, the default is not to assign a\n public IPv4 address, unless you enabled the option to assign a public IPv4 address on\n the subnet. For more information, see Provide network connectivity for\n your Auto Scaling instances using Amazon VPC in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "PlacementTenancy": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen64", "traits": { - "smithy.api#documentation": "

The tenancy of the instance, either default or dedicated. An\n instance with dedicated tenancy runs on isolated, single-tenant hardware\n and can only be launched into a VPC.

\n

For more information, see Configuring\n instance tenancy with Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The tenancy of the instance, either default or dedicated. An\n instance with dedicated tenancy runs on isolated, single-tenant hardware\n and can only be launched into a VPC.

" } }, "MetadataOptions": { "target": "com.amazonaws.autoscaling#InstanceMetadataOptions", "traits": { - "smithy.api#documentation": "

The metadata options for the instances. For more information, see Configuring the Instance Metadata Options in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

The metadata options for the instances. For more information, see Configure the instance metadata options in the\n Amazon EC2 Auto Scaling User Guide.

" } } }, @@ -7119,7 +7119,7 @@ "MaxRecords": { "target": "com.amazonaws.autoscaling#MaxRecords", "traits": { - "smithy.api#documentation": "

The maximum number of items to return with this call. The default value is\n 50 and the maximum value is 100.

" + "smithy.api#documentation": "

The maximum number of items to return with this call. The default value is\n 50 and the maximum value is 100.

" } } } @@ -7144,7 +7144,7 @@ "NextToken": { "target": "com.amazonaws.autoscaling#XmlString", "traits": { - "smithy.api#documentation": "

A string that indicates that the response contains more items than can be returned in\n a single response. To receive additional items, specify this string for the\n NextToken value when requesting the next set of items. This value is\n null when there are no more items to return.

" + "smithy.api#documentation": "

A string that indicates that the response contains more items than can be returned in\n a single response. To receive additional items, specify this string for the\n NextToken value when requesting the next set of items. This value is\n null when there are no more items to return.

" } } } @@ -7185,13 +7185,13 @@ "InstanceType": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The instance type, such as m3.xlarge. You must specify an instance type\n that is supported in your requested Region and Availability Zones. For more information,\n see Instance types in the Amazon Elastic Compute Cloud User\n Guide.

\n

You can specify up to 40 instance types per Auto Scaling group.

" + "smithy.api#documentation": "

The instance type, such as m3.xlarge. You must specify an instance type\n that is supported in your requested Region and Availability Zones. For more information,\n see Instance types in the Amazon EC2 User Guide for Linux Instances.

\n

You can specify up to 40 instance types per Auto Scaling group.

" } }, "WeightedCapacity": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen32", "traits": { - "smithy.api#documentation": "

If you provide a list of instance types to use, you can specify the number of capacity\n units provided by each instance type in terms of virtual CPUs, memory, storage,\n throughput, or other relative performance characteristic. When a Spot or On-Demand\n Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling\n launches instances until the desired capacity is totally fulfilled, even if this results\n in an overage. For example, if there are two units remaining to fulfill capacity, and\n Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units,\n the instance is launched, and the desired capacity is exceeded by three units. For more\n information, see Configuring instance weighting for Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999.

\n

If you specify a value for WeightedCapacity for one instance type, you\n must specify a value for WeightedCapacity for all of them.

\n \n

Every Auto Scaling group has three size parameters (DesiredCapacity,\n MaxSize, and MinSize). Usually, you set these sizes\n based on a specific number of instances. However, if you configure a mixed instances\n policy that defines weights for the instance types, you must specify these sizes\n with the same units that you use for weighting instances.

\n
" + "smithy.api#documentation": "

If you provide a list of instance types to use, you can specify the number of capacity\n units provided by each instance type in terms of virtual CPUs, memory, storage,\n throughput, or other relative performance characteristic. When a Spot or On-Demand\n Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling\n launches instances until the desired capacity is totally fulfilled, even if this results\n in an overage. For example, if there are two units remaining to fulfill capacity, and\n Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units,\n the instance is launched, and the desired capacity is exceeded by three units. For more\n information, see Configure an Auto Scaling group to use instance weights in the\n Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999.

\n

If you specify a value for WeightedCapacity for one instance type, you\n must specify a value for WeightedCapacity for all of them.

\n \n

Every Auto Scaling group has three size parameters (DesiredCapacity,\n MaxSize, and MinSize). Usually, you set these sizes\n based on a specific number of instances. However, if you configure a mixed instances\n policy that defines weights for the instance types, you must specify these sizes\n with the same units that you use for weighting instances.

\n
" } }, "LaunchTemplateSpecification": { @@ -7234,7 +7234,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling\n uses to launch Amazon EC2 instances. For more information about launch templates, see Launch\n templates in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Describes the launch template and the version of the launch template that Amazon EC2 Auto Scaling\n uses to launch Amazon EC2 instances. For more information about launch templates, see Launch\n templates in the Amazon EC2 Auto Scaling User Guide.

" } }, "com.amazonaws.autoscaling#LifecycleActionResult": { @@ -7369,7 +7369,7 @@ "RoleARN": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified\n notification target. For information about creating this role, see Configure a notification target for a lifecycle hook in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.

" + "smithy.api#documentation": "

The ARN of the IAM role that allows the Auto Scaling group to publish to the specified\n notification target. For information about creating this role, see Prepare to\n add a lifecycle hook to your Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.

" } } }, @@ -8382,7 +8382,7 @@ "MaxCapacityBreachBehavior": { "target": "com.amazonaws.autoscaling#PredictiveScalingMaxCapacityBreachBehavior", "traits": { - "smithy.api#documentation": "

Defines the behavior that should be applied if the forecast capacity approaches or\n exceeds the maximum capacity of the Auto Scaling group. Defaults to\n HonorMaxCapacity if not specified.

\n

The following are possible values:

\n
    \n
  • \n

    \n HonorMaxCapacity - Amazon EC2 Auto Scaling cannot scale out capacity higher than\n the maximum capacity. The maximum capacity is enforced as a hard limit.

    \n
  • \n
  • \n

    \n IncreaseMaxCapacity - Amazon EC2 Auto Scaling can scale out capacity higher than\n the maximum capacity when the forecast capacity is close to or exceeds the\n maximum capacity. The upper limit is determined by the forecasted capacity and\n the value for MaxCapacityBuffer.

    \n
  • \n
" + "smithy.api#documentation": "

Defines the behavior that should be applied if the forecast capacity approaches or\n exceeds the maximum capacity of the Auto Scaling group. Defaults to\n HonorMaxCapacity if not specified.

\n

The following are possible values:

\n
    \n
  • \n

    \n HonorMaxCapacity - Amazon EC2 Auto Scaling can't increase the maximum capacity\n of the group when the forecast capacity is close to or exceeds the maximum\n capacity.

    \n
  • \n
  • \n

    \n IncreaseMaxCapacity - Amazon EC2 Auto Scaling can increase the maximum capacity\n of the group when the forecast capacity is close to or exceeds the maximum\n capacity. The upper limit is determined by the forecasted capacity and the value\n for MaxCapacityBuffer.

    \n
  • \n
\n \n

Use caution when allowing the maximum capacity to be automatically increased. This\n can lead to more instances being launched than intended if the increased maximum\n capacity is not monitored and managed. The increased maximum capacity then becomes\n the new normal maximum capacity for the Auto Scaling group until you manually update\n it. The maximum capacity does not automatically decrease back to the original\n maximum.

\n
" } }, "MaxCapacityBuffer": { @@ -8650,7 +8650,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a process type.

\n

For more information, see Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Describes a process type.

\n

For more information, see Types\n of processes in the Amazon EC2 Auto Scaling User Guide.

" } }, "com.amazonaws.autoscaling#Processes": { @@ -8793,7 +8793,7 @@ } ], "traits": { - "smithy.api#documentation": "

Configures an Auto Scaling group to send notifications when specified events take place.\n Subscribers to the specified topic can have messages delivered to an endpoint such as a\n web server or an email address.

\n

This configuration overwrites any existing configuration.

\n

For more information, see Getting Amazon SNS\n notifications when your Auto Scaling group scales in the\n Amazon EC2 Auto Scaling User Guide.

\n

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call\n fails.

", + "smithy.api#documentation": "

Configures an Auto Scaling group to send notifications when specified events take place.\n Subscribers to the specified topic can have messages delivered to an endpoint such as a\n web server or an email address.

\n

This configuration overwrites any existing configuration.

\n

For more information, see Amazon SNS\n notification options for Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide.

\n

If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call\n fails.

", "smithy.api#examples": [ { "title": "To add an Auto Scaling notification", @@ -8944,7 +8944,7 @@ "Cooldown": { "target": "com.amazonaws.autoscaling#Cooldown", "traits": { - "smithy.api#documentation": "

A cooldown period, in seconds, that applies to a specific simple scaling policy. When\n a cooldown period is specified here, it overrides the default cooldown.

\n

Valid only if the policy type is SimpleScaling. For more information, see\n Scaling\n cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

Default: None

" + "smithy.api#documentation": "

A cooldown period, in seconds, that applies to a specific simple scaling policy. When\n a cooldown period is specified here, it overrides the default cooldown.

\n

Valid only if the policy type is SimpleScaling. For more information, see\n Scaling\n cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

Default: None

" } }, "MetricAggregationType": { @@ -8974,7 +8974,7 @@ "Enabled": { "target": "com.amazonaws.autoscaling#ScalingPolicyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the scaling policy is enabled or disabled. The default is enabled.\n For more information, see Disabling a\n scaling policy for an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Indicates whether the scaling policy is enabled or disabled. The default is enabled.\n For more information, see Disable a\n scaling policy for an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "PredictiveScalingConfiguration": { @@ -9005,7 +9005,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates a scheduled scaling action for an Auto Scaling group.

\n

For more information, see Scheduled scaling in the\n Amazon EC2 Auto Scaling User Guide.

\n

You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call. If you are no longer using a\n scheduled action, you can delete it by calling the DeleteScheduledAction API.

\n

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error\n message.

", + "smithy.api#documentation": "

Creates or updates a scheduled scaling action for an Auto Scaling group.

\n

For more information, see Scheduled scaling in the\n Amazon EC2 Auto Scaling User Guide.

\n

You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call. If you are no longer using a\n scheduled action, you can delete it by calling the DeleteScheduledAction API.

\n

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error\n message.

", "smithy.api#examples": [ { "title": "To add a scheduled action to an Auto Scaling group", @@ -9109,7 +9109,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of\n pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your\n application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new\n desired capacity. For more information and example configurations, see Warm pools for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

\n

This operation must be called from the Region in which the Auto Scaling group was created.\n This operation cannot be called on an Auto Scaling group that has a mixed instances policy or a\n launch template or launch configuration that requests Spot Instances.

\n

You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the\n DeleteWarmPool API.

", + "smithy.api#documentation": "

Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of\n pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your\n application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new\n desired capacity.

\n

This operation must be called from the Region in which the Auto Scaling group was\n created.

\n

You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the\n DeleteWarmPool API.

\n

For more information, see Warm pools for\n Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To create a warm pool for an Auto Scaling group", @@ -9258,7 +9258,7 @@ "CheckpointPercentages": { "target": "com.amazonaws.autoscaling#CheckpointPercentages", "traits": { - "smithy.api#documentation": "

(Optional) Threshold values for each checkpoint in ascending order. Each number must\n be unique. To replace all instances in the Auto Scaling group, the last number in the array must\n be 100.

\n

For usage examples, see Adding\n checkpoints to an instance refresh in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

(Optional) Threshold values for each checkpoint in ascending order. Each number must\n be unique. To replace all instances in the Auto Scaling group, the last number in the array must\n be 100.

\n

For usage examples, see Add checkpoints to an instance refresh in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "CheckpointDelay": { @@ -9386,7 +9386,7 @@ } ], "traits": { - "smithy.api#documentation": "

Resumes the specified suspended auto scaling processes, or all suspended process, for\n the specified Auto Scaling group.

\n

For more information, see Suspending and\n resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Resumes the specified suspended auto scaling processes, or all suspended processes, for\n the specified Auto Scaling group.

\n

For more information, see Suspend and resume\n Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To resume Auto Scaling processes", @@ -9960,7 +9960,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets the size of the specified Auto Scaling group.

\n

If a scale-in activity occurs as a result of a new DesiredCapacity value\n that is lower than the current size of the group, the Auto Scaling group uses its termination\n policy to determine which instances to terminate.

\n

For more information, see Manual scaling in the\n Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Sets the size of the specified Auto Scaling group.

\n

If a scale-in activity occurs as a result of a new DesiredCapacity value\n that is lower than the current size of the group, the Auto Scaling group uses its termination\n policy to determine which instances to terminate.

\n

For more information, see Manual\n scaling in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To set the desired capacity for an Auto Scaling group", @@ -10015,7 +10015,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets the health status of the specified instance.

\n

For more information, see Health checks for Auto Scaling\n instances in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Sets the health status of the specified instance.

\n

For more information, see Health checks\n for instances in an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To set the health status of an instance", @@ -10050,7 +10050,7 @@ "ShouldRespectGracePeriod": { "target": "com.amazonaws.autoscaling#ShouldRespectGracePeriod", "traits": { - "smithy.api#documentation": "

If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod\n specified for the group, by default, this call respects the grace period. Set this to\n False, to have the call not respect the grace period associated with\n the group.

\n

For more information about the health check grace\n period, see CreateAutoScalingGroup in the Amazon EC2 Auto Scaling API\n Reference.

" + "smithy.api#documentation": "

If the Auto Scaling group of the specified instance has a HealthCheckGracePeriod\n specified for the group, by default, this call respects the grace period. Set this to\n False, to have the call not respect the grace period associated with\n the group.

\n

For more information about the health check grace period, see Set the health check grace period for an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

" } } } @@ -10072,7 +10072,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the instance protection settings of the specified instances. This operation\n cannot be called on instances in a warm pool.

\n

For more information about preventing instances that are part of an Auto Scaling group from\n terminating on scale in, see Using\n instance scale-in protection in the\n Amazon EC2 Auto Scaling User Guide.

\n

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call\n fails.

", + "smithy.api#documentation": "

Updates the instance protection settings of the specified instances. This operation\n cannot be called on instances in a warm pool.

\n

For more information, see Use\n instance scale-in protection in the\n Amazon EC2 Auto Scaling User Guide.

\n

If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling group, the call\n fails.

", "smithy.api#examples": [ { "title": "To enable instance protection for an instance", @@ -10321,7 +10321,7 @@ } ], "traits": { - "smithy.api#documentation": "

Suspends the specified auto scaling processes, or all processes, for the specified\n Auto Scaling group.

\n

If you suspend either the Launch or Terminate process types,\n it can prevent other process types from functioning properly. For more information, see\n Suspending and\n resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.

\n

To resume processes that have been suspended, call the ResumeProcesses API.

", + "smithy.api#documentation": "

Suspends the specified auto scaling processes, or all processes, for the specified\n Auto Scaling group.

\n

If you suspend either the Launch or Terminate process types,\n it can prevent other process types from functioning properly. For more information, see\n Suspend and resume\n Amazon EC2 Auto Scaling processes in the Amazon EC2 Auto Scaling User Guide.

\n

To resume processes that have been suspended, call the ResumeProcesses API.

", "smithy.api#examples": [ { "title": "To suspend Auto Scaling processes", @@ -10353,7 +10353,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes an auto scaling process that has been suspended.

\n

For more information, see Scaling processes in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Describes an auto scaling process that has been suspended.

\n

For more information, see Types\n of processes in the Amazon EC2 Auto Scaling User Guide.

" } }, "com.amazonaws.autoscaling#SuspendedProcesses": { @@ -10539,7 +10539,7 @@ "type": "structure", "members": { "Id": { - "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", + "target": "com.amazonaws.autoscaling#XmlStringMaxLen64", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

A short name that identifies the object's results in the response. This name must be\n unique among all TargetTrackingMetricDataQuery objects specified for a\n single scaling policy. If you are performing math expressions on this set of data, this\n name represents that data and can serve as a variable in the mathematical expression.\n The valid characters are letters, numbers, and underscores. The first character must be\n a lowercase letter.

", @@ -10622,7 +10622,7 @@ } ], "traits": { - "smithy.api#documentation": "

Terminates the specified instance and optionally adjusts the desired group size. This\n operation cannot be called on instances in a warm pool.

\n

This call simply makes a termination request. The instance is not terminated\n immediately. When an instance is terminated, the instance status changes to\n terminated. You can't connect to or start an instance after you've\n terminated it.

\n

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches\n instances to replace the ones that are terminated.

\n

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you\n decrement the desired capacity, your Auto Scaling group can become unbalanced between\n Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might\n terminate instances in other zones. For more information, see Rebalancing activities in the Amazon EC2 Auto Scaling User Guide.

", + "smithy.api#documentation": "

Terminates the specified instance and optionally adjusts the desired group size. This\n operation cannot be called on instances in a warm pool.

\n

This call simply makes a termination request. The instance is not terminated\n immediately. When an instance is terminated, the instance status changes to\n terminated. You can't connect to or start an instance after you've\n terminated it.

\n

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches\n instances to replace the ones that are terminated.

\n

By default, Amazon EC2 Auto Scaling balances instances across all Availability Zones. If you\n decrement the desired capacity, your Auto Scaling group can become unbalanced between\n Availability Zones. Amazon EC2 Auto Scaling tries to rebalance the group, and rebalancing might\n terminate instances in other zones. For more information, see Manual\n scaling in the Amazon EC2 Auto Scaling User Guide.

", "smithy.api#examples": [ { "title": "To terminate an instance in an Auto Scaling group", @@ -10843,7 +10843,7 @@ "DefaultCooldown": { "target": "com.amazonaws.autoscaling#Cooldown", "traits": { - "smithy.api#documentation": "

\n Only needed if you use simple scaling policies.\n

\n

The amount of time, in seconds, between one scaling activity ending and another one\n starting due to simple scaling policies. For more information, see Scaling cooldowns\n for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

\n Only needed if you use simple scaling policies.\n

\n

The amount of time, in seconds, between one scaling activity ending and another one\n starting due to simple scaling policies. For more information, see Scaling\n cooldowns for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

" } }, "AvailabilityZones": { @@ -10855,7 +10855,7 @@ "HealthCheckType": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen32", "traits": { - "smithy.api#documentation": "

A comma-separated value string of one or more health check types.

\n

The valid values are EC2, ELB, and VPC_LATTICE.\n EC2 is the default health check and cannot be disabled. For more\n information, see Health checks for Auto Scaling\n instances in the Amazon EC2 Auto Scaling User Guide.

\n

Only specify EC2 if you must clear a value that was previously\n set.

" + "smithy.api#documentation": "

A comma-separated value string of one or more health check types.

\n

The valid values are EC2, ELB, and VPC_LATTICE.\n EC2 is the default health check and cannot be disabled. For more\n information, see Health checks\n for instances in an Auto Scaling group in the\n Amazon EC2 Auto Scaling User Guide.

\n

Only specify EC2 if you must clear a value that was previously\n set.

" } }, "HealthCheckGracePeriod": { @@ -10879,13 +10879,13 @@ "TerminationPolicies": { "target": "com.amazonaws.autoscaling#TerminationPolicies", "traits": { - "smithy.api#documentation": "

A policy or a list of policies that are used to select the instances to terminate. The\n policies are executed in the order that you list them. For more information, see Work with\n Amazon EC2 Auto Scaling termination policies in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid values: Default | AllocationStrategy |\n ClosestToNextInstanceHour | NewestInstance |\n OldestInstance | OldestLaunchConfiguration |\n OldestLaunchTemplate |\n arn:aws:lambda:region:account-id:function:my-function:my-alias\n

" + "smithy.api#documentation": "

A policy or a list of policies that are used to select the instances to terminate. The\n policies are executed in the order that you list them. For more information, see Configure\n termination policies for Amazon EC2 Auto Scaling in the\n Amazon EC2 Auto Scaling User Guide.

\n

Valid values: Default | AllocationStrategy |\n ClosestToNextInstanceHour | NewestInstance |\n OldestInstance | OldestLaunchConfiguration |\n OldestLaunchTemplate |\n arn:aws:lambda:region:account-id:function:my-function:my-alias\n

" } }, "NewInstancesProtectedFromScaleIn": { "target": "com.amazonaws.autoscaling#InstanceProtected", "traits": { - "smithy.api#documentation": "

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling\n when scaling in. For more information about preventing instances from terminating on\n scale in, see Using\n instance scale-in protection in the\n Amazon EC2 Auto Scaling User Guide.

" + "smithy.api#documentation": "

Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling\n when scaling in. For more information about preventing instances from terminating on\n scale in, see Use\n instance scale-in protection in the\n Amazon EC2 Auto Scaling User Guide.

" } }, "ServiceLinkedRoleARN": { @@ -10915,7 +10915,7 @@ "DesiredCapacityType": { "target": "com.amazonaws.autoscaling#XmlStringMaxLen255", "traits": { - "smithy.api#documentation": "

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling\n supports DesiredCapacityType for attribute-based instance type selection\n only. For more information, see Creating\n an Auto Scaling group using attribute-based instance type selection in the\n Amazon EC2 Auto Scaling User Guide.

\n

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of\n instances.

\n

Valid values: units | vcpu | memory-mib\n

" + "smithy.api#documentation": "

The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling\n supports DesiredCapacityType for attribute-based instance type selection\n only. For more information, see Create a mixed instances group using attribute-based instance type\n selection in the Amazon EC2 Auto Scaling User Guide.

\n

By default, Amazon EC2 Auto Scaling specifies units, which translates into number of\n instances.

\n

Valid values: units | vcpu | memory-mib\n

" } }, "DefaultInstanceWarmup": { diff --git a/models/b2bi.json b/models/b2bi.json index b689537afa..962797f812 100644 --- a/models/b2bi.json +++ b/models/b2bi.json @@ -1242,7 +1242,8 @@ "capabilities": { "target": "com.amazonaws.b2bi#PartnershipCapabilities", "traits": { - "smithy.api#documentation": "

Specifies a list of the capabilities associated with this partnership.

" + "smithy.api#documentation": "

Specifies a list of the capabilities associated with this partnership.

", + "smithy.api#required": {} } }, "clientToken": { @@ -1642,7 +1643,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Specifies the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

", + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", "smithy.api#required": {} } }, @@ -1712,7 +1713,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Returns the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

", + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", "smithy.api#required": {} } }, @@ -2762,7 +2763,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Returns the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

", + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", "smithy.api#required": {} } }, @@ -2848,6 +2849,20 @@ "output": { "target": "com.amazonaws.b2bi#ListCapabilitiesResponse" }, + "errors": [ + { + "target": "com.amazonaws.b2bi#AccessDeniedException" + }, + { + "target": "com.amazonaws.b2bi#InternalServerException" + }, + { + "target": "com.amazonaws.b2bi#ThrottlingException" + }, + { + "target": "com.amazonaws.b2bi#ValidationException" + } + ], "traits": { "smithy.api#documentation": "

Lists the capabilities associated with your Amazon Web Services account for your current or specified region. A trading capability contains the information required to transform incoming EDI documents into JSON or XML outputs.

", "smithy.api#examples": [ @@ -3053,6 +3068,20 @@ "output": { "target": "com.amazonaws.b2bi#ListProfilesResponse" }, + "errors": [ + { + "target": "com.amazonaws.b2bi#AccessDeniedException" + }, + { + "target": "com.amazonaws.b2bi#InternalServerException" + }, + { + "target": "com.amazonaws.b2bi#ThrottlingException" + }, + { + "target": "com.amazonaws.b2bi#ValidationException" + } + ], "traits": { "smithy.api#documentation": "

Lists the profiles associated with your Amazon Web Services account for your current or specified region. A profile is the mechanism used to create the concept of\n a private network.

", "smithy.api#examples": [ @@ -3217,6 +3246,20 @@ "output": { "target": "com.amazonaws.b2bi#ListTransformersResponse" }, + "errors": [ + { + "target": "com.amazonaws.b2bi#AccessDeniedException" + }, + { + "target": "com.amazonaws.b2bi#InternalServerException" + }, + { + "target": "com.amazonaws.b2bi#ThrottlingException" + }, + { + "target": "com.amazonaws.b2bi#ValidationException" + } + ], "traits": { "smithy.api#documentation": "

Lists the available transformers. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", "smithy.api#examples": [ @@ -3739,6 +3782,9 @@ { "target": "com.amazonaws.b2bi#AccessDeniedException" }, + { + "target": "com.amazonaws.b2bi#ConflictException" + }, { "target": "com.amazonaws.b2bi#InternalServerException" }, @@ -4034,7 +4080,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Specifies the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

", + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", "smithy.api#required": {} } }, @@ -4338,7 +4384,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Returns the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

", + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", "smithy.api#required": {} } }, @@ -5118,7 +5164,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Specifies the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

" + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" } }, "status": { @@ -5178,7 +5224,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Returns the name of the mapping template for the transformer. This template is used to convert the input document into the\n correct set of objects.

", + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", "smithy.api#required": {} } }, diff --git a/models/backupstorage.json b/models/backupstorage.json deleted file mode 100644 index 0a73a92835..0000000000 --- a/models/backupstorage.json +++ /dev/null @@ -1,2071 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.backupstorage#AccessDeniedException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#ServiceErrorMessage" - } - }, - "traits": { - "aws.protocols#awsQueryError": { - "code": "AccessDenied", - "httpResponseCode": 403 - }, - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.backupstorage#BackupObject": { - "type": "structure", - "members": { - "Name": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object name", - "smithy.api#required": {} - } - }, - "ChunksCount": { - "target": "com.amazonaws.backupstorage#OptionalLong", - "traits": { - "smithy.api#documentation": "Number of chunks in object" - } - }, - "MetadataString": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Metadata string associated with the Object" - } - }, - "ObjectChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object checksum", - "smithy.api#required": {} - } - }, - "ObjectChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#SummaryChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#required": {} - } - }, - "ObjectToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object token", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "Object" - } - }, - "com.amazonaws.backupstorage#Chunk": { - "type": "structure", - "members": { - "Index": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "Chunk index", - "smithy.api#required": {} - } - }, - "Length": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "Chunk length", - "smithy.api#required": {} - } - }, - "Checksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Chunk checksum", - "smithy.api#required": {} - } - }, - "ChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#required": {} - } - }, - "ChunkToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Chunk token", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "Chunk" - } - }, - "com.amazonaws.backupstorage#ChunkList": { - "type": "list", - "member": { - "target": "com.amazonaws.backupstorage#Chunk" - } - }, - "com.amazonaws.backupstorage#CryoStorageFrontendService": { - "type": "service", - "version": "2018-04-10", - "operations": [ - { - "target": "com.amazonaws.backupstorage#DeleteObject" - }, - { - "target": 
"com.amazonaws.backupstorage#GetChunk" - }, - { - "target": "com.amazonaws.backupstorage#GetObjectMetadata" - }, - { - "target": "com.amazonaws.backupstorage#ListChunks" - }, - { - "target": "com.amazonaws.backupstorage#ListObjects" - }, - { - "target": "com.amazonaws.backupstorage#NotifyObjectComplete" - }, - { - "target": "com.amazonaws.backupstorage#PutChunk" - }, - { - "target": "com.amazonaws.backupstorage#PutObject" - }, - { - "target": "com.amazonaws.backupstorage#StartObject" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "BackupStorage", - "arnNamespace": "backup-storage", - "cloudFormationName": "BackupStorage", - "cloudTrailEventSource": "backupstorage.amazonaws.com", - "endpointPrefix": "backupstorage" - }, - "aws.auth#sigv4": { - "name": "backup-storage" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "The frontend service for Cryo Storage.", - "smithy.api#title": "AWS Backup Storage", - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": 
"https://backupstorage-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backupstorage-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://backupstorage.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://backupstorage.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and 
DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://backupstorage-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://backupstorage.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - }, - "com.amazonaws.backupstorage#DataAlreadyExistsException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - }, - "Checksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Data checksum used" - } - }, - "ChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Checksum algorithm used" - } - } - }, - "traits": { - "smithy.api#documentation": "Non-retryable exception. 
Attempted to create already existing object or chunk.\n This message contains a checksum of already presented data.", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.backupstorage#DataChecksumAlgorithm": { - "type": "enum", - "members": { - "SHA256": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SHA256" - } - } - } - }, - "com.amazonaws.backupstorage#DeleteObject": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#DeleteObjectInput" - }, - "output": { - "target": "smithy.api#Unit" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "Delete Object from the incremental base Backup.", - "smithy.api#http": { - "method": "DELETE", - "uri": "/backup-jobs/{BackupJobId}/object/{ObjectName}", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#DeleteObjectInput": { - "type": "structure", - "members": { - "BackupJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Backup job Id for the in-progress backup.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ObjectName": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "The name of the Object.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#GetChunk": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#GetChunkInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#GetChunkOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#KMSInvalidKeyUsageException" - }, - { - "target": "com.amazonaws.backupstorage#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "Gets the specified object's chunk.", - "smithy.api#http": { - "method": "GET", - "uri": "/restore-jobs/{StorageJobId}/chunk/{ChunkToken}", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#GetChunkInput": { - "type": "structure", - "members": { - "StorageJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Storage job id", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ChunkToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Chunk token", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#GetChunkOutput": { - "type": "structure", - "members": { - "Data": { - "target": 
"com.amazonaws.backupstorage#PayloadBlob", - "traits": { - "smithy.api#documentation": "Chunk data", - "smithy.api#httpPayload": {}, - "smithy.api#required": {} - } - }, - "Length": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "Data length", - "smithy.api#httpHeader": "x-amz-data-length", - "smithy.api#required": {} - } - }, - "Checksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Data checksum", - "smithy.api#httpHeader": "x-amz-checksum", - "smithy.api#required": {} - } - }, - "ChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#httpHeader": "x-amz-checksum-algorithm", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#GetObjectMetadata": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#GetObjectMetadataInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#GetObjectMetadataOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#KMSInvalidKeyUsageException" - }, - { - "target": "com.amazonaws.backupstorage#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "Get metadata associated with an Object.", - "smithy.api#http": { - "method": "GET", - "uri": "/restore-jobs/{StorageJobId}/object/{ObjectToken}/metadata", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#GetObjectMetadataInput": { - "type": "structure", - "members": { - "StorageJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Backup job id for the in-progress backup.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ObjectToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object token.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#GetObjectMetadataOutput": { - "type": "structure", - "members": { - "MetadataString": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Metadata string.", - "smithy.api#httpHeader": "x-amz-metadata-string" - } - }, - "MetadataBlob": { - "target": "com.amazonaws.backupstorage#PayloadBlob", - "traits": { - "smithy.api#default": "", - "smithy.api#documentation": "Metadata blob.", - "smithy.api#httpPayload": {} - } - }, - "MetadataBlobLength": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The size of MetadataBlob.", - "smithy.api#httpHeader": "x-amz-data-length" - } - }, - "MetadataBlobChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "MetadataBlob checksum.", - "smithy.api#httpHeader": "x-amz-checksum" - } - }, - "MetadataBlobChecksumAlgorithm": { - "target": 
"com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm.", - "smithy.api#httpHeader": "x-amz-checksum-algorithm" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#IllegalArgumentException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Non-retryable exception, indicates client error (wrong argument passed to API).\n See exception message for details.", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.backupstorage#KMSInvalidKeyUsageException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Non-retryable exception. Indicates the KMS key usage is incorrect. See exception message for details.", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.backupstorage#ListChunks": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#ListChunksInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#ListChunksOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - } - ], - "traits": { - "smithy.api#documentation": "List chunks in a given Object", - "smithy.api#http": { - "method": "GET", - "uri": "/restore-jobs/{StorageJobId}/chunks/{ObjectToken}/list", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.backupstorage#ListChunksInput": { - "type": "structure", - "members": { - "StorageJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Storage job id", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ObjectToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object token", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "MaxResults": { - "target": "com.amazonaws.backupstorage#MaxResults", - "traits": { - "smithy.api#documentation": "Maximum number of chunks", - "smithy.api#httpQuery": "max-results" - } - }, - "NextToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Pagination token", - "smithy.api#httpQuery": "next-token" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#ListChunksOutput": { - "type": "structure", - "members": { - "ChunkList": { - "target": "com.amazonaws.backupstorage#ChunkList", - "traits": { - "smithy.api#documentation": "List of chunks", - "smithy.api#required": {} - } - }, - "NextToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Pagination token" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#ListObjects": { - "type": "operation", - "input": { - "target": 
"com.amazonaws.backupstorage#ListObjectsInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#ListObjectsOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#KMSInvalidKeyUsageException" - }, - { - "target": "com.amazonaws.backupstorage#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "List all Objects in a given Backup.", - "smithy.api#http": { - "method": "GET", - "uri": "/restore-jobs/{StorageJobId}/objects/list", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.backupstorage#ListObjectsInput": { - "type": "structure", - "members": { - "StorageJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Storage job id", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "StartingObjectName": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Optional, specifies the starting Object name to list from. Ignored if NextToken is not NULL", - "smithy.api#httpQuery": "starting-object-name" - } - }, - "StartingObjectPrefix": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Optional, specifies the starting Object prefix to list from. 
Ignored if NextToken is not NULL", - "smithy.api#httpQuery": "starting-object-prefix" - } - }, - "MaxResults": { - "target": "com.amazonaws.backupstorage#MaxResults", - "traits": { - "smithy.api#documentation": "Maximum objects count", - "smithy.api#httpQuery": "max-results" - } - }, - "NextToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Pagination token", - "smithy.api#httpQuery": "next-token" - } - }, - "CreatedBefore": { - "target": "com.amazonaws.backupstorage#timestamp", - "traits": { - "smithy.api#documentation": "(Optional) Created before filter", - "smithy.api#httpQuery": "created-before" - } - }, - "CreatedAfter": { - "target": "com.amazonaws.backupstorage#timestamp", - "traits": { - "smithy.api#documentation": "(Optional) Created after filter", - "smithy.api#httpQuery": "created-after" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#ListObjectsOutput": { - "type": "structure", - "members": { - "ObjectList": { - "target": "com.amazonaws.backupstorage#ObjectList", - "traits": { - "smithy.api#documentation": "Object list", - "smithy.api#required": {} - } - }, - "NextToken": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Pagination token" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.backupstorage#MetadataString": { - "type": "string", - "traits": { - "smithy.api#pattern": "^.{1,256}$" - } - }, - "com.amazonaws.backupstorage#NotReadableInputStreamException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Retryalble exception. 
Indicated issues while reading an input stream due to the networking issues or connection drop on the client side.", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.backupstorage#NotifyObjectComplete": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#NotifyObjectCompleteInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#NotifyObjectCompleteOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#KMSInvalidKeyUsageException" - }, - { - "target": "com.amazonaws.backupstorage#NotReadableInputStreamException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "aws.auth#unsignedPayload": {}, - "smithy.api#auth": [ - "aws.auth#sigv4" - ], - "smithy.api#documentation": "Complete upload", - "smithy.api#http": { - "method": "PUT", - "uri": "/backup-jobs/{BackupJobId}/object/{UploadId}/complete", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#NotifyObjectCompleteInput": { - "type": "structure", - "members": { - "BackupJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Backup job Id for the in-progress backup", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "UploadId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Upload Id for the in-progress upload", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ObjectChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object checksum", - "smithy.api#httpQuery": "checksum", - "smithy.api#required": {} - } - }, - "ObjectChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#SummaryChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#httpQuery": "checksum-algorithm", - "smithy.api#required": {} - } - }, - "MetadataString": { - "target": "com.amazonaws.backupstorage#MetadataString", - "traits": { - "smithy.api#documentation": "Optional metadata associated with an Object. Maximum string length is 256 bytes.", - "smithy.api#httpQuery": "metadata-string" - } - }, - "MetadataBlob": { - "target": "com.amazonaws.backupstorage#PayloadBlob", - "traits": { - "smithy.api#default": "", - "smithy.api#documentation": "Optional metadata associated with an Object. 
Maximum length is 4MB.", - "smithy.api#httpPayload": {} - } - }, - "MetadataBlobLength": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The size of MetadataBlob.", - "smithy.api#httpQuery": "metadata-blob-length" - } - }, - "MetadataBlobChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Checksum of MetadataBlob.", - "smithy.api#httpQuery": "metadata-checksum" - } - }, - "MetadataBlobChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm.", - "smithy.api#httpQuery": "metadata-checksum-algorithm" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#NotifyObjectCompleteOutput": { - "type": "structure", - "members": { - "ObjectChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Object checksum", - "smithy.api#required": {} - } - }, - "ObjectChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#SummaryChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#ObjectList": { - "type": "list", - "member": { - "target": "com.amazonaws.backupstorage#BackupObject" - } - }, - "com.amazonaws.backupstorage#OptionalLong": { - "type": "long" - }, - "com.amazonaws.backupstorage#PayloadBlob": { - "type": "blob", - "traits": { - "smithy.api#streaming": {} - } - }, - "com.amazonaws.backupstorage#PutChunk": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#PutChunkInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#PutChunkOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#KMSInvalidKeyUsageException" - }, - { - "target": "com.amazonaws.backupstorage#NotReadableInputStreamException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "aws.auth#unsignedPayload": {}, - "smithy.api#auth": [ - "aws.auth#sigv4" - ], - "smithy.api#documentation": "Upload chunk.", - "smithy.api#http": { - "method": "PUT", - "uri": "/backup-jobs/{BackupJobId}/chunk/{UploadId}/{ChunkIndex}", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#PutChunkInput": { - "type": "structure", - "members": { - "BackupJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Backup job Id for the in-progress backup.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "UploadId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Upload Id for the in-progress upload.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ChunkIndex": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": null, - "smithy.api#documentation": "Describes this chunk's position relative to the other chunks", - "smithy.api#httpLabel": {}, - 
"smithy.api#required": {} - } - }, - "Data": { - "target": "com.amazonaws.backupstorage#PayloadBlob", - "traits": { - "smithy.api#documentation": "Data to be uploaded", - "smithy.api#httpPayload": {}, - "smithy.api#required": {} - } - }, - "Length": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "Data length", - "smithy.api#httpQuery": "length", - "smithy.api#required": {} - } - }, - "Checksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Data checksum", - "smithy.api#httpQuery": "checksum", - "smithy.api#required": {} - } - }, - "ChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#httpQuery": "checksum-algorithm", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#PutChunkOutput": { - "type": "structure", - "members": { - "ChunkChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Chunk checksum", - "smithy.api#required": {} - } - }, - "ChunkChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Checksum algorithm", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#PutObject": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#PutObjectInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#PutObjectOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#KMSInvalidKeyUsageException" - }, - { - "target": "com.amazonaws.backupstorage#NotReadableInputStreamException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "aws.auth#unsignedPayload": {}, - "smithy.api#auth": [ - "aws.auth#sigv4" - ], - "smithy.api#documentation": "Upload object that can store object metadata String and data blob in single API call using inline chunk field.", - "smithy.api#http": { - "method": "PUT", - "uri": "/backup-jobs/{BackupJobId}/object/{ObjectName}/put-object", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#PutObjectInput": { - "type": "structure", - "members": { - "BackupJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Backup job Id for the in-progress backup.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ObjectName": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "The name of the Object to be uploaded.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "MetadataString": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Store user defined metadata like backup checksum, disk ids, restore metadata etc.", - "smithy.api#httpQuery": "metadata-string" - } - }, - "InlineChunk": { - "target": "com.amazonaws.backupstorage#PayloadBlob", - 
"traits": { - "smithy.api#default": "", - "smithy.api#documentation": "Inline chunk data to be uploaded.", - "smithy.api#httpPayload": {} - } - }, - "InlineChunkLength": { - "target": "com.amazonaws.backupstorage#long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "Length of the inline chunk data.", - "smithy.api#httpQuery": "length" - } - }, - "InlineChunkChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Inline chunk checksum", - "smithy.api#httpQuery": "checksum" - } - }, - "InlineChunkChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Inline chunk checksum algorithm", - "smithy.api#httpQuery": "checksum-algorithm" - } - }, - "ObjectChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "object checksum", - "smithy.api#httpQuery": "object-checksum" - } - }, - "ObjectChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#SummaryChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "object checksum algorithm", - "smithy.api#httpQuery": "object-checksum-algorithm" - } - }, - "ThrowOnDuplicate": { - "target": "com.amazonaws.backupstorage#boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Throw an exception if Object name is already exist.", - "smithy.api#httpQuery": "throwOnDuplicate" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#PutObjectOutput": { - "type": "structure", - "members": { - "InlineChunkChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Inline chunk checksum", - "smithy.api#required": {} - } - }, - "InlineChunkChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#DataChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "Inline chunk checksum algorithm", - "smithy.api#required": {} - } - }, - "ObjectChecksum": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "object checksum", - "smithy.api#required": {} - } - }, - "ObjectChecksumAlgorithm": { - "target": "com.amazonaws.backupstorage#SummaryChecksumAlgorithm", - "traits": { - "smithy.api#documentation": "object checksum algorithm", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#ResourceNotFoundException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Non-retryable exception. Attempted to make an operation on non-existing or expired resource.", - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.backupstorage#RetryableException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Retryable exception. In general indicates internal failure that can be fixed by retry.", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.backupstorage#ServiceErrorMessage": { - "type": "string" - }, - "com.amazonaws.backupstorage#ServiceInternalException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Deprecated. 
To be removed from the model.", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.backupstorage#ServiceUnavailableException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Retryable exception, indicates internal server error.", - "smithy.api#error": "server", - "smithy.api#httpError": 503 - } - }, - "com.amazonaws.backupstorage#StartObject": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupstorage#StartObjectInput" - }, - "output": { - "target": "com.amazonaws.backupstorage#StartObjectOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupstorage#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupstorage#DataAlreadyExistsException" - }, - { - "target": "com.amazonaws.backupstorage#IllegalArgumentException" - }, - { - "target": "com.amazonaws.backupstorage#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.backupstorage#RetryableException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceInternalException" - }, - { - "target": "com.amazonaws.backupstorage#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.backupstorage#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "Start upload containing one or many chunks.", - "smithy.api#http": { - "method": "PUT", - "uri": "/backup-jobs/{BackupJobId}/object/{ObjectName}", - "code": 200 - } - } - }, - "com.amazonaws.backupstorage#StartObjectInput": { - "type": "structure", - "members": { - "BackupJobId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Backup job Id for the in-progress backup", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ObjectName": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Name for the object.", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "ThrowOnDuplicate": { - "target": "com.amazonaws.backupstorage#boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Throw an exception if Object name is already exist." - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.backupstorage#StartObjectOutput": { - "type": "structure", - "members": { - "UploadId": { - "target": "com.amazonaws.backupstorage#string", - "traits": { - "smithy.api#documentation": "Upload Id for a given upload.", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.backupstorage#SummaryChecksumAlgorithm": { - "type": "enum", - "members": { - "SUMMARY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SUMMARY" - } - } - } - }, - "com.amazonaws.backupstorage#ThrottlingException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.backupstorage#string" - } - }, - "traits": { - "smithy.api#documentation": "Increased rate over throttling limits. 
Can be retried with exponential backoff.", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.backupstorage#boolean": { - "type": "boolean", - "traits": { - "smithy.api#default": false - } - }, - "com.amazonaws.backupstorage#long": { - "type": "long", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.backupstorage#string": { - "type": "string" - }, - "com.amazonaws.backupstorage#timestamp": { - "type": "timestamp" - } - } -} diff --git a/models/batch.json b/models/batch.json index cc3d51ccfb..2e47a6a446 100644 --- a/models/batch.json +++ b/models/batch.json @@ -72,6 +72,9 @@ { "target": "com.amazonaws.batch#DescribeSchedulingPolicies" }, + { + "target": "com.amazonaws.batch#GetJobQueueSnapshot" + }, { "target": "com.amazonaws.batch#ListJobs" }, @@ -3134,7 +3137,20 @@ "outputToken": "nextToken", "items": "computeEnvironments", "pageSize": "maxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeComputeEnvironmentsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.batch#DescribeComputeEnvironmentsRequest": { @@ -4843,6 +4859,107 @@ "com.amazonaws.batch#Float": { "type": "float" }, + "com.amazonaws.batch#FrontOfQueueDetail": { + "type": "structure", + "members": { + "jobs": { + "target": "com.amazonaws.batch#FrontOfQueueJobSummaryList", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Names (ARNs) of the first 100 RUNNABLE jobs in a named job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.

" + } + }, + "lastUpdatedAt": { + "target": "com.amazonaws.batch#Long", + "traits": { + "smithy.api#documentation": "

The Unix timestamp (in milliseconds) for when each of the first 100 RUNNABLE jobs was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains a list of the first 100 RUNNABLE jobs associated with a single job queue.

" + } + }, + "com.amazonaws.batch#FrontOfQueueJobSummary": { + "type": "structure", + "members": { + "jobArn": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#documentation": "

The ARN for a job in a named job queue.

" + } + }, + "earliestTimeAtPosition": { + "target": "com.amazonaws.batch#Long", + "traits": { + "smithy.api#documentation": "

The Unix timestamp (in milliseconds) for when the job transitioned to its current position in the job queue.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that represents summary details for the first 100 RUNNABLE jobs in a job queue.

" + } + }, + "com.amazonaws.batch#FrontOfQueueJobSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.batch#FrontOfQueueJobSummary" + } + }, + "com.amazonaws.batch#GetJobQueueSnapshot": { + "type": "operation", + "input": { + "target": "com.amazonaws.batch#GetJobQueueSnapshotRequest" + }, + "output": { + "target": "com.amazonaws.batch#GetJobQueueSnapshotResponse" + }, + "errors": [ + { + "target": "com.amazonaws.batch#ClientException" + }, + { + "target": "com.amazonaws.batch#ServerException" + } + ], + "traits": { + "smithy.api#documentation": "

Provides a list of the first 100 RUNNABLE jobs associated with a single job queue.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/getjobqueuesnapshot", + "code": 200 + } + } + }, + "com.amazonaws.batch#GetJobQueueSnapshotRequest": { + "type": "structure", + "members": { + "jobQueue": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The job queue’s name or full queue Amazon Resource Name (ARN).

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.batch#GetJobQueueSnapshotResponse": { + "type": "structure", + "members": { + "frontOfQueue": { + "target": "com.amazonaws.batch#FrontOfQueueDetail", + "traits": { + "smithy.api#documentation": "

The list of the first 100 RUNNABLE jobs in each job queue. For first-in-first-out (FIFO) job queues, jobs are ordered based on their submission time. For fair share scheduling (FSS) job queues, jobs are ordered based on their job priority and share usage.
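(Illustrative sketch.) Once this model is regenerated into Soto, the new GetJobQueueSnapshot operation could be called roughly as below. The method name, request/response member names, and init labels are assumptions based on Soto's usual code generation from the Smithy shapes in this hunk; the client setup follows the Soto 6 style.

    import SotoBatch   // Soto Batch module, regenerated from this model

    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let batch = Batch(client: client, region: .uswest2)

    // Inside an async context: fetch the first 100 RUNNABLE jobs at the front of a queue.
    let snapshot = try await batch.getJobQueueSnapshot(.init(jobQueue: "my-job-queue"))  // queue name or ARN
    for job in snapshot.frontOfQueue?.jobs ?? [] {
        // jobArn and earliestTimeAtPosition come from FrontOfQueueJobSummary above
        print(job.jobArn ?? "-", job.earliestTimeAtPosition ?? 0)
    }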

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.batch#Host": { "type": "structure", "members": { @@ -5880,7 +5997,7 @@ "maxResults": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of results returned by ListJobs in paginated output. When\n this parameter is used, ListJobs only returns maxResults results in\n a single page and a nextToken response element. The remaining results of the\n initial request can be seen by sending another ListJobs request with the returned\n nextToken value. This value can be between 1 and\n 100. If this parameter isn't used, then ListJobs returns up to\n 100 results and a nextToken value if applicable.

" + "smithy.api#documentation": "

The maximum number of results returned by ListJobs in a paginated output. When this parameter is used, ListJobs returns up to maxResults results in a single page and a nextToken response element, if applicable. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value.

\n

The following outlines key parameters and limitations:

\n
    \n
  • \n

    The minimum value is 1.

    \n
  • \n
  • \n

    When --job-status is used, Batch returns up to 1000 values.

    \n
  • \n
  • \n

    When --filters is used, Batch returns up to 100 values.

    \n
  • \n
  • \n

    If neither parameter is used, then ListJobs returns up to\n 1000 results (jobs that are in the RUNNING status) and a nextToken value, if applicable.

    \n
  • \n
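Pagination with maxResults and nextToken, as described above, would look roughly like the following Soto sketch. The listJobs method and the maxResults/nextToken members come from this hunk; the jobQueue and jobStatus request members and the jobSummaryList response member are assumptions based on the existing Batch ListJobs API.

    import SotoBatch

    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let batch = Batch(client: client, region: .uswest2)

    // Page through RUNNABLE jobs 100 at a time until nextToken comes back nil.
    var nextToken: String? = nil
    repeat {
        let page = try await batch.listJobs(.init(
            jobQueue: "my-job-queue",   // assumed member, as in the Batch API
            jobStatus: .runnable,       // assumed member; --job-status in the documentation above
            maxResults: 100,
            nextToken: nextToken
        ))
        for job in page.jobSummaryList {
            print(job.jobId)
        }
        nextToken = page.nextToken
    } while nextToken != nil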
" } }, "nextToken": { diff --git a/models/bedrock-agent-runtime.json b/models/bedrock-agent-runtime.json index f31a18b865..f6881f207b 100644 --- a/models/bedrock-agent-runtime.json +++ b/models/bedrock-agent-runtime.json @@ -84,6 +84,27 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFields": { + "type": "map", + "key": { + "target": "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFieldsKey" + }, + "value": { + "target": "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFieldsValue" + } + }, + "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFieldsKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFieldsValue": { + "type": "document" + }, "com.amazonaws.bedrockagentruntime#AgentAliasId": { "type": "string", "traits": { @@ -850,7 +871,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information about the API operation that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains information about the API operation that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#ApiParameter": { @@ -876,7 +897,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about a parameter to provide to the API request.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Information about a parameter to provide to the API request.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#ApiParameters": { @@ -902,7 +923,7 @@ } }, "traits": { - "smithy.api#documentation": "

The request body to provide for the API request, as the agent elicited from the user.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

The request body to provide for the API request, as the agent elicited from the user.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#ApiResult": { @@ -930,7 +951,7 @@ "responseBody": { "target": "com.amazonaws.bedrockagentruntime#ResponseBody", "traits": { - "smithy.api#documentation": "

The response body from the API operation. The key of the object is the content type. The response may be returned directly or from the Lambda function.

" + "smithy.api#documentation": "

The response body from the API operation. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function.

" } }, "httpStatusCode": { @@ -947,7 +968,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information about the API operation that was called from the action group and the response body that was returned.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains information about the API operation that was called from the action group and the response body that was returned.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#Attribution": { @@ -1049,7 +1070,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#Citations": { @@ -1082,7 +1103,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the body of the API response.

\n

This data type is used in the following API operations:

\n
    \n
  • \n

    In the returnControlInvocationResults field of the Retrieve request\n

    \n
  • \n
" + "smithy.api#documentation": "

Contains the body of the API response.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#ContentMap": { @@ -1198,247 +1219,1041 @@ "promptTemplate": { "target": "com.amazonaws.bedrockagentruntime#PromptTemplate", "traits": { - "smithy.api#documentation": "

Contain the textPromptTemplate string for the external source wrapper object.

" + "smithy.api#documentation": "

Contains the textPromptTemplate string for the external source wrapper object.

" + } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration details for the guardrail.

" + } + }, + "inferenceConfig": { + "target": "com.amazonaws.bedrockagentruntime#InferenceConfig", + "traits": { + "smithy.api#documentation": "

Configuration settings for inference when using RetrieveAndGenerate to generate responses while using an external source.

" + } + }, + "additionalModelRequestFields": { + "target": "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFields", + "traits": { + "smithy.api#documentation": "

Additional model parameters and their corresponding values not included in the textInferenceConfig structure for an external source. Takes in custom model parameters specific to the language model being used.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the generation configuration of the external source wrapper object.
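A rough Swift sketch of how the new generation-configuration members added here (guardrailConfiguration, inferenceConfig, additionalModelRequestFields) might be populated once generated into Soto types; the type names and init labels are assumptions based on Soto's memberwise generation, and only fields visible in this hunk are used.

    import SotoBedrockAgentRuntime

    let generationConfig = BedrockAgentRuntime.ExternalSourcesGenerationConfiguration(
        guardrailConfiguration: .init(
            guardrailId: "abc123xyz0",   // lowercase alphanumeric, max 64 characters per the GuardrailConfiguration constraints later in this file
            guardrailVersion: "DRAFT"    // "DRAFT" or a numeric version
        ),
        inferenceConfig: .init(
            textInferenceConfig: .init() // text-generation settings; e.g. maxTokens is bounded 0-65536 by the MaxTokens shape
        ),
        promptTemplate: .init(textPromptTemplate: "Your template text here")
    )
    // additionalModelRequestFields (a map of model-specific parameters) is omitted here
    // because its document-typed values depend on the target model.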

" + } + }, + "com.amazonaws.bedrockagentruntime#ExternalSourcesRetrieveAndGenerateConfiguration": { + "type": "structure", + "members": { + "modelArn": { + "target": "com.amazonaws.bedrockagentruntime#BedrockModelArn", + "traits": { + "smithy.api#documentation": "

The modelArn used with the external source wrapper object in the retrieveAndGenerate function.

", + "smithy.api#required": {} + } + }, + "sources": { + "target": "com.amazonaws.bedrockagentruntime#ExternalSources", + "traits": { + "smithy.api#documentation": "

The document used with the external source wrapper object in the retrieveAndGenerate function.

", + "smithy.api#required": {} + } + }, + "generationConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#ExternalSourcesGenerationConfiguration", + "traits": { + "smithy.api#documentation": "

The prompt used with the external source wrapper object with the retrieveAndGenerate function.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configurations of the external source wrapper object in the retrieveAndGenerate function.

" + } + }, + "com.amazonaws.bedrockagentruntime#FailureReasonString": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FailureTrace": { + "type": "structure", + "members": { + "traceId": { + "target": "com.amazonaws.bedrockagentruntime#TraceId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the trace.

" + } + }, + "failureReason": { + "target": "com.amazonaws.bedrockagentruntime#FailureReasonString", + "traits": { + "smithy.api#documentation": "

The reason the interaction failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the failure of the interaction.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FilterAttribute": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.bedrockagentruntime#FilterKey", + "traits": { + "smithy.api#documentation": "

The name that the metadata attribute must match.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.bedrockagentruntime#FilterValue", + "traits": { + "smithy.api#documentation": "

The value to which to compare the value of the metadata attribute.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the name that the metadata attribute must match and the value to which to compare the value of the metadata attribute. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#FilterKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.bedrockagentruntime#FilterValue": { + "type": "document" + }, + "com.amazonaws.bedrockagentruntime#FinalResponse": { + "type": "structure", + "members": { + "text": { + "target": "com.amazonaws.bedrockagentruntime#FinalResponseString", + "traits": { + "smithy.api#documentation": "

The text in the response to the user.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the response to the user.

" + } + }, + "com.amazonaws.bedrockagentruntime#FinalResponseString": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#Function": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#FunctionInvocationInput": { + "type": "structure", + "members": { + "actionGroup": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The action group that the function belongs to.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.bedrockagentruntime#FunctionParameters", + "traits": { + "smithy.api#documentation": "

A list of parameters of the function.

" + } + }, + "function": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the function.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the function that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#FunctionParameter": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the parameter.

" + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The data type of the parameter.

" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value of the parameter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about a parameter of the function.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#FunctionParameters": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#FunctionParameter" + } + }, + "com.amazonaws.bedrockagentruntime#FunctionResult": { + "type": "structure", + "members": { + "actionGroup": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The action group that the function belongs to.

", + "smithy.api#required": {} + } + }, + "function": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the function that was called.

" + } + }, + "responseBody": { + "target": "com.amazonaws.bedrockagentruntime#ResponseBody", + "traits": { + "smithy.api#documentation": "

The response from the function call using the parameters. The key of the object is the content type (currently, only TEXT is supported). The response may be returned directly or from the Lambda function.

" + } + }, + "responseState": { + "target": "com.amazonaws.bedrockagentruntime#ResponseState", + "traits": { + "smithy.api#documentation": "

Controls the final response state returned to the end user when API/function execution fails. When this state is FAILURE, the request fails with a dependency failure exception. When this state is REPROMPT, the API/function response is sent to the model for re-prompting.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about the function that was called from the action group and the response that was returned.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#GeneratedResponsePart": { + "type": "structure", + "members": { + "textResponsePart": { + "target": "com.amazonaws.bedrockagentruntime#TextResponsePart", + "traits": { + "smithy.api#documentation": "

Contains metadata about a textual part of the generated response that is accompanied by a citation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains metadata about a part of the generated response that is accompanied by a citation.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#GenerationConfiguration": { + "type": "structure", + "members": { + "promptTemplate": { + "target": "com.amazonaws.bedrockagentruntime#PromptTemplate", + "traits": { + "smithy.api#documentation": "

Contains the template for the prompt that's sent to the model for response generation.

" + } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration details for the guardrail.

" + } + }, + "inferenceConfig": { + "target": "com.amazonaws.bedrockagentruntime#InferenceConfig", + "traits": { + "smithy.api#documentation": "

Configuration settings for inference when using RetrieveAndGenerate to generate responses while using a knowledge base as a source.

" + } + }, + "additionalModelRequestFields": { + "target": "com.amazonaws.bedrockagentruntime#AdditionalModelRequestFields", + "traits": { + "smithy.api#documentation": "

Additional model parameters and corresponding values not included in the textInferenceConfig structure for a knowledge base. This allows users to provide custom model parameters specific to the language model being used.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configurations for response generation based on the knowledge base query results.

\n

This data type is used in the following API operations:

\n " + } + }, + "com.amazonaws.bedrockagentruntime#GuadrailAction": { + "type": "enum", + "members": { + "INTERVENED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERVENED" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailAction": { + "type": "enum", + "members": { + "INTERVENED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERVENED" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailAssessment": { + "type": "structure", + "members": { + "topicPolicy": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailTopicPolicyAssessment", + "traits": { + "smithy.api#documentation": "

Topic policy details of the Guardrail.

" + } + }, + "contentPolicy": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailContentPolicyAssessment", + "traits": { + "smithy.api#documentation": "

Content policy details of the Guardrail.

" + } + }, + "wordPolicy": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailWordPolicyAssessment", + "traits": { + "smithy.api#documentation": "

Word policy details of the Guardrail.

" + } + }, + "sensitiveInformationPolicy": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailSensitiveInformationPolicyAssessment", + "traits": { + "smithy.api#documentation": "

Sensitive Information policy details of Guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Assessment details of the content analyzed by Guardrails.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailAssessmentList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailAssessment" + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailConfiguration": { + "type": "structure", + "members": { + "guardrailId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier for the guardrail.

", + "smithy.api#length": { + "max": 64 + }, + "smithy.api#pattern": "^[a-z0-9]+$", + "smithy.api#required": {} + } + }, + "guardrailVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The version of the guardrail.

", + "smithy.api#length": { + "min": 1, + "max": 5 + }, + "smithy.api#pattern": "^(([1-9][0-9]{0,7})|(DRAFT))$", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details for the guardrail.

" + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailContentFilter": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailContentFilterType", + "traits": { + "smithy.api#documentation": "

The type of content detected in the filter by the Guardrail.

" + } + }, + "confidence": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailContentFilterConfidence", + "traits": { + "smithy.api#documentation": "

The confidence level regarding the content detected in the filter by the Guardrail.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailContentPolicyAction", + "traits": { + "smithy.api#documentation": "

The action placed on the content by the Guardrail filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of the content filter used in the Guardrail.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailContentFilterConfidence": { + "type": "enum", + "members": { + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + }, + "LOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOW" + } + }, + "MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEDIUM" + } + }, + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HIGH" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailContentFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailContentFilter" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailContentFilterType": { + "type": "enum", + "members": { + "INSULTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSULTS" + } + }, + "HATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HATE" + } + }, + "SEXUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SEXUAL" + } + }, + "VIOLENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VIOLENCE" + } + }, + "MISCONDUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISCONDUCT" + } + }, + "PROMPT_ATTACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROMPT_ATTACK" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailContentPolicyAction": { + "type": "enum", + "members": { + "BLOCKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCKED" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailContentPolicyAssessment": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailContentFilterList", + "traits": { + "smithy.api#documentation": "

The filter details of the policy assessment used in the Guardrails filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the policy assessment in the Guardrails filter.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailCustomWord": { + "type": "structure", + "members": { + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The match details for the custom word filter in the Guardrail.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailWordPolicyAction", + "traits": { + "smithy.api#documentation": "

The action details for the custom word filter in the Guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The custom word details for the filter in the Guardrail.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailCustomWordList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailCustomWord" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailManagedWord": { + "type": "structure", + "members": { + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The match details for the managed word filter in the Guardrail.

" + } + }, + "type": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailManagedWordType", + "traits": { + "smithy.api#documentation": "

The type details for the managed word filter in the Guardrail.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailWordPolicyAction", + "traits": { + "smithy.api#documentation": "

The action details for the managed word filter in the Guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The managed word details for the filter in the Guardrail.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailManagedWordList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailManagedWord" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailManagedWordType": { + "type": "enum", + "members": { + "PROFANITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROFANITY" + } + } + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailPiiEntityFilter": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailPiiEntityType", + "traits": { + "smithy.api#documentation": "

The type of PII the Guardrail filter has identified and removed.

" + } + }, + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The match to settings in the Guardrail filter to identify and remove PII.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailSensitiveInformationPolicyAction", + "traits": { + "smithy.api#documentation": "

The action of the Guardrail filter to identify and remove PII.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Guardrail filter to identify and remove personally identifiable information (PII).

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailPiiEntityFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailPiiEntityFilter" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.bedrockagentruntime#GuardrailPiiEntityType": { + "type": "enum", + "members": { + "ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ADDRESS" + } + }, + "AGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AGE" + } + }, + "AWS_ACCESS_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_ACCESS_KEY" + } + }, + "AWS_SECRET_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SECRET_KEY" + } + }, + "CA_HEALTH_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CA_HEALTH_NUMBER" + } + }, + "CA_SOCIAL_INSURANCE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CA_SOCIAL_INSURANCE_NUMBER" + } + }, + "CREDIT_DEBIT_CARD_CVV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREDIT_DEBIT_CARD_CVV" + } + }, + "CREDIT_DEBIT_CARD_EXPIRY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREDIT_DEBIT_CARD_EXPIRY" + } + }, + "CREDIT_DEBIT_CARD_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREDIT_DEBIT_CARD_NUMBER" + } + }, + "DRIVER_ID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DRIVER_ID" + } + }, + "EMAIL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EMAIL" + } + }, + "INTERNATIONAL_BANK_ACCOUNT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNATIONAL_BANK_ACCOUNT_NUMBER" + } + }, + "IP_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IP_ADDRESS" + } + }, + "LICENSE_PLATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LICENSE_PLATE" + } + }, + "MAC_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MAC_ADDRESS" + } + }, + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NAME" + } + }, + "PASSWORD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PASSWORD" + } + }, + "PHONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PHONE" + } + }, + "PIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PIN" + } + }, + "SWIFT_CODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SWIFT_CODE" + } + }, + "UK_NATIONAL_HEALTH_SERVICE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UK_NATIONAL_HEALTH_SERVICE_NUMBER" + } + }, + "UK_NATIONAL_INSURANCE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UK_NATIONAL_INSURANCE_NUMBER" + } + }, + "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER" + } + }, + "URL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "URL" + } + }, + "USERNAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USERNAME" + } + }, + "US_BANK_ACCOUNT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_BANK_ACCOUNT_NUMBER" + } + }, + "US_BANK_ROUTING_NUMBER": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_BANK_ROUTING_NUMBER" + } + }, + "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER" + } + }, + "US_PASSPORT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_PASSPORT_NUMBER" + } + }, + "US_SOCIAL_SECURITY_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_SOCIAL_SECURITY_NUMBER" + } + }, + "VEHICLE_IDENTIFICATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VEHICLE_IDENTIFICATION_NUMBER" } } - }, - "traits": { - "smithy.api#documentation": "

Contains the generation configuration of the external source wrapper object.

" } }, - "com.amazonaws.bedrockagentruntime#ExternalSourcesRetrieveAndGenerateConfiguration": { + "com.amazonaws.bedrockagentruntime#GuardrailRegexFilter": { "type": "structure", "members": { - "modelArn": { - "target": "com.amazonaws.bedrockagentruntime#BedrockModelArn", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The modelArn used with the external source wrapper object in the retrieveAndGenerate function.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The name details for the regex filter used in the Guardrail.

" } }, - "sources": { - "target": "com.amazonaws.bedrockagentruntime#ExternalSources", + "regex": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The document used with the external source wrapper object in the retrieveAndGenerate function.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The regex details for the regex filter used in the Guardrail.

" } }, - "generationConfiguration": { - "target": "com.amazonaws.bedrockagentruntime#ExternalSourcesGenerationConfiguration", + "match": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The prompt used with the external source wrapper object with the retrieveAndGenerate function.

" + "smithy.api#documentation": "

The match details for the regex filter used in the Guardrail.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailSensitiveInformationPolicyAction", + "traits": { + "smithy.api#documentation": "

The action details for the regex filter used in the Guardrail.

" } } }, "traits": { - "smithy.api#documentation": "

The configurations of the external source wrapper object in the retrieveAndGenerate function.

" + "smithy.api#documentation": "

The details for the regex filter used in the Guardrail.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#FailureReasonString": { - "type": "string", + "com.amazonaws.bedrockagentruntime#GuardrailRegexFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailRegexFilter" + }, "traits": { "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#FailureTrace": { - "type": "structure", + "com.amazonaws.bedrockagentruntime#GuardrailSensitiveInformationPolicyAction": { + "type": "enum", "members": { - "traceId": { - "target": "com.amazonaws.bedrockagentruntime#TraceId", + "BLOCKED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The unique identifier of the trace.

" + "smithy.api#enumValue": "BLOCKED" } }, - "failureReason": { - "target": "com.amazonaws.bedrockagentruntime#FailureReasonString", + "ANONYMIZED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The reason the interaction failed.

" + "smithy.api#enumValue": "ANONYMIZED" } } - }, - "traits": { - "smithy.api#documentation": "

Contains information about the failure of the interaction.

", - "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#FilterAttribute": { + "com.amazonaws.bedrockagentruntime#GuardrailSensitiveInformationPolicyAssessment": { "type": "structure", "members": { - "key": { - "target": "com.amazonaws.bedrockagentruntime#FilterKey", + "piiEntities": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailPiiEntityFilterList", "traits": { - "smithy.api#documentation": "

The name that the metadata attribute must match.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The details of the PII entities used in the sensitive policy assessment for the Guardrail.

" } }, - "value": { - "target": "com.amazonaws.bedrockagentruntime#FilterValue", + "regexes": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailRegexFilterList", "traits": { - "smithy.api#documentation": "

The value to whcih to compare the value of the metadata attribute.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The details of the regexes used in the sensitive policy assessment for the Guardrail.

" } } }, "traits": { - "smithy.api#documentation": "

Specifies the name that the metadata attribute must match and the value to which to compare the value of the metadata attribute. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n " - } - }, - "com.amazonaws.bedrockagentruntime#FilterKey": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } + "smithy.api#documentation": "

The details of the sensitive policy assessment used in the Guardrail.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#FilterValue": { - "type": "document" - }, - "com.amazonaws.bedrockagentruntime#FinalResponse": { + "com.amazonaws.bedrockagentruntime#GuardrailTopic": { "type": "structure", "members": { - "text": { - "target": "com.amazonaws.bedrockagentruntime#FinalResponseString", + "name": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The text in the response to the user.

" + "smithy.api#documentation": "

The name details on a specific topic in the Guardrail.

" + } + }, + "type": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailTopicType", + "traits": { + "smithy.api#documentation": "

The type details on a specific topic in the Guardrail.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailTopicPolicyAction", + "traits": { + "smithy.api#documentation": "

The action details on a specific topic in the Guardrail.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about the response to the user.

" - } - }, - "com.amazonaws.bedrockagentruntime#FinalResponseString": { - "type": "string", - "traits": { + "smithy.api#documentation": "

The details for a specific topic defined in the Guardrail.

", "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#Function": { - "type": "string", + "com.amazonaws.bedrockagentruntime#GuardrailTopicList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailTopic" + }, "traits": { "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#FunctionInvocationInput": { - "type": "structure", + "com.amazonaws.bedrockagentruntime#GuardrailTopicPolicyAction": { + "type": "enum", "members": { - "actionGroup": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The action group that the function belongs to.

", - "smithy.api#required": {} - } - }, - "parameters": { - "target": "com.amazonaws.bedrockagentruntime#FunctionParameters", - "traits": { - "smithy.api#documentation": "

A list of parameters of the function.

" - } - }, - "function": { - "target": "smithy.api#String", + "BLOCKED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the function.

" + "smithy.api#enumValue": "BLOCKED" } } - }, - "traits": { - "smithy.api#documentation": "

Contains information about the function that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " } }, - "com.amazonaws.bedrockagentruntime#FunctionParameter": { + "com.amazonaws.bedrockagentruntime#GuardrailTopicPolicyAssessment": { "type": "structure", "members": { - "name": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The name of the parameter.

" - } - }, - "type": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "

The data type of the parameter.

" - } - }, - "value": { - "target": "smithy.api#String", + "topics": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailTopicList", "traits": { - "smithy.api#documentation": "

The value of the parameter.

" + "smithy.api#documentation": "

The topic details of the policy assessment used in the Guardrail.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about a parameter of the function.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

The details of the policy assessment used in the Guardrail.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#FunctionParameters": { - "type": "list", - "member": { - "target": "com.amazonaws.bedrockagentruntime#FunctionParameter" + "com.amazonaws.bedrockagentruntime#GuardrailTopicType": { + "type": "enum", + "members": { + "DENY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DENY" + } + } } }, - "com.amazonaws.bedrockagentruntime#FunctionResult": { + "com.amazonaws.bedrockagentruntime#GuardrailTrace": { "type": "structure", "members": { - "actionGroup": { - "target": "smithy.api#String", + "action": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailAction", "traits": { - "smithy.api#documentation": "

The action group that the function belongs to.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The trace action details used with the Guardrail.

" } }, - "function": { - "target": "smithy.api#String", + "traceId": { + "target": "com.amazonaws.bedrockagentruntime#TraceId", "traits": { - "smithy.api#documentation": "

The name of the function that was called.

" + "smithy.api#documentation": "

The details of the trace Id used in the Guardrail Trace.

" } }, - "responseBody": { - "target": "com.amazonaws.bedrockagentruntime#ResponseBody", + "inputAssessments": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailAssessmentList", "traits": { - "smithy.api#documentation": "

The response from the function call using the parameters. The response may be returned directly or from the Lambda function.

" + "smithy.api#documentation": "

The details of the input assessments used in the Guardrail Trace.

" } }, - "responseState": { - "target": "com.amazonaws.bedrockagentruntime#ResponseState", + "outputAssessments": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailAssessmentList", "traits": { - "smithy.api#documentation": "

Controls the final response state returned to end user when API/Function execution failed. When this state is FAILURE, the request would fail with dependency failure exception. When this state is REPROMPT, the API/function response will be sent to model for re-prompt

" + "smithy.api#documentation": "

The details of the output assessments used in the Guardrail Trace.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about the function that was called from the action group and the response that was returned.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

The trace details used in the Guardrail.

", + "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockagentruntime#GeneratedResponsePart": { - "type": "structure", + "com.amazonaws.bedrockagentruntime#GuardrailWordPolicyAction": { + "type": "enum", "members": { - "textResponsePart": { - "target": "com.amazonaws.bedrockagentruntime#TextResponsePart", + "BLOCKED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains metadata about a textual part of the generated response that is accompanied by a citation.

" + "smithy.api#enumValue": "BLOCKED" } } - }, - "traits": { - "smithy.api#documentation": "

Contains metadata about a part of the generated response that is accompanied by a citation.

\n

This data type is used in the following API operations:

\n " } }, - "com.amazonaws.bedrockagentruntime#GenerationConfiguration": { + "com.amazonaws.bedrockagentruntime#GuardrailWordPolicyAssessment": { "type": "structure", "members": { - "promptTemplate": { - "target": "com.amazonaws.bedrockagentruntime#PromptTemplate", + "customWords": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailCustomWordList", "traits": { - "smithy.api#documentation": "

Contains the template for the prompt that's sent to the model for response generation.

" + "smithy.api#documentation": "

The custom word details for words defined in the Guardrail filter.

" + } + }, + "managedWordLists": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailManagedWordList", + "traits": { + "smithy.api#documentation": "

The managed word lists for words defined in the Guardrail filter.

" } } }, "traits": { - "smithy.api#documentation": "

Contains configurations for response generation based on the knowledge base query results.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

The assessment details for words defined in the Guardrail filter.

", + "smithy.api#sensitive": {} } }, "com.amazonaws.bedrockagentruntime#Identifier": { @@ -1451,6 +2266,20 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.bedrockagentruntime#InferenceConfig": { + "type": "structure", + "members": { + "textInferenceConfig": { + "target": "com.amazonaws.bedrockagentruntime#TextInferenceConfig", + "traits": { + "smithy.api#documentation": "

Configuration settings specific to text generation while generating responses using RetrieveAndGenerate.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for inference settings when generating responses using RetrieveAndGenerate.

" + } + }, "com.amazonaws.bedrockagentruntime#InferenceConfiguration": { "type": "structure", "members": { @@ -1569,7 +2398,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains details about the API operation or function that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains details about the API operation or function that the agent predicts should be called.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#InvocationInputs": { @@ -1601,7 +2430,7 @@ } }, "traits": { - "smithy.api#documentation": "

A result from the action group invocation.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

A result from the invocation of an action. For more information, see Return control to the agent developer and Control session context.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#InvocationType": { @@ -1665,7 +2494,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends a prompt for the agent to process and respond to. Use return control event type for function calling.

\n \n

The CLI doesn't support InvokeAgent.

\n
\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or parameters returned from the action group.

    \n
  • \n
  • \n

    Use return control event type for function calling.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
", + "smithy.api#documentation": "\n

The CLI doesn't support InvokeAgent.

\n
\n

Sends a prompt for the agent to process and respond to. Note the following fields for the request:

\n
    \n
  • \n

    To continue the same conversation with an agent, use the same sessionId value in the request.

    \n
  • \n
  • \n

    To activate trace enablement, turn enableTrace to true. Trace enablement helps you follow the agent's reasoning process that led it to the information it processed, the actions it took, and the final result it yielded. For more information, see Trace enablement.

    \n
  • \n
  • \n

    End a conversation by setting endSession to true.

    \n
  • \n
  • \n

    In the sessionState object, you can include attributes for the session or prompt or, if you configured an action group to return control, results from invocation of the action group.

    \n
  • \n
\n

The response is returned in the bytes field of the chunk object.

\n
    \n
  • \n

    The attribution object contains citations for parts of the response.

    \n
  • \n
  • \n

    If you set enableTrace to true in the request, you can trace the agent's steps and reasoning process that led it to the response.

    \n
  • \n
  • \n

    If the action predicted was configured to return control, the response returns parameters for the action, elicited from the user, in the returnControl field.

    \n
  • \n
  • \n

    Errors are also surfaced in the response.

    \n
  • \n
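A minimal request-construction sketch for the fields discussed above; struct and label names assume Soto's usual code generation for this model, and all values are placeholders. Handling of the streamed response is omitted.

    import SotoBedrockAgentRuntime

    let request = BedrockAgentRuntime.InvokeAgentRequest(
        agentAliasId: "TSTALIASID",       // placeholder alias ID
        agentId: "AGENT123456",           // placeholder agent ID
        enableTrace: true,                // surface the agent's reasoning steps
        endSession: false,                // set true to end the conversation
        inputText: "What is the status of my order?",
        sessionId: "session-0001",        // reuse the same value to continue a conversation
        sessionState: .init()             // session/prompt attributes or returnControlInvocationResults go here
    )
    // invokeAgent returns a streamed response: generated text arrives in the bytes
    // field of chunk events, with citations in the attribution object.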
", "smithy.api#http": { "code": 200, "method": "POST", @@ -1679,7 +2508,7 @@ "sessionState": { "target": "com.amazonaws.bedrockagentruntime#SessionState", "traits": { - "smithy.api#documentation": "

Contains parameters that specify various attributes of the session. For more information, see Control session context.

" + "smithy.api#documentation": "

Contains parameters that specify various attributes of the session. For more information, see Control session context.

\n \n

If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored.

\n
" } }, "agentId": { @@ -1721,7 +2550,7 @@ "inputText": { "target": "com.amazonaws.bedrockagentruntime#InputText", "traits": { - "smithy.api#documentation": "

The prompt text to send the agent.

" + "smithy.api#documentation": "

The prompt text to send the agent.

\n \n

If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored.

\n
" } } }, @@ -1965,6 +2794,15 @@ "com.amazonaws.bedrockagentruntime#LambdaArn": { "type": "string" }, + "com.amazonaws.bedrockagentruntime#MaxTokens": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 65536 + } + } + }, "com.amazonaws.bedrockagentruntime#MaximumLength": { "type": "integer", "traits": { @@ -2388,6 +3226,24 @@ "smithy.api#documentation": "

Contains the parameters in the request body.

" } }, + "com.amazonaws.bedrockagentruntime#RAGStopSequences": { + "type": "list", + "member": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + } + } + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 4 + } + } + }, "com.amazonaws.bedrockagentruntime#Rationale": { "type": "structure", "members": { @@ -2576,72 +3432,84 @@ "equals": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value matches the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value matches the value in this object.

\n

The following example would return data sources with an animal attribute whose value is cat:

\n

\n \"equals\": { \"key\": \"animal\", \"value\": \"cat\" }\n

" } }, "notEquals": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value doesn't match the value in this object are returned.

\n

The following example would return data sources that don't contain an animal attribute whose value is cat.

\n

\n \"notEquals\": { \"key\": \"animal\", \"value\": \"cat\" }\n

" } }, "greaterThan": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than the value in this object.

\n

The following example would return data sources with a year attribute whose value is greater than 1989:

\n

\n \"greaterThan\": { \"key\": \"year\", \"value\": 1989 }\n

" } }, "greaterThanOrEquals": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is greater than or equal to the value in this object.

\n

The following example would return data sources with a year attribute whose value is greater than or equal to 1989:

\n

\n \"greaterThanOrEquals\": { \"key\": \"year\", \"value\": 1989 }\n

" } }, "lessThan": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than the value in this object.

\n

The following example would return data sources with a year attribute whose value is less than 1989.

\n

\n \"lessThan\": { \"key\": \"year\", \"value\": 1989 }\n

" } }, "lessThanOrEquals": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is less than or equal to the value in this object.

\n

The following example would return data sources with a year attribute whose value is less than or equal to 1989.

\n

\n \"lessThanOrEquals\": { \"key\": \"year\", \"value\": 1989 }\n

" } }, "in": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is in the list specified in the value in this object.

\n

The following example would return data sources with an animal attribute that is either cat or dog:

\n

\n \"in\": { \"key\": \"animal\", \"value\": [\"cat\", \"dog\"] }\n

" } }, "notIn": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value isn't in the list specified in the value in this object.

\n

The following example would return data sources whose animal attribute is neither cat nor dog.

\n

\n \"notIn\": { \"key\": \"animal\", \"value\": [\"cat\", \"dog\"] }\n

" } }, "startsWith": { "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", "traits": { - "smithy.api#documentation": "

Knowledge base data sources that contain a metadata attribute whose name matches the key and whose value starts with the value in this object are returned. This filter is currently only supported for Amazon OpenSearch Serverless vector stores.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value starts with the value in this object. This filter is currently only supported for Amazon OpenSearch Serverless vector stores.

\n

The following example would return data sources with an animal attribute whose value starts with ca (for example, cat or camel).

\n

\n \"startsWith\": { \"key\": \"animal\", \"value\": \"ca\" }\n

" + } + }, + "listContains": { + "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", + "traits": { + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is a list that contains the value as one of its members.

\n

The following example would return data sources with an animals attribute that is a list containing a cat member (for example [\"dog\", \"cat\"]).

\n

\n \"listContains\": { \"key\": \"animals\", \"value\": \"cat\" }\n

" + } + }, + "stringContains": { + "target": "com.amazonaws.bedrockagentruntime#FilterAttribute", + "traits": { + "smithy.api#documentation": "

Knowledge base data sources are returned if they contain a metadata attribute whose name matches the key and whose value is one of the following:
  • A string that contains the value as a substring. The following example would return data sources with an animal attribute that contains the substring at (for example cat).
    \"stringContains\": { \"key\": \"animal\", \"value\": \"at\" }
  • A list with a member that contains the value as a substring. The following example would return data sources with an animals attribute that is a list containing a member that contains the substring at (for example [\"dog\", \"cat\"]).
    \"stringContains\": { \"key\": \"animals\", \"value\": \"at\" }
" } }, "andAll": { "target": "com.amazonaws.bedrockagentruntime#RetrievalFilterList", "traits": { - "smithy.api#documentation": "

Knowledge base data sources whose metadata attributes fulfill all the filter conditions inside this list are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if their metadata attributes fulfill all the filter conditions inside this list.

" } }, "orAll": { "target": "com.amazonaws.bedrockagentruntime#RetrievalFilterList", "traits": { - "smithy.api#documentation": "

Knowledge base data sources whose metadata attributes fulfill at least one of the filter conditions inside this list are returned.

" + "smithy.api#documentation": "

Knowledge base data sources are returned if their metadata attributes fulfill at least one of the filter conditions inside this list.

" } } }, "traits": { - "smithy.api#documentation": "

Specifies the filters to use on the metadata attributes in the knowledge base data sources before returning results. For more information, see Query configurations.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Specifies the filters to use on the metadata attributes in the knowledge base data sources before returning results. For more information, see Query configurations. See the examples below for how to use these filters.
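The operator fields above each include their own example. As a hedged sketch of combining conditions, the following andAll filter uses only fields defined in this shape; the animal and year attribute names are illustrative:

    \"andAll\": [
        { \"equals\": { \"key\": \"animal\", \"value\": \"cat\" } },
        { \"greaterThan\": { \"key\": \"year\", \"value\": 1989 } }
    ]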

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -2669,7 +3537,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the cited text from the data source.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Contains the cited text from the data source.

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -2691,7 +3559,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information about the location of the data source.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Contains information about the location of the data source.

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -2744,7 +3612,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the S3 location of the data source.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains the S3 location of the data source.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#Retrieve": { @@ -2970,6 +3838,12 @@ "traits": { "smithy.api#documentation": "

A list of segments of the generated response that are based on sources in the knowledge base, alongside information about the sources.

" } + }, + "guardrailAction": { + "target": "com.amazonaws.bedrockagentruntime#GuadrailAction", + "traits": { + "smithy.api#documentation": "

Specifies if there is a guardrail intervention in the response.

" + } } }, "traits": { @@ -3095,7 +3969,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains metadata about a source cited for the generated response.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains metadata about a source cited for the generated response.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#RetrievedReferences": { @@ -3133,7 +4007,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information to return from the action group that the agent has predicted to invoke.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Contains information to return from the action group that the agent has predicted to invoke.

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -3229,13 +4103,13 @@ "returnControlInvocationResults": { "target": "com.amazonaws.bedrockagentruntime#ReturnControlInvocationResults", "traits": { - "smithy.api#documentation": "

Contains information about the results from the action group invocation.

" + "smithy.api#documentation": "

Contains information about the results from the action group invocation. For more information, see Return control to the agent developer and Control session context.

\n \n

If you include this field, the inputText field will be ignored.

\n
" } }, "invocationId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The identifier of the invocation.

" + "smithy.api#documentation": "

The identifier of the invocation of an action. This value must match the invocationId returned in the InvokeAgent response for the action whose results are provided in the returnControlInvocationResults field. For more information, see Return control to the agent developer and Control session context.

" } } }, @@ -3292,7 +4166,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains information about where the text with a citation begins and ends in the generated output.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains information about where the text with a citation begins and ends in the generated output.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagentruntime#StopSequences": { @@ -3316,6 +4190,38 @@ } } }, + "com.amazonaws.bedrockagentruntime#TextInferenceConfig": { + "type": "structure", + "members": { + "temperature": { + "target": "com.amazonaws.bedrockagentruntime#Temperature", + "traits": { + "smithy.api#documentation": "

Controls the randomness of text generated by the language model, influencing how much the model sticks to the most predictable next words versus exploring more surprising options. A lower temperature value (e.g. 0.2 or 0.3) makes model outputs more deterministic or predictable, while a higher temperature (e.g. 0.8 or 0.9) makes the outputs more creative or unpredictable.

" + } + }, + "topP": { + "target": "com.amazonaws.bedrockagentruntime#TopP", + "traits": { + "smithy.api#documentation": "

A probability distribution threshold which controls what the model considers for the set of possible next tokens. The model will only consider the top p% of the probability distribution when generating the next token.

" + } + }, + "maxTokens": { + "target": "com.amazonaws.bedrockagentruntime#MaxTokens", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to generate in the output text. Do not use the minimum of 0\n or the maximum of 65536. The limit values described here are arbitrary values; for the actual\n values, consult the limits defined by your specific model.

" + } + }, + "stopSequences": { + "target": "com.amazonaws.bedrockagentruntime#RAGStopSequences", + "traits": { + "smithy.api#documentation": "

A list of sequences of characters that, if generated, will cause the model to stop\n generating further tokens. Do not use a minimum length of 1 or a maximum length of 1000. The\n limit values described here are arbitrary values; for the actual values, consult the limits defined\n by your specific model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration settings for text generation using a language model via the\n RetrieveAndGenerate operation. Includes parameters like temperature, top-p, maximum token\n count, and stop sequences.

\n \n

The valid range of maxTokens depends on the accepted values for your chosen\n model's inference parameters. To see the inference parameters for your model, see Inference\n parameters for foundation models.\n
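As a minimal sketch, a configuration using only the four members defined above; the values are illustrative, and the enclosing textInferenceConfig key is assumed from the surrounding RetrieveAndGenerate configuration, so check your model's actual limits:

    \"textInferenceConfig\": {
        \"temperature\": 0.2,
        \"topP\": 0.9,
        \"maxTokens\": 512,
        \"stopSequences\": [\"END\"]
    }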

\n
" + } + }, "com.amazonaws.bedrockagentruntime#TextPromptTemplate": { "type": "string", "traits": { @@ -3343,7 +4249,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the part of the generated text that contains a citation, alongside where it begins and ends.

\n

This data type is used in the following API operations:

\n ", + "smithy.api#documentation": "

Contains the part of the generated text that contains a citation, alongside where it begins and ends.

\n

This data type is used in the following API operations:

\n ", "smithy.api#sensitive": {} } }, @@ -3381,6 +4287,12 @@ "com.amazonaws.bedrockagentruntime#Trace": { "type": "union", "members": { + "guardrailTrace": { + "target": "com.amazonaws.bedrockagentruntime#GuardrailTrace", + "traits": { + "smithy.api#documentation": "

The trace details for a trace defined in the Guardrail filter.

" + } + }, "preProcessingTrace": { "target": "com.amazonaws.bedrockagentruntime#PreProcessingTrace", "traits": { diff --git a/models/bedrock-agent.json b/models/bedrock-agent.json index b1733e1e6b..9d4cbfb72a 100644 --- a/models/bedrock-agent.json +++ b/models/bedrock-agent.json @@ -274,6 +274,12 @@ "traits": { "smithy.api#documentation": "

Contains configurations to override prompt templates in different parts of an agent sequence. For more information, see Advanced prompts.

" } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The guardrails configuration assigned to the agent.

" + } } }, "traits": { @@ -790,7 +796,7 @@ "smithy.api#length": { "max": 2048 }, - "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/(service-role/)?AmazonBedrockExecutionRoleForAgents_.+$" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+$" } }, "com.amazonaws.bedrockagent#AgentStatus": { @@ -899,6 +905,12 @@ "traits": { "smithy.api#documentation": "

The latest version of the agent.

" } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The details of the guardrails configuration in the agent summary.

" + } } }, "traits": { @@ -1012,6 +1024,12 @@ "traits": { "smithy.api#documentation": "

Contains configurations to override prompt templates in different parts of an agent sequence. For more information, see Advanced prompts.

" } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The guardrails configuration assigned to the agent version.

" + } } }, "traits": { @@ -1072,6 +1090,12 @@ "traits": { "smithy.api#documentation": "

The description of the version of the agent.

" } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The details of the guardrails configuration in the agent version summary.

" + } } }, "traits": { @@ -1939,9 +1963,23 @@ "traits": { "smithy.api#length": { "min": 20, - "max": 1011 + "max": 2048 }, - "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}))$" + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + } + }, + "com.amazonaws.bedrockagent#BedrockEmbeddingModelConfiguration": { + "type": "structure", + "members": { + "dimensions": { + "target": "com.amazonaws.bedrockagent#Dimensions", + "traits": { + "smithy.api#documentation": "

The dimensions details for the vector configuration used on the Bedrock embeddings model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The vector configuration details for the Bedrock embeddings model.

" } }, "com.amazonaws.bedrockagent#BucketOwnerAccountId": { @@ -2369,6 +2407,12 @@ "traits": { "smithy.api#documentation": "

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

" } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The unique Guardrail configuration assigned to the agent when it is created.

" + } } }, "traits": { @@ -3439,6 +3483,15 @@ } } }, + "com.amazonaws.bedrockagent#Dimensions": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 4096 + } + } + }, "com.amazonaws.bedrockagent#DisassociateAgentKnowledgeBase": { "type": "operation", "input": { @@ -3529,6 +3582,20 @@ "smithy.api#pattern": "^DRAFT$" } }, + "com.amazonaws.bedrockagent#EmbeddingModelConfiguration": { + "type": "structure", + "members": { + "bedrockEmbeddingModelConfiguration": { + "target": "com.amazonaws.bedrockagent#BedrockEmbeddingModelConfiguration", + "traits": { + "smithy.api#documentation": "

The vector configuration details on the Bedrock embeddings model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details for the embeddings model.
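A minimal sketch using only the members defined here; the dimensions value of 1024 is illustrative and must fall within the 0-4096 range of the Dimensions shape:

    \"embeddingModelConfiguration\": {
        \"bedrockEmbeddingModelConfiguration\": { \"dimensions\": 1024 }
    }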

" + } + }, "com.amazonaws.bedrockagent#FailureReason": { "type": "string", "traits": { @@ -4266,6 +4333,41 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrockagent#GuardrailConfiguration": { + "type": "structure", + "members": { + "guardrailIdentifier": { + "target": "com.amazonaws.bedrockagent#GuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The guardrails identifier assigned to the guardrails configuration.

" + } + }, + "guardrailVersion": { + "target": "com.amazonaws.bedrockagent#GuardrailVersion", + "traits": { + "smithy.api#documentation": "

The guardrails version assigned to the guardrails configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the guardrails configuration.
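As a sketch, a guardrailConfiguration built from the two members above; the identifier and version values are placeholders chosen to satisfy the GuardrailIdentifier and GuardrailVersion patterns below:

    \"guardrailConfiguration\": {
        \"guardrailIdentifier\": \"abc123def456\",
        \"guardrailVersion\": \"1\"
    }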

" + } + }, + "com.amazonaws.bedrockagent#GuardrailIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + } + }, + "com.amazonaws.bedrockagent#GuardrailVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(([0-9]{1,8})|(DRAFT))$" + } + }, "com.amazonaws.bedrockagent#Id": { "type": "string", "traits": { @@ -4675,7 +4777,7 @@ "traits": { "smithy.api#length": { "min": 40, - "max": 1200 + "max": 4000 }, "smithy.api#sensitive": {} } @@ -6352,7 +6454,7 @@ "basePromptTemplate": { "target": "com.amazonaws.bedrockagent#BasePromptTemplate", "traits": { - "smithy.api#documentation": "

Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables.

" + "smithy.api#documentation": "

Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see Prompt template placeholder variables and Configure the prompt templates.

" } }, "inferenceConfiguration": { @@ -6396,7 +6498,7 @@ "overrideLambda": { "target": "com.amazonaws.bedrockagent#LambdaArn", "traits": { - "smithy.api#documentation": "

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN.

" + "smithy.api#documentation": "

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock.

" } } }, @@ -7704,6 +7806,12 @@ "traits": { "smithy.api#documentation": "

Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts.

" } + }, + "guardrailConfiguration": { + "target": "com.amazonaws.bedrockagent#GuardrailConfiguration", + "traits": { + "smithy.api#documentation": "

The unique Guardrail configuration assigned to the agent when it is updated.

" + } } }, "traits": { @@ -8019,6 +8127,12 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.

", "smithy.api#required": {} } + }, + "embeddingModelConfiguration": { + "target": "com.amazonaws.bedrockagent#EmbeddingModelConfiguration", + "traits": { + "smithy.api#documentation": "

The embeddings model configuration details for the vector model used in Knowledge Base.

" + } } }, "traits": { diff --git a/models/bedrock-runtime.json b/models/bedrock-runtime.json index e19538049a..9c3ca78f33 100644 --- a/models/bedrock-runtime.json +++ b/models/bedrock-runtime.json @@ -14,6 +14,23 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.bedrockruntime#AdditionalModelResponseFieldPaths": { + "type": "list", + "member": { + "target": "smithy.api#String", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, "com.amazonaws.bedrockruntime#AmazonBedrockFrontendService": { "type": "service", "version": "2023-09-30", @@ -700,6 +717,20 @@ } } }, + "com.amazonaws.bedrockruntime#AnyToolChoice": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The model must request at least one tool (no text is generated). For example, {\"any\" : {}}.

" + } + }, + "com.amazonaws.bedrockruntime#AutoToolChoice": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The model automatically decides whether a tool should be called or whether to generate text instead.\n For example, {\"auto\" : {}}.

" + } + }, "com.amazonaws.bedrockruntime#Body": { "type": "blob", "traits": { @@ -709,52 +740,183 @@ "smithy.api#sensitive": {} } }, - "com.amazonaws.bedrockruntime#GuardrailIdentifier": { - "type": "string", + "com.amazonaws.bedrockruntime#ContentBlock": { + "type": "union", + "members": { + "text": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Text to include in the message.

" + } + }, + "image": { + "target": "com.amazonaws.bedrockruntime#ImageBlock", + "traits": { + "smithy.api#documentation": "

Image to include in the message.

\n \n

This field is only supported by Anthropic Claude 3 models.

\n
" + } + }, + "document": { + "target": "com.amazonaws.bedrockruntime#DocumentBlock", + "traits": { + "smithy.api#documentation": "

A document to include in the message.

" + } + }, + "toolUse": { + "target": "com.amazonaws.bedrockruntime#ToolUseBlock", + "traits": { + "smithy.api#documentation": "

Information about a tool use request from a model.

" + } + }, + "toolResult": { + "target": "com.amazonaws.bedrockruntime#ToolResultBlock", + "traits": { + "smithy.api#documentation": "

The result for a tool request that a model makes.

" + } + }, + "guardContent": { + "target": "com.amazonaws.bedrockruntime#GuardrailConverseContentBlock", + "traits": { + "smithy.api#documentation": "

Contains the content to assess with the guardrail. If you don't specify\n guardContent in a call to the Converse API, the guardrail (if passed in the\n Converse API) assesses the entire message.

\n

For more information, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n\n

" + } + } + }, "traits": { - "smithy.api#length": { - "max": 2048 + "smithy.api#documentation": "

A block of content for a message that you pass to, or receive from, a model with the Converse API (Converse and ConverseStream).

" + } + }, + "com.amazonaws.bedrockruntime#ContentBlockDelta": { + "type": "union", + "members": { + "text": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The content text.

" + } }, - "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + "toolUse": { + "target": "com.amazonaws.bedrockruntime#ToolUseBlockDelta", + "traits": { + "smithy.api#documentation": "

Information about a tool that the model is requesting to use.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A block of content in a streaming response.

" } }, - "com.amazonaws.bedrockruntime#GuardrailVersion": { - "type": "string", + "com.amazonaws.bedrockruntime#ContentBlockDeltaEvent": { + "type": "structure", + "members": { + "delta": { + "target": "com.amazonaws.bedrockruntime#ContentBlockDelta", + "traits": { + "smithy.api#documentation": "

The delta for a content block delta event.

", + "smithy.api#required": {} + } + }, + "contentBlockIndex": { + "target": "com.amazonaws.bedrockruntime#NonNegativeInteger", + "traits": { + "smithy.api#documentation": "

The block index for a content block delta event.

", + "smithy.api#required": {} + } + } + }, "traits": { - "smithy.api#pattern": "^(([1-9][0-9]{0,7})|(DRAFT))$" + "smithy.api#documentation": "

The content block delta event.

" } }, - "com.amazonaws.bedrockruntime#InferenceResource": { - "type": "resource", - "operations": [ - { - "target": "com.amazonaws.bedrockruntime#InvokeModel" + "com.amazonaws.bedrockruntime#ContentBlockStart": { + "type": "union", + "members": { + "toolUse": { + "target": "com.amazonaws.bedrockruntime#ToolUseBlockStart", + "traits": { + "smithy.api#documentation": "

Information about a tool that the model is requesting to use.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Content block start information.

" + } + }, + "com.amazonaws.bedrockruntime#ContentBlockStartEvent": { + "type": "structure", + "members": { + "start": { + "target": "com.amazonaws.bedrockruntime#ContentBlockStart", + "traits": { + "smithy.api#documentation": "

Start information about a content block start event.

", + "smithy.api#required": {} + } }, - { - "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStream" + "contentBlockIndex": { + "target": "com.amazonaws.bedrockruntime#NonNegativeInteger", + "traits": { + "smithy.api#documentation": "

The index for a content block start event.

", + "smithy.api#required": {} + } } - ] + }, + "traits": { + "smithy.api#documentation": "

Content block start event.

" + } }, - "com.amazonaws.bedrockruntime#InternalServerException": { + "com.amazonaws.bedrockruntime#ContentBlockStopEvent": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.bedrockruntime#NonBlankString" + "contentBlockIndex": { + "target": "com.amazonaws.bedrockruntime#NonNegativeInteger", + "traits": { + "smithy.api#documentation": "

The index for a content block.

", + "smithy.api#required": {} + } } }, "traits": { - "smithy.api#documentation": "

An internal server error occurred. Retry your request.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 + "smithy.api#documentation": "

A content block stop event.

" } }, - "com.amazonaws.bedrockruntime#InvokeModel": { + "com.amazonaws.bedrockruntime#ContentBlocks": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#ContentBlock" + } + }, + "com.amazonaws.bedrockruntime#ConversationRole": { + "type": "enum", + "members": { + "USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "user" + } + }, + "ASSISTANT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "assistant" + } + } + } + }, + "com.amazonaws.bedrockruntime#ConversationalModelId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + } + }, + "com.amazonaws.bedrockruntime#Converse": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockruntime#InvokeModelRequest" + "target": "com.amazonaws.bedrockruntime#ConverseRequest" }, "output": { - "target": "com.amazonaws.bedrockruntime#InvokeModelResponse" + "target": "com.amazonaws.bedrockruntime#ConverseResponse" }, "errors": [ { @@ -775,9 +937,6 @@ { "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" }, - { - "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.bedrockruntime#ThrottlingException" }, @@ -786,76 +945,98 @@ } ], "traits": { - "smithy.api#documentation": "

Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. \n You use model inference to generate text, images, and embeddings.

\n

For example code, see Invoke model code examples in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModel action.

", + "smithy.api#documentation": "

Sends messages to the specified Amazon Bedrock model. Converse provides\n a consistent interface that works with all models that\n support messages. This allows you to write code once and use it with different models.\n Should a model have unique inference parameters, you can also pass those unique parameters\n to the model.

\n

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n

\n

For example code, see Converse API examples in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModel action.
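As a hedged sketch, a minimal Converse request body assembled from the message shapes in this model (the model ID travels in the URI, and the prompt text is a placeholder):

    {
        \"messages\": [
            {
                \"role\": \"user\",
                \"content\": [
                    { \"text\": \"Summarize the key changes in this release.\" }
                ]
            }
        ]
    }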

", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/model/{modelId}/invoke" + "uri": "/model/{modelId}/converse" } } }, - "com.amazonaws.bedrockruntime#InvokeModelIdentifier": { - "type": "string", + "com.amazonaws.bedrockruntime#ConverseMetrics": { + "type": "structure", + "members": { + "latencyMs": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The latency of the call to Converse, in milliseconds.\n

", + "smithy.api#required": {} + } + } + }, "traits": { - "smithy.api#length": { - "min": 1, - "max": 2048 - }, - "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + "smithy.api#documentation": "

Metrics for a call to Converse.

" } }, - "com.amazonaws.bedrockruntime#InvokeModelRequest": { + "com.amazonaws.bedrockruntime#ConverseOutput": { + "type": "union", + "members": { + "message": { + "target": "com.amazonaws.bedrockruntime#Message", + "traits": { + "smithy.api#documentation": "

The message that the model generates.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The output from a call to Converse.

" + } + }, + "com.amazonaws.bedrockruntime#ConverseRequest": { "type": "structure", "members": { - "body": { - "target": "com.amazonaws.bedrockruntime#Body", + "modelId": { + "target": "com.amazonaws.bedrockruntime#ConversationalModelId", "traits": { - "smithy.api#documentation": "

The prompt and inference parameters in the format specified in the contentType in the header. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

", - "smithy.api#httpPayload": {}, + "smithy.api#documentation": "

The identifier for the model that you want to call.

\n

The modelId to provide depends on the type of model that you use:

\n ", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "contentType": { - "target": "com.amazonaws.bedrockruntime#MimeType", + "messages": { + "target": "com.amazonaws.bedrockruntime#Messages", "traits": { - "smithy.api#documentation": "

The MIME type of the input data in the request. The default value is\n application/json.

", - "smithy.api#httpHeader": "Content-Type" + "smithy.api#documentation": "

The messages that you want to send to the model.

", + "smithy.api#required": {} } }, - "accept": { - "target": "com.amazonaws.bedrockruntime#MimeType", + "system": { + "target": "com.amazonaws.bedrockruntime#SystemContentBlocks", "traits": { - "smithy.api#documentation": "

The desired MIME type of the inference body in the response. The default value is application/json.

", - "smithy.api#httpHeader": "Accept" + "smithy.api#documentation": "

A system prompt to pass to the model.

" } }, - "modelId": { - "target": "com.amazonaws.bedrockruntime#InvokeModelIdentifier", + "inferenceConfig": { + "target": "com.amazonaws.bedrockruntime#InferenceConfiguration", "traits": { - "smithy.api#documentation": "

The unique identifier of the model to invoke to run inference.

\n

The modelId to provide depends on the type of model that you use:

\n ", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

Inference parameters to pass to the model. Converse supports a base\n set of inference parameters. If you need to pass additional parameters that the model\n supports, use the additionalModelRequestFields request field.

" } }, - "trace": { - "target": "com.amazonaws.bedrockruntime#Trace", + "toolConfig": { + "target": "com.amazonaws.bedrockruntime#ToolConfiguration", "traits": { - "smithy.api#documentation": "

Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.

", - "smithy.api#httpHeader": "X-Amzn-Bedrock-Trace" + "smithy.api#documentation": "

Configuration information for the tools that the model can use when generating a response.

\n \n

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

\n
" } }, - "guardrailIdentifier": { - "target": "com.amazonaws.bedrockruntime#GuardrailIdentifier", + "guardrailConfig": { + "target": "com.amazonaws.bedrockruntime#GuardrailConfiguration", "traits": { - "smithy.api#documentation": "

The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied\n to the invocation.

\n

An error will be thrown in the following situations:
  • You don't provide a guardrail identifier but you specify the amazon-bedrock-guardrailConfig field in the request body.
  • You enable the guardrail but the contentType isn't application/json.
  • You provide a guardrail identifier, but guardrailVersion isn't specified.
", - "smithy.api#httpHeader": "X-Amzn-Bedrock-GuardrailIdentifier" + "smithy.api#documentation": "

Configuration information for a guardrail that you want to use in the request.

" } }, - "guardrailVersion": { - "target": "com.amazonaws.bedrockruntime#GuardrailVersion", + "additionalModelRequestFields": { + "target": "smithy.api#Document", "traits": { - "smithy.api#documentation": "

The version number for the guardrail. The value can also be DRAFT.

", - "smithy.api#httpHeader": "X-Amzn-Bedrock-GuardrailVersion" + "smithy.api#documentation": "

Additional inference parameters that the model supports, beyond the\n base set of inference parameters that Converse supports in the inferenceConfig\n field. For more information, see Model parameters.

" + } + }, + "additionalModelResponseFieldPaths": { + "target": "com.amazonaws.bedrockruntime#AdditionalModelResponseFieldPaths", + "traits": { + "smithy.api#documentation": "

Additional model parameters field paths to return in the\n response. Converse returns the requested fields as a JSON Pointer object in the\n additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths.

\n

\n [\n \"/stop_sequence\"\n ]\n

\n

For information about the JSON Pointer syntax, see the\n Internet Engineering Task Force (IETF) documentation.

\n

\n Converse rejects an empty JSON Pointer or an incorrectly structured\n JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested\n field is not in the model response, it is ignored by Converse.

", + "smithy.api#length": { + "max": 10 + } } } }, @@ -863,37 +1044,61 @@ "smithy.api#input": {} } }, - "com.amazonaws.bedrockruntime#InvokeModelResponse": { + "com.amazonaws.bedrockruntime#ConverseResponse": { "type": "structure", "members": { - "body": { - "target": "com.amazonaws.bedrockruntime#Body", + "output": { + "target": "com.amazonaws.bedrockruntime#ConverseOutput", "traits": { - "smithy.api#documentation": "

Inference response from the model in the format specified in the contentType header. To see the format and content of the request and response bodies for different models, refer to Inference parameters.

", - "smithy.api#httpPayload": {}, + "smithy.api#documentation": "

The result from the call to Converse.

", "smithy.api#required": {} } }, - "contentType": { - "target": "com.amazonaws.bedrockruntime#MimeType", + "stopReason": { + "target": "com.amazonaws.bedrockruntime#StopReason", "traits": { - "smithy.api#documentation": "

The MIME type of the inference result.

", - "smithy.api#httpHeader": "Content-Type", + "smithy.api#documentation": "

The reason why the model stopped generating output.

", + "smithy.api#required": {} + } + }, + "usage": { + "target": "com.amazonaws.bedrockruntime#TokenUsage", + "traits": { + "smithy.api#documentation": "

The total number of tokens used in the call to Converse. The total includes\n the tokens input to the model and the tokens generated by the model.

", + "smithy.api#required": {} + } + }, + "metrics": { + "target": "com.amazonaws.bedrockruntime#ConverseMetrics", + "traits": { + "smithy.api#documentation": "

Metrics for the call to Converse.

", "smithy.api#required": {} } + }, + "additionalModelResponseFields": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

Additional fields in the response that are unique to the model.

" + } + }, + "trace": { + "target": "com.amazonaws.bedrockruntime#ConverseTrace", + "traits": { + "smithy.api#documentation": "

A trace object that contains information about the Guardrail behavior.
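As a hedged sketch, a Converse response built from the members above; the stopReason value and the TokenUsage field names (inputTokens, outputTokens, totalTokens) are assumed from the corresponding shapes defined elsewhere in this model, and all numbers are placeholders:

    {
        \"output\": {
            \"message\": {
                \"role\": \"assistant\",
                \"content\": [ { \"text\": \"Here is a short summary...\" } ]
            }
        },
        \"stopReason\": \"end_turn\",
        \"usage\": { \"inputTokens\": 25, \"outputTokens\": 60, \"totalTokens\": 85 },
        \"metrics\": { \"latencyMs\": 830 }
    }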

" + } } }, "traits": { "smithy.api#output": {} } }, - "com.amazonaws.bedrockruntime#InvokeModelWithResponseStream": { + "com.amazonaws.bedrockruntime#ConverseStream": { "type": "operation", "input": { - "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStreamRequest" + "target": "com.amazonaws.bedrockruntime#ConverseStreamRequest" }, "output": { - "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStreamResponse" + "target": "com.amazonaws.bedrockruntime#ConverseStreamResponse" }, "errors": [ { @@ -908,18 +1113,12 @@ { "target": "com.amazonaws.bedrockruntime#ModelNotReadyException" }, - { - "target": "com.amazonaws.bedrockruntime#ModelStreamErrorException" - }, { "target": "com.amazonaws.bedrockruntime#ModelTimeoutException" }, { "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" }, - { - "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.bedrockruntime#ThrottlingException" }, @@ -928,48 +1127,1548 @@ } ], "traits": { - "smithy.api#documentation": "

Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.

\n

To see if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n \n

The CLI doesn't support InvokeModelWithResponseStream.

\n
\n

For example code, see Invoke model with streaming code\n example in the Amazon Bedrock User Guide.\n

\n

This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action.

", + "smithy.api#documentation": "

Sends messages to the specified Amazon Bedrock model and returns\n the response in a stream. ConverseStream provides a consistent API\n that works with all Amazon Bedrock models that support messages.\n This allows you to write code once and use it with different models. Should a\n model have unique inference parameters, you can also pass those unique parameters to the\n model.

\n

To find out if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n

For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide.\n To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide.\n To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide\n

\n

For example code, see Conversation streaming example in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModelWithResponseStream action.
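As an illustration of the event stream, a single content block delta event assembled from the ContentBlockDeltaEvent and ContentBlockDelta shapes in this model; the text fragment is a placeholder, and on the wire each event is wrapped in the event-stream framing:

    \"contentBlockDelta\": {
        \"contentBlockIndex\": 0,
        \"delta\": { \"text\": \"Hel\" }
    }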

", "smithy.api#http": { "code": 200, "method": "POST", - "uri": "/model/{modelId}/invoke-with-response-stream" + "uri": "/model/{modelId}/converse-stream" } } }, - "com.amazonaws.bedrockruntime#InvokeModelWithResponseStreamRequest": { + "com.amazonaws.bedrockruntime#ConverseStreamMetadataEvent": { "type": "structure", "members": { - "body": { - "target": "com.amazonaws.bedrockruntime#Body", + "usage": { + "target": "com.amazonaws.bedrockruntime#TokenUsage", "traits": { - "smithy.api#documentation": "

The prompt and inference parameters in the format specified in the contentType in the header. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

", - "smithy.api#httpPayload": {}, + "smithy.api#documentation": "

Usage information for the conversation stream event.

", "smithy.api#required": {} } }, - "contentType": { - "target": "com.amazonaws.bedrockruntime#MimeType", + "metrics": { + "target": "com.amazonaws.bedrockruntime#ConverseStreamMetrics", "traits": { - "smithy.api#documentation": "

The MIME type of the input data in the request. The default value is\n application/json.

", - "smithy.api#httpHeader": "Content-Type" + "smithy.api#documentation": "

The metrics for the conversation stream metadata event.

", + "smithy.api#required": {} } }, - "accept": { - "target": "com.amazonaws.bedrockruntime#MimeType", + "trace": { + "target": "com.amazonaws.bedrockruntime#ConverseStreamTrace", "traits": { - "smithy.api#documentation": "

The desired MIME type of the inference body in the response. The default value is\n application/json.

", - "smithy.api#httpHeader": "X-Amzn-Bedrock-Accept" + "smithy.api#documentation": "

The trace object in the response from ConverseStream that contains information about the guardrail behavior.

" } - }, - "modelId": { - "target": "com.amazonaws.bedrockruntime#InvokeModelIdentifier", + } + }, + "traits": { + "smithy.api#documentation": "

A conversation stream metadata event.

" + } + }, + "com.amazonaws.bedrockruntime#ConverseStreamMetrics": { + "type": "structure", + "members": { + "latencyMs": { + "target": "smithy.api#Long", "traits": { - "smithy.api#documentation": "

The unique identifier of the model to invoke to run inference.

\n

The modelId to provide depends on the type of model that you use:

\n ", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The latency for the streaming request, in milliseconds.

", "smithy.api#required": {} } - }, - "trace": { + } + }, + "traits": { + "smithy.api#documentation": "

Metrics for the stream.

" + } + }, + "com.amazonaws.bedrockruntime#ConverseStreamOutput": { + "type": "union", + "members": { + "messageStart": { + "target": "com.amazonaws.bedrockruntime#MessageStartEvent", + "traits": { + "smithy.api#documentation": "

Message start information.

" + } + }, + "contentBlockStart": { + "target": "com.amazonaws.bedrockruntime#ContentBlockStartEvent", + "traits": { + "smithy.api#documentation": "

Start information for a content block.

" + } + }, + "contentBlockDelta": { + "target": "com.amazonaws.bedrockruntime#ContentBlockDeltaEvent", + "traits": { + "smithy.api#documentation": "

The messages output content block delta.

" + } + }, + "contentBlockStop": { + "target": "com.amazonaws.bedrockruntime#ContentBlockStopEvent", + "traits": { + "smithy.api#documentation": "

Stop information for a content block.

" + } + }, + "messageStop": { + "target": "com.amazonaws.bedrockruntime#MessageStopEvent", + "traits": { + "smithy.api#documentation": "

Message stop information.

" + } + }, + "metadata": { + "target": "com.amazonaws.bedrockruntime#ConverseStreamMetadataEvent", + "traits": { + "smithy.api#documentation": "

Metadata for the converse output stream.

" + } + }, + "internalServerException": { + "target": "com.amazonaws.bedrockruntime#InternalServerException", + "traits": { + "smithy.api#documentation": "

An internal server error occurred. Retry your request.

" + } + }, + "modelStreamErrorException": { + "target": "com.amazonaws.bedrockruntime#ModelStreamErrorException", + "traits": { + "smithy.api#documentation": "

A streaming error occurred. Retry your request.

" + } + }, + "validationException": { + "target": "com.amazonaws.bedrockruntime#ValidationException", + "traits": { + "smithy.api#documentation": "

Input validation failed. Check your request parameters and retry the request.

" + } + }, + "throttlingException": { + "target": "com.amazonaws.bedrockruntime#ThrottlingException", + "traits": { + "smithy.api#documentation": "

The number of requests exceeds the limit. Resubmit your request later.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The messages output stream

", + "smithy.api#streaming": {} + } + }, + "com.amazonaws.bedrockruntime#ConverseStreamRequest": { + "type": "structure", + "members": { + "modelId": { + "target": "com.amazonaws.bedrockruntime#ConversationalModelId", + "traits": { + "smithy.api#documentation": "

The ID for the model.

\n

The modelId to provide depends on the type of model that you use:

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "messages": { + "target": "com.amazonaws.bedrockruntime#Messages", + "traits": { + "smithy.api#documentation": "

The messages that you want to send to the model.

", + "smithy.api#required": {} + } + }, + "system": { + "target": "com.amazonaws.bedrockruntime#SystemContentBlocks", + "traits": { + "smithy.api#documentation": "

A system prompt to send to the model.

" + } + }, + "inferenceConfig": { + "target": "com.amazonaws.bedrockruntime#InferenceConfiguration", + "traits": { + "smithy.api#documentation": "

Inference parameters to pass to the model. ConverseStream supports a base\n set of inference parameters. If you need to pass additional parameters that the model\n supports, use the additionalModelRequestFields request field.

" + } + }, + "toolConfig": { + "target": "com.amazonaws.bedrockruntime#ToolConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration information for the tools that the model can use when generating a response.

\n \n

This field is only supported by Anthropic Claude 3 models.

\n
" + } + }, + "guardrailConfig": { + "target": "com.amazonaws.bedrockruntime#GuardrailStreamConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration information for a guardrail that you want to use in the request.

" + } + }, + "additionalModelRequestFields": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

Additional inference parameters that the model supports, beyond the\n base set of inference parameters that ConverseStream supports in the inferenceConfig\n field.

" + } + }, + "additionalModelResponseFieldPaths": { + "target": "com.amazonaws.bedrockruntime#AdditionalModelResponseFieldPaths", + "traits": { + "smithy.api#documentation": "

Additional model parameters field paths to return in the\n response. ConverseStream returns the requested fields as a JSON Pointer object in the\n additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths.

\n

\n [\n \"/stop_sequence\"\n ]\n

\n

For information about the JSON Pointer syntax, see the\n Internet Engineering Task Force (IETF) documentation.

\n

\n ConverseStream rejects an empty JSON Pointer or an incorrectly structured\n JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested\n field is not in the model response, it is ignored by ConverseStream.

", + "smithy.api#length": { + "max": 10 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockruntime#ConverseStreamResponse": { + "type": "structure", + "members": { + "stream": { + "target": "com.amazonaws.bedrockruntime#ConverseStreamOutput", + "traits": { + "smithy.api#documentation": "

The output stream that the model generated.

", + "smithy.api#httpPayload": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockruntime#ConverseStreamTrace": { + "type": "structure", + "members": { + "guardrail": { + "target": "com.amazonaws.bedrockruntime#GuardrailTraceAssessment", + "traits": { + "smithy.api#documentation": "

The guardrail trace object.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The trace object in a response from ConverseStream. Currently, you can only trace guardrails.

" + } + }, + "com.amazonaws.bedrockruntime#ConverseTrace": { + "type": "structure", + "members": { + "guardrail": { + "target": "com.amazonaws.bedrockruntime#GuardrailTraceAssessment", + "traits": { + "smithy.api#documentation": "

The guardrail trace object.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The trace object in a response from Converse. Currently, you can only trace guardrails.

" + } + }, + "com.amazonaws.bedrockruntime#DocumentBlock": { + "type": "structure", + "members": { + "format": { + "target": "com.amazonaws.bedrockruntime#DocumentFormat", + "traits": { + "smithy.api#documentation": "

The format of a document, or its extension.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A name for the document.

", + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#required": {} + } + }, + "source": { + "target": "com.amazonaws.bedrockruntime#DocumentSource", + "traits": { + "smithy.api#documentation": "

Contains the content of the document.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A document to include in a message when sending a Converse or ConverseStream request. You can include up to 5 documents in a request. The maximum document size is 50 MB.
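As a sketch, a document content block built from the members defined here, wrapped in the document field of a ContentBlock; the base64 payload is a truncated placeholder:

    {
        \"document\": {
            \"format\": \"pdf\",
            \"name\": \"quarterly-report\",
            \"source\": { \"bytes\": \"JVBERi0xLjcK...\" }
        }
    }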

" + } + }, + "com.amazonaws.bedrockruntime#DocumentFormat": { + "type": "enum", + "members": { + "PDF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pdf" + } + }, + "CSV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "csv" + } + }, + "DOC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "doc" + } + }, + "DOCX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "docx" + } + }, + "XLS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "xls" + } + }, + "XLSX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "xlsx" + } + }, + "HTML": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "html" + } + }, + "TXT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "txt" + } + }, + "MD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "md" + } + } + } + }, + "com.amazonaws.bedrockruntime#DocumentSource": { + "type": "union", + "members": { + "bytes": { + "target": "smithy.api#Blob", + "traits": { + "smithy.api#documentation": "

A base64-encoded string of a UTF-8 encoded file that is the document to include in the message.

", + "smithy.api#length": { + "min": 1 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the content of the document included in a message when sending a Converse or ConverseStream request or in the response.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailAssessment": { + "type": "structure", + "members": { + "topicPolicy": { + "target": "com.amazonaws.bedrockruntime#GuardrailTopicPolicyAssessment", + "traits": { + "smithy.api#documentation": "

The topic policy.

" + } + }, + "contentPolicy": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentPolicyAssessment", + "traits": { + "smithy.api#documentation": "

The content policy.

" + } + }, + "wordPolicy": { + "target": "com.amazonaws.bedrockruntime#GuardrailWordPolicyAssessment", + "traits": { + "smithy.api#documentation": "

The word policy.

" + } + }, + "sensitiveInformationPolicy": { + "target": "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyAssessment", + "traits": { + "smithy.api#documentation": "

The sensitive information policy.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A behavior assessment of the guardrail policies used in a call to the Converse API.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailAssessmentList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailAssessment" + } + }, + "com.amazonaws.bedrockruntime#GuardrailAssessmentListMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "com.amazonaws.bedrockruntime#GuardrailAssessmentList" + } + }, + "com.amazonaws.bedrockruntime#GuardrailAssessmentMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "com.amazonaws.bedrockruntime#GuardrailAssessment" + } + }, + "com.amazonaws.bedrockruntime#GuardrailConfiguration": { + "type": "structure", + "members": { + "guardrailIdentifier": { + "target": "com.amazonaws.bedrockruntime#GuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier for the guardrail.

", + "smithy.api#required": {} + } + }, + "guardrailVersion": { + "target": "com.amazonaws.bedrockruntime#GuardrailVersion", + "traits": { + "smithy.api#documentation": "

The version of the guardrail.

", + "smithy.api#required": {} + } + }, + "trace": { + "target": "com.amazonaws.bedrockruntime#GuardrailTrace", + "traits": { + "smithy.api#default": "disabled", + "smithy.api#documentation": "

The trace behavior for the guardrail.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration information for a guardrail that you use with the Converse action.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentFilter": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentFilterType", + "traits": { + "smithy.api#documentation": "

The guardrail type.

", + "smithy.api#required": {} + } + }, + "confidence": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentFilterConfidence", + "traits": { + "smithy.api#documentation": "

The guardrail confidence.

", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentPolicyAction", + "traits": { + "smithy.api#documentation": "

The guardrail action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The content filter for a guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentFilterConfidence": { + "type": "enum", + "members": { + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + }, + "LOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOW" + } + }, + "MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEDIUM" + } + }, + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HIGH" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentFilter" + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentFilterType": { + "type": "enum", + "members": { + "INSULTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSULTS" + } + }, + "HATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HATE" + } + }, + "SEXUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SEXUAL" + } + }, + "VIOLENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VIOLENCE" + } + }, + "MISCONDUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISCONDUCT" + } + }, + "PROMPT_ATTACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROMPT_ATTACK" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentPolicyAction": { + "type": "enum", + "members": { + "BLOCKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCKED" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailContentPolicyAssessment": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentFilterList", + "traits": { + "smithy.api#documentation": "

The content policy filters.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An assessment of a content policy for a guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailConverseContentBlock": { + "type": "union", + "members": { + "text": { + "target": "com.amazonaws.bedrockruntime#GuardrailConverseTextBlock", + "traits": { + "smithy.api#documentation": "

The text to guard.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n

A content block for selective guarding with the Converse API (Converse and ConverseStream).\n

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailConverseTextBlock": { + "type": "structure", + "members": { + "text": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The text that you want to guard.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A text block that contains text that you want to assess with a guardrail. For more information, see GuardrailConverseContentBlock.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailCustomWord": { + "type": "structure", + "members": { + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The match for the custom word.

", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailWordPolicyAction", + "traits": { + "smithy.api#documentation": "

The action for the custom word.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A custom word configured in a guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailCustomWordList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailCustomWord" + } + }, + "com.amazonaws.bedrockruntime#GuardrailIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" + } + }, + "com.amazonaws.bedrockruntime#GuardrailManagedWord": { + "type": "structure", + "members": { + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The match for the managed word.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.bedrockruntime#GuardrailManagedWordType", + "traits": { + "smithy.api#documentation": "

The type for the managed word.

", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailWordPolicyAction", + "traits": { + "smithy.api#documentation": "

The action for the managed word.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A managed word configured in a guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailManagedWordList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailManagedWord" + } + }, + "com.amazonaws.bedrockruntime#GuardrailManagedWordType": { + "type": "enum", + "members": { + "PROFANITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROFANITY" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailOutputText": { + "type": "string" + }, + "com.amazonaws.bedrockruntime#GuardrailPiiEntityFilter": { + "type": "structure", + "members": { + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The PII entity filter match.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.bedrockruntime#GuardrailPiiEntityType", + "traits": { + "smithy.api#documentation": "

The PII entity filter type.

", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyAction", + "traits": { + "smithy.api#documentation": "

The PII entity filter action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A Personally Identifiable Information (PII) entity configured in a guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailPiiEntityFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailPiiEntityFilter" + } + }, + "com.amazonaws.bedrockruntime#GuardrailPiiEntityType": { + "type": "enum", + "members": { + "ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ADDRESS" + } + }, + "AGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AGE" + } + }, + "AWS_ACCESS_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_ACCESS_KEY" + } + }, + "AWS_SECRET_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SECRET_KEY" + } + }, + "CA_HEALTH_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CA_HEALTH_NUMBER" + } + }, + "CA_SOCIAL_INSURANCE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CA_SOCIAL_INSURANCE_NUMBER" + } + }, + "CREDIT_DEBIT_CARD_CVV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREDIT_DEBIT_CARD_CVV" + } + }, + "CREDIT_DEBIT_CARD_EXPIRY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREDIT_DEBIT_CARD_EXPIRY" + } + }, + "CREDIT_DEBIT_CARD_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREDIT_DEBIT_CARD_NUMBER" + } + }, + "DRIVER_ID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DRIVER_ID" + } + }, + "EMAIL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EMAIL" + } + }, + "INTERNATIONAL_BANK_ACCOUNT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNATIONAL_BANK_ACCOUNT_NUMBER" + } + }, + "IP_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IP_ADDRESS" + } + }, + "LICENSE_PLATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LICENSE_PLATE" + } + }, + "MAC_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MAC_ADDRESS" + } + }, + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NAME" + } + }, + "PASSWORD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PASSWORD" + } + }, + "PHONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PHONE" + } + }, + "PIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PIN" + } + }, + "SWIFT_CODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SWIFT_CODE" + } + }, + "UK_NATIONAL_HEALTH_SERVICE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UK_NATIONAL_HEALTH_SERVICE_NUMBER" + } + }, + "UK_NATIONAL_INSURANCE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UK_NATIONAL_INSURANCE_NUMBER" + } + }, + "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER" + } + }, + "URL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "URL" + } + }, + "USERNAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USERNAME" + } + }, + "US_BANK_ACCOUNT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_BANK_ACCOUNT_NUMBER" + } + }, + "US_BANK_ROUTING_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_BANK_ROUTING_NUMBER" + } + 
}, + "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER" + } + }, + "US_PASSPORT_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_PASSPORT_NUMBER" + } + }, + "US_SOCIAL_SECURITY_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "US_SOCIAL_SECURITY_NUMBER" + } + }, + "VEHICLE_IDENTIFICATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VEHICLE_IDENTIFICATION_NUMBER" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailRegexFilter": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The regex filter name.

" + } + }, + "match": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The regex filter match.

" + } + }, + "regex": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The regex query.

" + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyAction", + "traits": { + "smithy.api#documentation": "

The regex filter action.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A Regex filter configured in a guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailRegexFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailRegexFilter" + } + }, + "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyAction": { + "type": "enum", + "members": { + "ANONYMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ANONYMIZED" + } + }, + "BLOCKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCKED" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailSensitiveInformationPolicyAssessment": { + "type": "structure", + "members": { + "piiEntities": { + "target": "com.amazonaws.bedrockruntime#GuardrailPiiEntityFilterList", + "traits": { + "smithy.api#documentation": "

The PII entities in the assessment.

", + "smithy.api#required": {} + } + }, + "regexes": { + "target": "com.amazonaws.bedrockruntime#GuardrailRegexFilterList", + "traits": { + "smithy.api#documentation": "

The regex queries in the assessment.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The assessment for a Personally Identifiable Information (PII) policy.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailStreamConfiguration": { + "type": "structure", + "members": { + "guardrailIdentifier": { + "target": "com.amazonaws.bedrockruntime#GuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier for the guardrail.

", + "smithy.api#required": {} + } + }, + "guardrailVersion": { + "target": "com.amazonaws.bedrockruntime#GuardrailVersion", + "traits": { + "smithy.api#documentation": "

The version of the guardrail.

", + "smithy.api#required": {} + } + }, + "trace": { + "target": "com.amazonaws.bedrockruntime#GuardrailTrace", + "traits": { + "smithy.api#default": "disabled", + "smithy.api#documentation": "

The trace behavior for the guardrail.

" + } + }, + "streamProcessingMode": { + "target": "com.amazonaws.bedrockruntime#GuardrailStreamProcessingMode", + "traits": { + "smithy.api#default": "sync", + "smithy.api#documentation": "

The processing mode.

\n

For more information, see Configure streaming response behavior in the Amazon Bedrock User Guide.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration information for a guardrail that you use with the ConverseStream action.\n

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailStreamProcessingMode": { + "type": "enum", + "members": { + "SYNC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "sync" + } + }, + "ASYNC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "async" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailTopic": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name for the guardrail.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.bedrockruntime#GuardrailTopicType", + "traits": { + "smithy.api#documentation": "

The type of behavior that the guardrail should perform when the model detects the topic.

", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.bedrockruntime#GuardrailTopicPolicyAction", + "traits": { + "smithy.api#documentation": "

The action the guardrail should take when it intervenes on a topic.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about a topic guardrail.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailTopicList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailTopic" + } + }, + "com.amazonaws.bedrockruntime#GuardrailTopicPolicyAction": { + "type": "enum", + "members": { + "BLOCKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCKED" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailTopicPolicyAssessment": { + "type": "structure", + "members": { + "topics": { + "target": "com.amazonaws.bedrockruntime#GuardrailTopicList", + "traits": { + "smithy.api#documentation": "

The topics in the assessment.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A behavior assessment of a topic policy.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailTopicType": { + "type": "enum", + "members": { + "DENY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DENY" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailTrace": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "enabled" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "disabled" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailTraceAssessment": { + "type": "structure", + "members": { + "modelOutput": { + "target": "com.amazonaws.bedrockruntime#ModelOutputs", + "traits": { + "smithy.api#documentation": "

The output from the model.

" + } + }, + "inputAssessment": { + "target": "com.amazonaws.bedrockruntime#GuardrailAssessmentMap", + "traits": { + "smithy.api#documentation": "

The input assessment.

" + } + }, + "outputAssessments": { + "target": "com.amazonaws.bedrockruntime#GuardrailAssessmentListMap", + "traits": { + "smithy.api#documentation": "

The output assessments.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A top-level guardrail trace object. For more information, see ConverseTrace.

" + } + }, + "com.amazonaws.bedrockruntime#GuardrailVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(([1-9][0-9]{0,7})|(DRAFT))$" + } + }, + "com.amazonaws.bedrockruntime#GuardrailWordPolicyAction": { + "type": "enum", + "members": { + "BLOCKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BLOCKED" + } + } + } + }, + "com.amazonaws.bedrockruntime#GuardrailWordPolicyAssessment": { + "type": "structure", + "members": { + "customWords": { + "target": "com.amazonaws.bedrockruntime#GuardrailCustomWordList", + "traits": { + "smithy.api#documentation": "

Custom words in the assessment.

", + "smithy.api#required": {} + } + }, + "managedWordLists": { + "target": "com.amazonaws.bedrockruntime#GuardrailManagedWordList", + "traits": { + "smithy.api#documentation": "

Managed word lists in the assessment.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The word policy assessment.

" + } + }, + "com.amazonaws.bedrockruntime#ImageBlock": { + "type": "structure", + "members": { + "format": { + "target": "com.amazonaws.bedrockruntime#ImageFormat", + "traits": { + "smithy.api#documentation": "

The format of the image.

", + "smithy.api#required": {} + } + }, + "source": { + "target": "com.amazonaws.bedrockruntime#ImageSource", + "traits": { + "smithy.api#documentation": "

The source for the image.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Image content for a message.

" + } + }, + "com.amazonaws.bedrockruntime#ImageFormat": { + "type": "enum", + "members": { + "PNG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "png" + } + }, + "JPEG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "jpeg" + } + }, + "GIF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "gif" + } + }, + "WEBP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "webp" + } + } + } + }, + "com.amazonaws.bedrockruntime#ImageSource": { + "type": "union", + "members": { + "bytes": { + "target": "smithy.api#Blob", + "traits": { + "smithy.api#documentation": "

The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes.

", + "smithy.api#length": { + "min": 1 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The source for an image.

" + } + }, + "com.amazonaws.bedrockruntime#InferenceConfiguration": { + "type": "structure", + "members": { + "maxTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to allow in the generated response. The default value is\n the maximum allowed value for the model that you are using. For more information, see\n Inference parameters for foundation models.

", + "smithy.api#range": { + "min": 1 + } + } + }, + "temperature": { + "target": "smithy.api#Float", + "traits": { + "smithy.api#documentation": "

The likelihood of the model selecting higher-probability options while generating a\n response. A lower value makes the model more likely to choose higher-probability options,\n while a higher value makes the model more likely to choose lower-probability\n options.

\n

The default value is the default value for the model that\n you are using. For more information, see Inference parameters for foundation\n models.

", + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, + "topP": { + "target": "smithy.api#Float", + "traits": { + "smithy.api#documentation": "

The percentage of most-likely candidates that the model considers for the next token. For\n example, if you choose a value of 0.8 for topP, the model selects from the top 80% of the\n probability distribution of tokens that could be next in the sequence.

\n

The default value is the default value for the model that you are using. For more information, see\n Inference parameters for foundation models.

", + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, + "stopSequences": { + "target": "com.amazonaws.bedrockruntime#NonEmptyStringList", + "traits": { + "smithy.api#documentation": "

A list of stop sequences. A stop sequence is a sequence of characters that causes the\n model to stop generating the response.

", + "smithy.api#length": { + "max": 4 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

Base inference parameters to pass to a model in a call to Converse or ConverseStream. For more information,\n see Inference parameters for foundation models.

\n

If you need to pass additional parameters that the model\n supports, use the additionalModelRequestFields request field in the call to Converse\n or ConverseStream.\n For more information, see Model parameters.

" + } + }, + "com.amazonaws.bedrockruntime#InferenceResource": { + "type": "resource", + "operations": [ + { + "target": "com.amazonaws.bedrockruntime#Converse" + }, + { + "target": "com.amazonaws.bedrockruntime#ConverseStream" + }, + { + "target": "com.amazonaws.bedrockruntime#InvokeModel" + }, + { + "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStream" + } + ] + }, + "com.amazonaws.bedrockruntime#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.bedrockruntime#NonBlankString" + } + }, + "traits": { + "smithy.api#documentation": "

An internal server error occurred. Retry your request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.bedrockruntime#InvokeModel": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockruntime#InvokeModelRequest" + }, + "output": { + "target": "com.amazonaws.bedrockruntime#InvokeModelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelErrorException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelNotReadyException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelTimeoutException" + }, + { + "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. \n You use model inference to generate text, images, and embeddings.

\n

For example code, see Invoke model code examples in the Amazon Bedrock User Guide.\n

\n

This operation requires permission for the bedrock:InvokeModel action.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/model/{modelId}/invoke" + } + } + }, + "com.amazonaws.bedrockruntime#InvokeModelIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + } + }, + "com.amazonaws.bedrockruntime#InvokeModelRequest": { + "type": "structure", + "members": { + "body": { + "target": "com.amazonaws.bedrockruntime#Body", + "traits": { + "smithy.api#documentation": "

The prompt and inference parameters in the format specified in the contentType in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "contentType": { + "target": "com.amazonaws.bedrockruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The MIME type of the input data in the request. You must specify\n application/json.

", + "smithy.api#httpHeader": "Content-Type" + } + }, + "accept": { + "target": "com.amazonaws.bedrockruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The desired MIME type of the inference body in the response. The default value is application/json.

", + "smithy.api#httpHeader": "Accept" + } + }, + "modelId": { + "target": "com.amazonaws.bedrockruntime#InvokeModelIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the model to invoke to run inference.

\n

The modelId to provide depends on the type of model that you use:

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "trace": { + "target": "com.amazonaws.bedrockruntime#Trace", + "traits": { + "smithy.api#documentation": "

Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.

", + "smithy.api#httpHeader": "X-Amzn-Bedrock-Trace" + } + }, + "guardrailIdentifier": { + "target": "com.amazonaws.bedrockruntime#GuardrailIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied\n to the invocation.

\n

An error will be thrown in the following situations.

\n
    \n
  • \n

    You don't provide a guardrail identifier but you specify the amazon-bedrock-guardrailConfig field in the request body.

    \n
  • \n
  • \n

    You enable the guardrail but the contentType isn't application/json.

    \n
  • \n
  • \n

    You provide a guardrail identifier, but guardrailVersion isn't specified.

    \n
  • \n
", + "smithy.api#httpHeader": "X-Amzn-Bedrock-GuardrailIdentifier" + } + }, + "guardrailVersion": { + "target": "com.amazonaws.bedrockruntime#GuardrailVersion", + "traits": { + "smithy.api#documentation": "

The version number for the guardrail. The value can also be DRAFT.

", + "smithy.api#httpHeader": "X-Amzn-Bedrock-GuardrailVersion" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockruntime#InvokeModelResponse": { + "type": "structure", + "members": { + "body": { + "target": "com.amazonaws.bedrockruntime#Body", + "traits": { + "smithy.api#documentation": "

Inference response from the model in the format specified in the contentType header. To see the format and content of the request and response bodies for different models, refer to Inference parameters.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "contentType": { + "target": "com.amazonaws.bedrockruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The MIME type of the inference result.

", + "smithy.api#httpHeader": "Content-Type", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.bedrockruntime#InvokeModelWithResponseStream": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStreamRequest" + }, + "output": { + "target": "com.amazonaws.bedrockruntime#InvokeModelWithResponseStreamResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockruntime#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockruntime#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelErrorException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelNotReadyException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelStreamErrorException" + }, + { + "target": "com.amazonaws.bedrockruntime#ModelTimeoutException" + }, + { + "target": "com.amazonaws.bedrockruntime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockruntime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.bedrockruntime#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockruntime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream.

\n

To see if a model supports streaming, call GetFoundationModel\n and check the responseStreamingSupported field in the response.

\n \n

The CLI doesn't support InvokeModelWithResponseStream.

\n
\n

For example code, see Invoke model with streaming code\n example in the Amazon Bedrock User Guide.\n

\n

This operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/model/{modelId}/invoke-with-response-stream" + } + } + }, + "com.amazonaws.bedrockruntime#InvokeModelWithResponseStreamRequest": { + "type": "structure", + "members": { + "body": { + "target": "com.amazonaws.bedrockruntime#Body", + "traits": { + "smithy.api#documentation": "

The prompt and inference parameters in the format specified in the contentType in the header. You must provide the body in JSON format. To see the format and content of the request and response bodies for different models, refer to Inference parameters. For more information, see Run inference in the Bedrock User Guide.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "contentType": { + "target": "com.amazonaws.bedrockruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The MIME type of the input data in the request. You must specify\n application/json.

", + "smithy.api#httpHeader": "Content-Type" + } + }, + "accept": { + "target": "com.amazonaws.bedrockruntime#MimeType", + "traits": { + "smithy.api#documentation": "

The desired MIME type of the inference body in the response. The default value is\n application/json.

", + "smithy.api#httpHeader": "X-Amzn-Bedrock-Accept" + } + }, + "modelId": { + "target": "com.amazonaws.bedrockruntime#InvokeModelIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the model to invoke to run inference.

\n

The modelId to provide depends on the type of model that you use:

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "trace": { "target": "com.amazonaws.bedrockruntime#Trace", "traits": { "smithy.api#documentation": "

Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace.

", @@ -1019,6 +2718,70 @@ "smithy.api#output": {} } }, + "com.amazonaws.bedrockruntime#Message": { + "type": "structure", + "members": { + "role": { + "target": "com.amazonaws.bedrockruntime#ConversationRole", + "traits": { + "smithy.api#documentation": "

The role that the message plays in the conversation.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.bedrockruntime#ContentBlocks", + "traits": { + "smithy.api#documentation": "

The message content.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A message input to, or returned from, a call to Converse or ConverseStream.

" + } + }, + "com.amazonaws.bedrockruntime#MessageStartEvent": { + "type": "structure", + "members": { + "role": { + "target": "com.amazonaws.bedrockruntime#ConversationRole", + "traits": { + "smithy.api#documentation": "

The role for the message.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The start of a message.

" + } + }, + "com.amazonaws.bedrockruntime#MessageStopEvent": { + "type": "structure", + "members": { + "stopReason": { + "target": "com.amazonaws.bedrockruntime#StopReason", + "traits": { + "smithy.api#documentation": "

The reason why the model stopped generating output.

", + "smithy.api#required": {} + } + }, + "additionalModelResponseFields": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The additional model response fields.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The stop event for a message.

" + } + }, + "com.amazonaws.bedrockruntime#Messages": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#Message" + } + }, "com.amazonaws.bedrockruntime#MimeType": { "type": "string" }, @@ -1060,6 +2823,12 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.bedrockruntime#ModelOutputs": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#GuardrailOutputText" + } + }, "com.amazonaws.bedrockruntime#ModelStreamErrorException": { "type": "structure", "members": { @@ -1098,10 +2867,32 @@ "smithy.api#httpError": 408 } }, - "com.amazonaws.bedrockruntime#NonBlankString": { + "com.amazonaws.bedrockruntime#NonBlankString": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.bedrockruntime#NonEmptyString": { "type": "string", "traits": { - "smithy.api#pattern": "^[\\s\\S]*$" + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.bedrockruntime#NonEmptyStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#NonEmptyString" + } + }, + "com.amazonaws.bedrockruntime#NonNegativeInteger": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0 + } } }, "com.amazonaws.bedrockruntime#PartBody": { @@ -1199,6 +2990,21 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.bedrockruntime#SpecificToolChoice": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockruntime#ToolName", + "traits": { + "smithy.api#documentation": "

The name of the tool that the model must request.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The model must request a specific tool. For example, {\"tool\" : {\"name\" : \"Your tool name\"}}.

\n \n

This field is only supported by Anthropic Claude 3 models.

\n
" + } + }, "com.amazonaws.bedrockruntime#StatusCode": { "type": "integer", "traits": { @@ -1208,6 +3014,73 @@ } } }, + "com.amazonaws.bedrockruntime#StopReason": { + "type": "enum", + "members": { + "END_TURN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "end_turn" + } + }, + "TOOL_USE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "tool_use" + } + }, + "MAX_TOKENS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "max_tokens" + } + }, + "STOP_SEQUENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "stop_sequence" + } + }, + "GUARDRAIL_INTERVENED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "guardrail_intervened" + } + }, + "CONTENT_FILTERED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "content_filtered" + } + } + } + }, + "com.amazonaws.bedrockruntime#SystemContentBlock": { + "type": "union", + "members": { + "text": { + "target": "com.amazonaws.bedrockruntime#NonEmptyString", + "traits": { + "smithy.api#documentation": "

A system prompt for the model.

" + } + }, + "guardContent": { + "target": "com.amazonaws.bedrockruntime#GuardrailConverseContentBlock", + "traits": { + "smithy.api#documentation": "

A content block to assess with the guardrail. Use with the Converse API (Converse and ConverseStream).

\n

For more information, see Use a guardrail with the Converse\n API in the Amazon Bedrock User Guide.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A system content block.

" + } + }, + "com.amazonaws.bedrockruntime#SystemContentBlocks": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#SystemContentBlock" + } + }, "com.amazonaws.bedrockruntime#ThrottlingException": { "type": "structure", "members": { @@ -1221,6 +3094,325 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.bedrockruntime#TokenUsage": { + "type": "structure", + "members": { + "inputTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of tokens sent in the request to the model.

", + "smithy.api#range": { + "min": 0 + }, + "smithy.api#required": {} + } + }, + "outputTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of tokens that the model generated for the request.

", + "smithy.api#range": { + "min": 0 + }, + "smithy.api#required": {} + } + }, + "totalTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The total of input tokens and tokens generated by the model.

", + "smithy.api#range": { + "min": 0 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The tokens used in a message API inference call.

" + } + }, + "com.amazonaws.bedrockruntime#Tool": { + "type": "union", + "members": { + "toolSpec": { + "target": "com.amazonaws.bedrockruntime#ToolSpecification", + "traits": { + "smithy.api#documentation": "

The specification for the tool.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about a tool that you can use with the Converse API.

" + } + }, + "com.amazonaws.bedrockruntime#ToolChoice": { + "type": "union", + "members": { + "auto": { + "target": "com.amazonaws.bedrockruntime#AutoToolChoice", + "traits": { + "smithy.api#documentation": "

(Default). The model automatically decides whether to call a tool or to generate text instead.

" + } + }, + "any": { + "target": "com.amazonaws.bedrockruntime#AnyToolChoice", + "traits": { + "smithy.api#documentation": "

The model must request at least one tool (no text is generated).

" + } + }, + "tool": { + "target": "com.amazonaws.bedrockruntime#SpecificToolChoice", + "traits": { + "smithy.api#documentation": "

The model must request the specified tool. Only supported by Anthropic Claude 3 models.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Determines which tools the model should request in a call to Converse or ConverseStream.\n ToolChoice is only supported by\n Anthropic Claude 3 models and by Mistral AI Mistral Large.

" + } + }, + "com.amazonaws.bedrockruntime#ToolConfiguration": { + "type": "structure", + "members": { + "tools": { + "target": "com.amazonaws.bedrockruntime#Tools", + "traits": { + "smithy.api#documentation": "

An array of tools that you want to pass to a model.

", + "smithy.api#length": { + "min": 1 + }, + "smithy.api#required": {} + } + }, + "toolChoice": { + "target": "com.amazonaws.bedrockruntime#ToolChoice", + "traits": { + "smithy.api#documentation": "

If supported by the model, forces the model to request a tool.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration information for the tools that you pass to a model.

\n \n

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

\n
" + } + }, + "com.amazonaws.bedrockruntime#ToolInputSchema": { + "type": "union", + "members": { + "json": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The JSON schema for the tool. For more information, see JSON Schema Reference.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The schema for the tool. The top level schema type must be object.

" + } + }, + "com.amazonaws.bedrockruntime#ToolName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9_]*$" + } + }, + "com.amazonaws.bedrockruntime#ToolResultBlock": { + "type": "structure", + "members": { + "toolUseId": { + "target": "com.amazonaws.bedrockruntime#ToolUseId", + "traits": { + "smithy.api#documentation": "

The ID of the tool request that this is the result for.

", + "smithy.api#required": {} + } + }, + "content": { + "target": "com.amazonaws.bedrockruntime#ToolResultContentBlocks", + "traits": { + "smithy.api#documentation": "

The content for the tool result content block.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.bedrockruntime#ToolResultStatus", + "traits": { + "smithy.api#documentation": "

The status for the tool result content block.

\n \n

This field is only supported by Anthropic Claude 3 models.

\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

A tool result block that contains the results for a tool request that\n the model previously made.

" + } + }, + "com.amazonaws.bedrockruntime#ToolResultContentBlock": { + "type": "union", + "members": { + "json": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

A tool result that is JSON format data.

" + } + }, + "text": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A tool result that is text.

" + } + }, + "image": { + "target": "com.amazonaws.bedrockruntime#ImageBlock", + "traits": { + "smithy.api#documentation": "

A tool result that is an image.

\n \n

This field is only supported by Anthropic Claude 3 models.

\n
" + } + }, + "document": { + "target": "com.amazonaws.bedrockruntime#DocumentBlock", + "traits": { + "smithy.api#documentation": "

A tool result that is a document.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The tool result content block.

" + } + }, + "com.amazonaws.bedrockruntime#ToolResultContentBlocks": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#ToolResultContentBlock" + } + }, + "com.amazonaws.bedrockruntime#ToolResultStatus": { + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "success" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "error" + } + } + } + }, + "com.amazonaws.bedrockruntime#ToolSpecification": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.bedrockruntime#ToolName", + "traits": { + "smithy.api#documentation": "

The name for the tool.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.bedrockruntime#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The description for the tool.

" + } + }, + "inputSchema": { + "target": "com.amazonaws.bedrockruntime#ToolInputSchema", + "traits": { + "smithy.api#documentation": "

The input schema for the tool in JSON format.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The specification for the tool.

" + } + }, + "com.amazonaws.bedrockruntime#ToolUseBlock": { + "type": "structure", + "members": { + "toolUseId": { + "target": "com.amazonaws.bedrockruntime#ToolUseId", + "traits": { + "smithy.api#documentation": "

The ID for the tool request.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.bedrockruntime#ToolName", + "traits": { + "smithy.api#documentation": "

The name of the tool that the model wants to use.

", + "smithy.api#required": {} + } + }, + "input": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The input to pass to the tool.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A tool use content block. Contains information about a tool that the model\n is requesting be run. The model uses the result from the tool to generate a response.

" + } + }, + "com.amazonaws.bedrockruntime#ToolUseBlockDelta": { + "type": "structure", + "members": { + "input": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The input for a requested tool.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The delta for a tool use block.

" + } + }, + "com.amazonaws.bedrockruntime#ToolUseBlockStart": { + "type": "structure", + "members": { + "toolUseId": { + "target": "com.amazonaws.bedrockruntime#ToolUseId", + "traits": { + "smithy.api#documentation": "

The ID for the tool request.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.bedrockruntime#ToolName", + "traits": { + "smithy.api#documentation": "

The name of the tool that the model is requesting to use.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The start of a tool use block.

" + } + }, + "com.amazonaws.bedrockruntime#ToolUseId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_-]+$" + } + }, + "com.amazonaws.bedrockruntime#Tools": { + "type": "list", + "member": { + "target": "com.amazonaws.bedrockruntime#Tool" + } + }, "com.amazonaws.bedrockruntime#Trace": { "type": "enum", "members": { diff --git a/models/budgets.json b/models/budgets.json index 90f4a70225..00dc803101 100644 --- a/models/budgets.json +++ b/models/budgets.json @@ -90,6 +90,15 @@ { "target": "com.amazonaws.budgets#ExecuteBudgetAction" }, + { + "target": "com.amazonaws.budgets#ListTagsForResource" + }, + { + "target": "com.amazonaws.budgets#TagResource" + }, + { + "target": "com.amazonaws.budgets#UntagResource" + }, { "target": "com.amazonaws.budgets#UpdateBudget" }, @@ -159,7 +168,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -202,7 +210,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -215,7 +224,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -229,7 +237,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -354,7 +361,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -389,7 +395,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -400,14 +405,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -421,14 +428,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -437,11 +442,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -452,14 +457,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -473,7 +480,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -493,7 +499,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -504,14 +509,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -522,9 +529,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -960,7 +969,7 @@ "ActionType": { "target": "com.amazonaws.budgets#ActionType", "traits": { - "smithy.api#documentation": "

The type of action. This defines the type of tasks that can be carried out by this action.\n\t\t\tThis field also determines the format for definition.

", + "smithy.api#documentation": "

The type of action. This defines the type of tasks that can be carried out by this\n\t\t\taction. This field also determines the format for definition.

", "smithy.api#required": {} } }, @@ -981,7 +990,7 @@ "ExecutionRoleArn": { "target": "com.amazonaws.budgets#RoleArn", "traits": { - "smithy.api#documentation": "

The role passed for action execution and reversion. Roles and actions must be in the same\n\t\t\taccount.

", + "smithy.api#documentation": "

The role passed for action execution and reversion. Roles and actions must be in the\n\t\t\tsame account.

", "smithy.api#required": {} } }, @@ -1041,7 +1050,7 @@ "EventType": { "target": "com.amazonaws.budgets#EventType", "traits": { - "smithy.api#documentation": "

This distinguishes between whether the events are triggered by the user or are generated by\n\t\t\tthe system.

", + "smithy.api#documentation": "

This distinguishes between whether the events are triggered by the user or are\n\t\t\tgenerated by the system.

", "smithy.api#required": {} } }, @@ -1235,6 +1244,15 @@ } } }, + "com.amazonaws.budgets#AmazonResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1011 + } + } + }, "com.amazonaws.budgets#ApprovalModel": { "type": "enum", "members": { @@ -1258,14 +1276,14 @@ "AutoAdjustType": { "target": "com.amazonaws.budgets#AutoAdjustType", "traits": { - "smithy.api#documentation": "

The string that defines whether your budget auto-adjusts based on historical or forecasted data.

", + "smithy.api#documentation": "

The string that defines whether your budget auto-adjusts based on historical or\n\t\t\tforecasted data.

", "smithy.api#required": {} } }, "HistoricalOptions": { "target": "com.amazonaws.budgets#HistoricalOptions", "traits": { - "smithy.api#documentation": "

The parameters that define or describe the historical data that your auto-adjusting budget is based on.

" + "smithy.api#documentation": "

The parameters that define or describe the historical data that your auto-adjusting\n\t\t\tbudget is based on.

" } }, "LastAutoAdjustTime": { @@ -1302,32 +1320,32 @@ "BudgetName": { "target": "com.amazonaws.budgets#BudgetName", "traits": { - "smithy.api#documentation": "

The name of a budget. The name must be unique within an account. The : and\n\t\t\t\t\\ characters, and the \"/action/\" substring, aren't allowed in\n\t\t\t\tBudgetName.

", + "smithy.api#documentation": "

The name of a budget. The name must be unique within an account. The :\n\t\t\tand \\ characters, and the \"/action/\" substring, aren't allowed in\n\t\t\t\tBudgetName.

", "smithy.api#required": {} } }, "BudgetLimit": { "target": "com.amazonaws.budgets#Spend", "traits": { - "smithy.api#documentation": "

The total amount of cost, usage, RI utilization, RI coverage, Savings Plans utilization, or\n\t\t\tSavings Plans coverage that you want to track with your budget.

\n

\n BudgetLimit is required for cost or usage budgets, but optional for RI or\n\t\t\tSavings Plans utilization or coverage budgets. RI and Savings Plans utilization or\n\t\t\tcoverage budgets default to 100. This is the only valid value for RI or\n\t\t\tSavings Plans utilization or coverage budgets. You can't use BudgetLimit\n\t\t\twith PlannedBudgetLimits for CreateBudget and\n\t\t\t\tUpdateBudget actions.

" + "smithy.api#documentation": "

The total amount of cost, usage, RI utilization, RI coverage, Savings Plans\n\t\t\tutilization, or Savings Plans coverage that you want to track with your budget.

\n

\n BudgetLimit is required for cost or usage budgets, but optional for RI or\n\t\t\tSavings Plans utilization or coverage budgets. RI and Savings Plans utilization or\n\t\t\tcoverage budgets default to 100. This is the only valid value for RI or\n\t\t\tSavings Plans utilization or coverage budgets. You can't use BudgetLimit\n\t\t\twith PlannedBudgetLimits for CreateBudget and\n\t\t\t\tUpdateBudget actions.

" } }, "PlannedBudgetLimits": { "target": "com.amazonaws.budgets#PlannedBudgetLimits", "traits": { - "smithy.api#documentation": "

A map containing multiple BudgetLimit, including current or future limits.

\n

\n PlannedBudgetLimits is available for cost or usage budget and supports both\n\t\t\tmonthly and quarterly TimeUnit.

\n

For monthly budgets, provide 12 months of PlannedBudgetLimits values. This must start from the current month and include the next 11 months. The key is the start of the month, UTC in epoch seconds.

\n

For quarterly budgets, provide four quarters of PlannedBudgetLimits value\n\t\t\tentries in standard calendar quarter increments. This must start from the current\n\t\t\tquarter and include the next three quarters. The key is the start of the\n\t\t\tquarter, UTC in epoch seconds.

\n

If the planned budget expires before 12 months for monthly or four quarters for quarterly,\n\t\t\tprovide the PlannedBudgetLimits values only for the remaining\n\t\t\tperiods.

\n

If the budget begins at a date in the future, provide PlannedBudgetLimits values from the start date of the budget.

\n

After all of the BudgetLimit values in PlannedBudgetLimits are used, the budget continues to use the last limit as the BudgetLimit. At that point, the planned budget provides the same experience as a fixed budget.

\n

\n DescribeBudget and DescribeBudgets response along with\n\t\t\t\tPlannedBudgetLimits also contain BudgetLimit representing\n\t\t\tthe current month or quarter limit present in PlannedBudgetLimits. This\n\t\t\tonly applies to budgets that are created with PlannedBudgetLimits. Budgets\n\t\t\tthat are created without PlannedBudgetLimits only contain\n\t\t\t\tBudgetLimit. They don't contain\n\t\t\tPlannedBudgetLimits.

" + "smithy.api#documentation": "

A map containing multiple BudgetLimit, including current or future\n\t\t\tlimits.

\n

\n PlannedBudgetLimits is available for cost or usage budget and supports\n\t\t\tboth monthly and quarterly TimeUnit.

\n

For monthly budgets, provide 12 months of PlannedBudgetLimits values.\n\t\t\tThis must start from the current month and include the next 11 months. The\n\t\t\t\tkey is the start of the month, UTC in epoch seconds.

\n

For quarterly budgets, provide four quarters of PlannedBudgetLimits value\n\t\t\tentries in standard calendar quarter increments. This must start from the current\n\t\t\tquarter and include the next three quarters. The key is the start of the\n\t\t\tquarter, UTC in epoch seconds.

\n

If the planned budget expires before 12 months for monthly or four quarters for\n\t\t\tquarterly, provide the PlannedBudgetLimits values only for the remaining\n\t\t\tperiods.

\n

If the budget begins at a date in the future, provide PlannedBudgetLimits\n\t\t\tvalues from the start date of the budget.

\n

After all of the BudgetLimit values in PlannedBudgetLimits\n\t\t\tare used, the budget continues to use the last limit as the BudgetLimit. At\n\t\t\tthat point, the planned budget provides the same experience as a fixed budget.

\n

\n DescribeBudget and DescribeBudgets response along with\n\t\t\t\tPlannedBudgetLimits also contain BudgetLimit representing\n\t\t\tthe current month or quarter limit present in PlannedBudgetLimits. This\n\t\t\tonly applies to budgets that are created with PlannedBudgetLimits. Budgets\n\t\t\tthat are created without PlannedBudgetLimits only contain\n\t\t\t\tBudgetLimit. They don't contain\n\t\t\tPlannedBudgetLimits.

" } }, "CostFilters": { "target": "com.amazonaws.budgets#CostFilters", "traits": { - "smithy.api#documentation": "

The cost filters, such as Region, Service, member account, Tag, or Cost Category, that are applied to a budget.

\n

Amazon Web Services Budgets supports the following services as a Service filter for RI budgets:

\n
    \n
  • \n

    Amazon EC2

    \n
  • \n
  • \n

    Amazon Redshift

    \n
  • \n
  • \n

    Amazon Relational Database Service

    \n
  • \n
  • \n

    Amazon ElastiCache

    \n
  • \n
  • \n

    Amazon OpenSearch Service

    \n
  • \n
" + "smithy.api#documentation": "

The cost filters, such as Region, Service,\n\t\t\t\tLinkedAccount, Tag, or CostCategory, that are\n\t\t\tapplied to a budget.

\n

Amazon Web Services Budgets supports the following services as a Service filter for RI budgets:

\n
    \n
  • \n

    Amazon EC2

    \n
  • \n
  • \n

    Amazon Redshift

    \n
  • \n
  • \n

    Amazon Relational Database Service

    \n
  • \n
  • \n

    Amazon ElastiCache

    \n
  • \n
  • \n

    Amazon OpenSearch Service

    \n
  • \n
" } }, "CostTypes": { "target": "com.amazonaws.budgets#CostTypes", "traits": { - "smithy.api#documentation": "

The types of costs that are included in this COST budget.

\n

\n USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE budgets do not have CostTypes.

" + "smithy.api#documentation": "

The types of costs that are included in this COST budget.

\n

\n USAGE, RI_UTILIZATION, RI_COVERAGE,\n\t\t\t\tSAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE\n\t\t\tbudgets do not have CostTypes.

" } }, "TimeUnit": { @@ -1340,7 +1358,7 @@ "TimePeriod": { "target": "com.amazonaws.budgets#TimePeriod", "traits": { - "smithy.api#documentation": "

The period of time that's covered by a budget. You setthe start date and end date. The start\n\t\t\tdate must come before the end date. The end date must come before 06/15/87 00:00\n\t\t\t\tUTC.

\n

If you create your budget and don't specify a start date, Amazon Web Services defaults to the\n\t\t\tstart of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example,\n\t\t\tif you created your budget on January 24, 2018, chose DAILY, and didn't set\n\t\t\ta start date, Amazon Web Services set your start date to 01/24/18 00:00 UTC.\n\t\t\tIf you chose MONTHLY, Amazon Web Services set your start date to\n\t\t\t\t01/01/18 00:00 UTC. If you didn't specify an end date, Amazon Web Services set your end date to 06/15/87 00:00 UTC. The defaults are the same for\n\t\t\tthe Billing and Cost Management console and the API.

\n

You can change either date with the UpdateBudget operation.

\n

After the end date, Amazon Web Services deletes the budget and all the associated\n\t\t\tnotifications and subscribers.

" + "smithy.api#documentation": "

The period of time that's covered by a budget. You set the start date and end date. The\n\t\t\tstart date must come before the end date. The end date must come before 06/15/87\n\t\t\t\t00:00 UTC.

\n

If you create your budget and don't specify a start date, Amazon Web Services defaults\n\t\t\tto the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For\n\t\t\texample, if you created your budget on January 24, 2018, chose DAILY, and\n\t\t\tdidn't set a start date, Amazon Web Services set your start date to 01/24/18 00:00\n\t\t\t\tUTC. If you chose MONTHLY, Amazon Web Services set your start\n\t\t\tdate to 01/01/18 00:00 UTC. If you didn't specify an end date, Amazon Web Services set your end date to 06/15/87 00:00 UTC. The defaults are\n\t\t\tthe same for the Billing and Cost Management console and the API.

\n

You can change either date with the UpdateBudget operation.

\n

After the end date, Amazon Web Services deletes the budget and all the associated\n\t\t\tnotifications and subscribers.

" } }, "CalculatedSpend": { @@ -1352,7 +1370,7 @@ "BudgetType": { "target": "com.amazonaws.budgets#BudgetType", "traits": { - "smithy.api#documentation": "

Specifies whether this budget tracks costs, usage, RI utilization, RI coverage, Savings\n\t\t\tPlans utilization, or Savings Plans coverage.

", + "smithy.api#documentation": "

Specifies whether this budget tracks costs, usage, RI utilization, RI coverage,\n\t\t\tSavings Plans utilization, or Savings Plans coverage.

", "smithy.api#required": {} } }, @@ -1370,18 +1388,18 @@ } }, "traits": { - "smithy.api#documentation": "

Represents the output of the CreateBudget operation. The content consists of the detailed metadata and data file information, and the current status of the budget object.

\n

This is the Amazon Resource Name (ARN) pattern for a budget:

\n

\n arn:aws:budgets::AccountId:budget/budgetName\n

" + "smithy.api#documentation": "

Represents the output of the CreateBudget operation. The content consists\n\t\t\tof the detailed metadata and data file information, and the current status of the\n\t\t\t\tbudget object.

\n

This is the Amazon Resource Name (ARN) pattern for a budget:

\n

\n arn:aws:budgets::AccountId:budget/budgetName\n

" } }, "com.amazonaws.budgets#BudgetName": { "type": "string", "traits": { - "smithy.api#documentation": "

A string that represents the budget name. The \":\" and \"\\\" characters, and the \"/action/\"\n\t\t\tsubstring, aren't allowed.

", + "smithy.api#documentation": "

A string that represents the budget name. The \":\" and \"\\\" characters, and the\n\t\t\t\"/action/\" substring, aren't allowed.

", "smithy.api#length": { "min": 1, "max": 100 }, - "smithy.api#pattern": "^(?![^:\\\\]*/action/)[^:\\\\]+$" + "smithy.api#pattern": "^(?![^:\\\\]*/action/|(?i).*.*)[^:\\\\]+$" } }, "com.amazonaws.budgets#BudgetNotificationsForAccount": { @@ -1437,12 +1455,12 @@ "BudgetedAndActualAmountsList": { "target": "com.amazonaws.budgets#BudgetedAndActualAmountsList", "traits": { - "smithy.api#documentation": "

A list of amounts of cost or usage that you created budgets for, which are compared to your\n\t\t\tactual costs or usage.

" + "smithy.api#documentation": "

A list of amounts of cost or usage that you created budgets for, which are compared to\n\t\t\tyour actual costs or usage.

" } } }, "traits": { - "smithy.api#documentation": "

A history of the state of a budget at the end of the budget's specified time period.

" + "smithy.api#documentation": "

A history of the state of a budget at the end of the budget's specified time\n\t\t\tperiod.

" } }, "com.amazonaws.budgets#BudgetType": { @@ -1486,7 +1504,7 @@ } }, "traits": { - "smithy.api#documentation": "

The type of a budget. It must be one of the following types:

\n

\n COST, USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, or SAVINGS_PLANS_COVERAGE.

" + "smithy.api#documentation": "

The type of a budget. It must be one of the following types:

\n

\n COST, USAGE, RI_UTILIZATION,\n\t\t\t\tRI_COVERAGE, SAVINGS_PLANS_UTILIZATION, or\n\t\t\t\tSAVINGS_PLANS_COVERAGE.

" } }, "com.amazonaws.budgets#BudgetedAndActualAmounts": { @@ -1512,7 +1530,7 @@ } }, "traits": { - "smithy.api#documentation": "

The amount of cost or usage that you created the budget for, compared to your actual costs or usage.

" + "smithy.api#documentation": "

The amount of cost or usage that you created the budget for, compared to your actual\n\t\t\tcosts or usage.

" } }, "com.amazonaws.budgets#BudgetedAndActualAmountsList": { @@ -1548,7 +1566,7 @@ } }, "traits": { - "smithy.api#documentation": "

The spend objects that are associated with this budget. The actualSpend tracks\n\t\t\thow much you've used, cost, usage, RI units, or Savings Plans units and the\n\t\t\t\tforecastedSpend tracks how much that you're predicted to spend based on\n\t\t\tyour historical usage profile.

\n

For example, if it's the 20th of the month and you have spent 50 dollars on\n\t\t\tAmazon EC2, your actualSpend is 50 USD, and your\n\t\t\t\tforecastedSpend is 75 USD.

" + "smithy.api#documentation": "

The spend objects that are associated with this budget. The actualSpend\n\t\t\ttracks how much you've used, cost, usage, RI units, or Savings Plans units and the\n\t\t\t\tforecastedSpend tracks how much that you're predicted to spend based on\n\t\t\tyour historical usage profile.

\n

For example, if it's the 20th of the month and you have spent 50 dollars\n\t\t\ton Amazon EC2, your actualSpend is 50 USD, and your\n\t\t\t\tforecastedSpend is 75 USD.

" } }, "com.amazonaws.budgets#ComparisonOperator": { @@ -1574,7 +1592,7 @@ } }, "traits": { - "smithy.api#documentation": "

The comparison operator of a notification. Currently, the service supports the following\n\t\t\toperators:

\n

\n GREATER_THAN, LESS_THAN, EQUAL_TO\n

" + "smithy.api#documentation": "

The comparison operator of a notification. Currently, the service supports the\n\t\t\tfollowing operators:

\n

\n GREATER_THAN, LESS_THAN, EQUAL_TO\n

" } }, "com.amazonaws.budgets#CostFilters": { @@ -1660,7 +1678,7 @@ } }, "traits": { - "smithy.api#documentation": "

The types of cost that are included in a COST budget, such as tax and subscriptions.

\n

\n USAGE, RI_UTILIZATION, RI_COVERAGE,\n\t\t\t\tSAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE\n\t\t\tbudgets don't have CostTypes.

" + "smithy.api#documentation": "

The types of cost that are included in a COST budget, such as tax and\n\t\t\tsubscriptions.

\n

\n USAGE, RI_UTILIZATION, RI_COVERAGE,\n\t\t\t\tSAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE\n\t\t\tbudgets don't have CostTypes.

" } }, "com.amazonaws.budgets#CreateBudget": { @@ -1687,6 +1705,9 @@ { "target": "com.amazonaws.budgets#InvalidParameterException" }, + { + "target": "com.amazonaws.budgets#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.budgets#ThrottlingException" } @@ -1722,6 +1743,9 @@ { "target": "com.amazonaws.budgets#NotFoundException" }, + { + "target": "com.amazonaws.budgets#ServiceQuotaExceededException" + }, { "target": "com.amazonaws.budgets#ThrottlingException" } @@ -1789,6 +1813,12 @@ "traits": { "smithy.api#required": {} } + }, + "ResourceTags": { + "target": "com.amazonaws.budgets#ResourceTagList", + "traits": { + "smithy.api#documentation": "

An optional list of tags to associate with the specified budget action. Each tag consists of a\n key and a value, and each key must be unique for the resource.

" + } } }, "traits": { @@ -1844,6 +1874,12 @@ "traits": { "smithy.api#documentation": "

A notification that you want to associate with a budget. A budget can have up to five notifications, and each notification can have one SNS subscriber and up to 10 email subscribers. If you include notifications and subscribers in your CreateBudget call, Amazon Web Services creates the notifications and subscribers for you.

" } + }, + "ResourceTags": { + "target": "com.amazonaws.budgets#ResourceTagList", + "traits": { + "smithy.api#documentation": "

An optional list of tags to associate with the specified budget. Each tag consists of a\n key and a value, and each key must be unique for the resource.
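For illustration only (this patch is generated model data for the Soto Swift SDK), a hedged Python/boto3 sketch of a CreateBudget call that attaches the new optional ResourceTags list; the account ID, names, and amounts are placeholders, and only a minimal Budget is shown.

import boto3

budgets = boto3.client("budgets")
budgets.create_budget(
    AccountId="123456789012",  # placeholder account ID
    Budget={
        "BudgetName": "monthly-cost-budget",
        "BudgetType": "COST",
        "BudgetLimit": {"Amount": "100.0", "Unit": "USD"},
        "TimeUnit": "MONTHLY",
    },
    ResourceTags=[  # each key must be unique for the resource
        {"Key": "team", "Value": "platform"},
    ],
)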

" + } } }, "traits": { @@ -2735,7 +2771,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n\t\t\tLists the budget names and notifications that are associated with an account.\n\t\t

", + "smithy.api#documentation": "

Lists the budget names and notifications that are associated with an account.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2756,7 +2792,7 @@ "MaxResults": { "target": "com.amazonaws.budgets#MaxResultsBudgetNotifications", "traits": { - "smithy.api#documentation": "

An integer that represents how many budgets a paginated response contains. The default is\n\t\t\t50.

" + "smithy.api#documentation": "

An integer that represents how many budgets a paginated response contains. The\n\t\t\tdefault is 50.

" } }, "NextToken": { @@ -2773,7 +2809,7 @@ "BudgetNotificationsForAccount": { "target": "com.amazonaws.budgets#BudgetNotificationsForAccountList", "traits": { - "smithy.api#documentation": "

\n\t\t\tA list of budget names and associated notifications for an account.\n\t\t

" + "smithy.api#documentation": "

A list of budget names and associated notifications for an account.
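For illustration, a boto3 sketch of paging through this operation (the model declares it paginated on NextToken with a default MaxResults of 50); the account ID is a placeholder and the paginator name is assumed to follow the usual snake_case mapping.

import boto3

budgets = boto3.client("budgets")
paginator = budgets.get_paginator("describe_budget_notifications_for_account")
for page in paginator.paginate(AccountId="123456789012"):
    for entry in page.get("BudgetNotificationsForAccount", []):
        print(entry)  # one budget name with its associated notifications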

" } }, "NextToken": { @@ -3453,19 +3489,19 @@ "BudgetAdjustmentPeriod": { "target": "com.amazonaws.budgets#AdjustmentPeriod", "traits": { - "smithy.api#documentation": "

The number of budget periods included in the moving-average calculation that determines your auto-adjusted budget amount. The maximum value depends on the TimeUnit granularity of the budget:

\n
    \n
  • \n

    For the DAILY granularity, the maximum value is 60.

    \n
  • \n
  • \n

    For the MONTHLY granularity, the maximum value is 12.

    \n
  • \n
  • \n

    For the QUARTERLY granularity, the maximum value is 4.

    \n
  • \n
  • \n

    For the ANNUALLY granularity, the maximum value is 1.

    \n
  • \n
", + "smithy.api#documentation": "

The number of budget periods included in the moving-average calculation that\n\t\t\tdetermines your auto-adjusted budget amount. The maximum value depends on the\n\t\t\t\tTimeUnit granularity of the budget:

\n
    \n
  • \n

    For the DAILY granularity, the maximum value is\n\t\t\t\t\t60.

    \n
  • \n
  • \n

    For the MONTHLY granularity, the maximum value is\n\t\t\t\t\t12.

    \n
  • \n
  • \n

    For the QUARTERLY granularity, the maximum value is\n\t\t\t\t\t\t4.

    \n
  • \n
  • \n

    For the ANNUALLY granularity, the maximum value is\n\t\t\t\t\t1.

    \n
  • \n
", "smithy.api#required": {} } }, "LookBackAvailablePeriods": { "target": "com.amazonaws.budgets#AdjustmentPeriod", "traits": { - "smithy.api#documentation": "

The integer that describes how many budget periods in your BudgetAdjustmentPeriod are included in the calculation of your current BudgetLimit. If the first budget period in your BudgetAdjustmentPeriod has no cost data, then that budget period isn’t included in the average that determines your budget limit.

\n

For example, if you set BudgetAdjustmentPeriod as 4 quarters, but your account had no cost data in the first quarter, then only the last three quarters are included in the calculation. In this scenario, LookBackAvailablePeriods returns 3.

\n

You can’t set your own LookBackAvailablePeriods. The value is automatically calculated from the BudgetAdjustmentPeriod and your historical cost data.

" + "smithy.api#documentation": "

The integer that describes how many budget periods in your\n\t\t\t\tBudgetAdjustmentPeriod are included in the calculation of your current\n\t\t\t\tBudgetLimit. If the first budget period in your\n\t\t\t\tBudgetAdjustmentPeriod has no cost data, then that budget period isn’t\n\t\t\tincluded in the average that determines your budget limit.

\n

For example, if you set BudgetAdjustmentPeriod as 4\n\t\t\tquarters, but your account had no cost data in the first quarter, then only the last\n\t\t\tthree quarters are included in the calculation. In this scenario,\n\t\t\t\tLookBackAvailablePeriods returns 3.

\n

You can’t set your own LookBackAvailablePeriods. The value is\n\t\t\tautomatically calculated from the BudgetAdjustmentPeriod and your\n\t\t\thistorical cost data.

" } } }, "traits": { - "smithy.api#documentation": "

The parameters that define or describe the historical data that your auto-adjusting budget is based on.

" + "smithy.api#documentation": "

The parameters that define or describe the historical data that your auto-adjusting\n\t\t\tbudget is based on.
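As a hedged illustration, only BudgetAdjustmentPeriod is supplied by the caller, since LookBackAvailablePeriods is computed by the service; the value below is a placeholder and the surrounding AutoAdjustData member names are assumed from the wider Budgets model.

# For a QUARTERLY budget the maximum BudgetAdjustmentPeriod is 4.
auto_adjust_data = {
    "AutoAdjustType": "HISTORICAL",  # assumed member name and value from the wider model
    "HistoricalOptions": {"BudgetAdjustmentPeriod": 4},
    # LookBackAvailablePeriods is returned by the service, not set by the caller.
}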

" } }, "com.amazonaws.budgets#IamActionDefinition": { @@ -3562,10 +3598,68 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.budgets#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.budgets#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.budgets#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.budgets#AccessDeniedException" + }, + { + "target": "com.amazonaws.budgets#InternalErrorException" + }, + { + "target": "com.amazonaws.budgets#InvalidParameterException" + }, + { + "target": "com.amazonaws.budgets#NotFoundException" + }, + { + "target": "com.amazonaws.budgets#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists tags associated with a budget or budget action resource.

" + } + }, + "com.amazonaws.budgets#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.budgets#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The unique identifier for the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.budgets#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "ResourceTags": { + "target": "com.amazonaws.budgets#ResourceTagList", + "traits": { + "smithy.api#documentation": "

The tags associated with the resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.budgets#MaxResults": { "type": "integer", "traits": { - "smithy.api#documentation": "

An integer that represents how many entries a paginated response contains. The maximum is 100.

", + "smithy.api#documentation": "

An integer that represents how many entries a paginated response contains. The\n\t\t\tmaximum is 100.

", "smithy.api#range": { "min": 1, "max": 100 @@ -3609,7 +3703,7 @@ "NotificationType": { "target": "com.amazonaws.budgets#NotificationType", "traits": { - "smithy.api#documentation": "

Specifies whether the notification is for how much you have spent (ACTUAL) or\n\t\t\tfor how much that you're forecasted to spend (FORECASTED).

", + "smithy.api#documentation": "

Specifies whether the notification is for how much you have spent\n\t\t\t(ACTUAL) or for how much that you're forecasted to spend\n\t\t\t\t(FORECASTED).

", "smithy.api#required": {} } }, @@ -3624,14 +3718,14 @@ "target": "com.amazonaws.budgets#NotificationThreshold", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The threshold that's associated with a notification. Thresholds are always a percentage, and\n\t\t\tmany customers find value being alerted between 50% - 200% of the budgeted amount. The\n\t\t\tmaximum limit for your threshold is 1,000,000% above the budgeted amount.

", + "smithy.api#documentation": "

The threshold that's associated with a notification. Thresholds are always a\n\t\t\tpercentage, and many customers find value being alerted between 50% - 200% of the\n\t\t\tbudgeted amount. The maximum limit for your threshold is 1,000,000% above the budgeted\n\t\t\tamount.

", "smithy.api#required": {} } }, "ThresholdType": { "target": "com.amazonaws.budgets#ThresholdType", "traits": { - "smithy.api#documentation": "

The type of threshold for a notification. For ABSOLUTE_VALUE thresholds, Amazon Web Services notifies you when you go over or are forecasted to go over your total cost threshold. For PERCENTAGE thresholds, Amazon Web Services notifies you when you go over or are forecasted to go over a certain percentage of your forecasted spend. For example, if you have a budget for 200 dollars and you have a PERCENTAGE threshold of 80%, Amazon Web Services notifies you when you go over 160 dollars.

" + "smithy.api#documentation": "

The type of threshold for a notification. For ABSOLUTE_VALUE thresholds,\n\t\t\t\tAmazon Web Services notifies you when you go over or are forecasted to go over your\n\t\t\ttotal cost threshold. For\n\t\t\t\tPERCENTAGE thresholds, Amazon Web Services notifies you when you go over\n\t\t\tor are forecasted to go over a certain percentage of your forecasted spend. For example,\n\t\t\tif you have a budget for 200 dollars and you have a PERCENTAGE threshold of\n\t\t\t80%, Amazon Web Services notifies you when you go over 160 dollars.

" } }, "NotificationState": { @@ -3642,7 +3736,7 @@ } }, "traits": { - "smithy.api#documentation": "

A notification that's associated with a budget. A budget can have up to ten notifications.

\n

Each notification must have at least one subscriber. A notification can have one SNS subscriber and up to 10 email subscribers, for a total of 11 subscribers.

\n

For example, if you have a budget for 200 dollars and you want to be notified when you go over 160 dollars, create a notification with the following parameters:

\n
    \n
  • \n

    A notificationType of ACTUAL\n

    \n
  • \n
  • \n

    A thresholdType of PERCENTAGE\n

    \n
  • \n
  • \n

    A comparisonOperator of GREATER_THAN\n

    \n
  • \n
  • \n

    A notification threshold of 80\n

    \n
  • \n
" + "smithy.api#documentation": "

A notification that's associated with a budget. A budget can have up to ten\n\t\t\tnotifications.

\n

Each notification must have at least one subscriber. A notification can have one SNS\n\t\t\tsubscriber and up to 10 email subscribers, for a total of 11 subscribers.

\n

For example, if you have a budget for 200 dollars and you want to be notified when you\n\t\t\tgo over 160 dollars, create a notification with the following parameters:

\n
    \n
  • \n

    A notificationType of ACTUAL\n

    \n
  • \n
  • \n

    A thresholdType of PERCENTAGE\n

    \n
  • \n
  • \n

    A comparisonOperator of GREATER_THAN\n

    \n
  • \n
  • \n

    A notification threshold of 80\n

    \n
  • \n
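A hedged Python sketch of the example notification described in the list above, paired with an email subscriber as used by CreateBudget's NotificationsWithSubscribers; the notification and subscriber member names come from the shapes in this model, while the wrapper's Notification/Subscribers member names are assumptions.

notification = {
    "NotificationType": "ACTUAL",
    "ComparisonOperator": "GREATER_THAN",
    "Threshold": 80.0,
    "ThresholdType": "PERCENTAGE",
}
subscribers = [
    {"SubscriptionType": "EMAIL", "Address": "example@example.com"},
]
# Assumed wrapper member names for NotificationWithSubscribers:
notifications_with_subscribers = [
    {"Notification": notification, "Subscribers": subscribers},
]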
" } }, "com.amazonaws.budgets#NotificationState": { @@ -3712,7 +3806,7 @@ } }, "traits": { - "smithy.api#documentation": "

A notification with subscribers. A notification can have one SNS subscriber and up to 10 email subscribers, for a total of 11 subscribers.

" + "smithy.api#documentation": "

A notification with subscribers. A notification can have one SNS subscriber and up to\n\t\t\t10 email subscribers, for a total of 11 subscribers.

" } }, "com.amazonaws.budgets#NotificationWithSubscribersList": { @@ -3798,11 +3892,75 @@ } }, "traits": { - "smithy.api#documentation": "

The request was received and recognized by the server, but the server rejected that\n particular method for the requested resource.

", + "smithy.api#documentation": "

The request was received and recognized by the server, but the server rejected that\n particular method for the requested resource.

", "smithy.api#error": "client", "smithy.api#httpError": 423 } }, + "com.amazonaws.budgets#ResourceTag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.budgets#ResourceTagKey", + "traits": { + "smithy.api#documentation": "

The key that's associated with the tag.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.budgets#ResourceTagValue", + "traits": { + "smithy.api#documentation": "

The value that's associated with the tag.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The tag structure that contains a tag key and value.

" + } + }, + "com.amazonaws.budgets#ResourceTagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.budgets#ResourceTagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.budgets#ResourceTagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.budgets#ResourceTagList": { + "type": "list", + "member": { + "target": "com.amazonaws.budgets#ResourceTag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.budgets#ResourceTagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, "com.amazonaws.budgets#Role": { "type": "string", "traits": { @@ -3857,13 +4015,26 @@ "smithy.api#documentation": "

The service control policies (SCP) action definition details.

" } }, + "com.amazonaws.budgets#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.budgets#errorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

You've reached the limit on the number of tags you can associate with a resource.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, "com.amazonaws.budgets#Spend": { "type": "structure", "members": { "Amount": { "target": "com.amazonaws.budgets#NumericValue", "traits": { - "smithy.api#documentation": "

The cost or usage amount that's associated with a budget forecast, actual spend, or budget\n\t\t\tthreshold.

", + "smithy.api#documentation": "

The cost or usage amount that's associated with a budget forecast, actual spend, or\n\t\t\tbudget threshold.

", "smithy.api#required": {} } }, @@ -3921,19 +4092,19 @@ "Address": { "target": "com.amazonaws.budgets#SubscriberAddress", "traits": { - "smithy.api#documentation": "

The address that Amazon Web Services sends budget notifications to, either an SNS topic or an email.

\n

When you create a subscriber, the value of Address can't contain line breaks.

", + "smithy.api#documentation": "

The address that Amazon Web Services sends budget notifications to, either an SNS topic\n\t\t\tor an email.

\n

When you create a subscriber, the value of Address can't contain line\n\t\t\tbreaks.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The subscriber to a budget notification. The subscriber consists of a subscription type and either an Amazon SNS topic or an email address.

\n

For example, an email subscriber has the following parameters:

\n
    \n
  • \n

    A subscriptionType of EMAIL\n

    \n
  • \n
  • \n

    An address of example@example.com\n

    \n
  • \n
" + "smithy.api#documentation": "

The subscriber to a budget notification. The subscriber consists of a subscription\n\t\t\ttype and either an Amazon SNS topic or an email address.

\n

For example, an email subscriber has the following parameters:

\n
    \n
  • \n

    A subscriptionType of EMAIL\n

    \n
  • \n
  • \n

    An address of example@example.com\n

    \n
  • \n
" } }, "com.amazonaws.budgets#SubscriberAddress": { "type": "string", "traits": { - "smithy.api#documentation": "

A string that contains an email address or SNS topic for the subscriber's address.

", + "smithy.api#documentation": "

A string that contains an email address or SNS topic for the subscriber's\n\t\t\taddress.

", "smithy.api#length": { "min": 1, "max": 2147483647 @@ -3975,6 +4146,67 @@ "smithy.api#documentation": "

The subscription type of the subscriber. It can be SMS or EMAIL.

" } }, + "com.amazonaws.budgets#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.budgets#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.budgets#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.budgets#AccessDeniedException" + }, + { + "target": "com.amazonaws.budgets#InternalErrorException" + }, + { + "target": "com.amazonaws.budgets#InvalidParameterException" + }, + { + "target": "com.amazonaws.budgets#NotFoundException" + }, + { + "target": "com.amazonaws.budgets#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.budgets#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates tags for a budget or budget action resource.
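For illustration, a boto3 sketch of the new tagging operations using the request members defined here (ResourceARN, ResourceTags, ResourceTagKeys); the ARN follows the budget ARN pattern shown earlier and is a placeholder.

import boto3

budgets = boto3.client("budgets")
budget_arn = "arn:aws:budgets::123456789012:budget/monthly-cost-budget"  # placeholder

budgets.tag_resource(
    ResourceARN=budget_arn,
    ResourceTags=[{"Key": "team", "Value": "platform"}],
)
print(budgets.list_tags_for_resource(ResourceARN=budget_arn)["ResourceTags"])
budgets.untag_resource(ResourceARN=budget_arn, ResourceTagKeys=["team"])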

" + } + }, + "com.amazonaws.budgets#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.budgets#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The unique identifier for the resource.

", + "smithy.api#required": {} + } + }, + "ResourceTags": { + "target": "com.amazonaws.budgets#ResourceTagList", + "traits": { + "smithy.api#documentation": "

The tags associated with the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.budgets#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.budgets#TargetId": { "type": "string", "traits": { @@ -4036,18 +4268,18 @@ "Start": { "target": "com.amazonaws.budgets#GenericTimestamp", "traits": { - "smithy.api#documentation": "

The start date for a budget. If you created your budget and didn't specify a start date, Amazon Web Services defaults to the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24, 2018, chose DAILY, and didn't set a start date, Amazon Web Services set your start date to 01/24/18 00:00 UTC. If you chose MONTHLY, Amazon Web Services set your start date to 01/01/18 00:00 UTC. The defaults are the same for the Billing and Cost Management console and the API.

\n

You can change your start date with the UpdateBudget operation.

" + "smithy.api#documentation": "

The start date for a budget. If you created your budget and didn't specify a start\n\t\t\tdate, Amazon Web Services defaults to the start of your chosen time period (DAILY,\n\t\t\tMONTHLY, QUARTERLY, or ANNUALLY). For example, if you created your budget on January 24,\n\t\t\t2018, chose DAILY, and didn't set a start date, Amazon Web Services set your\n\t\t\tstart date to 01/24/18 00:00 UTC. If you chose MONTHLY,\n\t\t\t\tAmazon Web Services set your start date to 01/01/18 00:00 UTC. The\n\t\t\tdefaults are the same for the Billing and Cost Management console and the API.

\n

You can change your start date with the UpdateBudget operation.

" } }, "End": { "target": "com.amazonaws.budgets#GenericTimestamp", "traits": { - "smithy.api#documentation": "

The end date for a budget. If you didn't specify an end date, Amazon Web Services set your end date to 06/15/87 00:00 UTC. The defaults are the same for the Billing and Cost Management console and the API.

\n

After the end date, Amazon Web Services deletes the budget and all the associated\n\t\t\tnotifications and subscribers. You can change your end date with the\n\t\t\t\tUpdateBudget operation.

" + "smithy.api#documentation": "

The end date for a budget. If you didn't specify an end date, Amazon Web Services set\n\t\t\tyour end date to 06/15/87 00:00 UTC. The defaults are the same for the\n\t\t\t\tBilling and Cost Management console and the API.

\n

After the end date, Amazon Web Services deletes the budget and all the associated\n\t\t\tnotifications and subscribers. You can change your end date with the\n\t\t\t\tUpdateBudget operation.

" } } }, "traits": { - "smithy.api#documentation": "

The period of time that's covered by a budget. The period has a start date and an end date.\n\t\t\tThe start date must come before the end date. There are no restrictions on the end date.

" + "smithy.api#documentation": "

The period of time that's covered by a budget. The period has a start date and an end\n\t\t\tdate. The start date must come before the end date. There are no restrictions on the end\n\t\t\tdate.
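A small illustrative sketch: TimePeriod takes timestamp values for Start and End (the dates below are placeholders; omitting either falls back to the defaults described above).

from datetime import datetime, timezone

time_period = {
    "Start": datetime(2024, 7, 1, tzinfo=timezone.utc),
    "End": datetime(2025, 6, 30, tzinfo=timezone.utc),
}
# Passed as the Budget's TimePeriod member, e.g. Budget={"TimePeriod": time_period, ...}.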

" } }, "com.amazonaws.budgets#TimeUnit": { @@ -4085,7 +4317,7 @@ "com.amazonaws.budgets#UnitValue": { "type": "string", "traits": { - "smithy.api#documentation": "

A string that represents the spend unit of a budget. It can't be null or empty.

", + "smithy.api#documentation": "

A string that represents the spend unit of a budget. It can't be null or\n\t\t\tempty.

", "smithy.api#length": { "min": 1, "max": 2147483647 @@ -4093,6 +4325,64 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.budgets#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.budgets#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.budgets#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.budgets#AccessDeniedException" + }, + { + "target": "com.amazonaws.budgets#InternalErrorException" + }, + { + "target": "com.amazonaws.budgets#InvalidParameterException" + }, + { + "target": "com.amazonaws.budgets#NotFoundException" + }, + { + "target": "com.amazonaws.budgets#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes tags associated with a budget or budget action resource.

" + } + }, + "com.amazonaws.budgets#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.budgets#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The unique identifier for the resource.

", + "smithy.api#required": {} + } + }, + "ResourceTagKeys": { + "target": "com.amazonaws.budgets#ResourceTagKeyList", + "traits": { + "smithy.api#documentation": "

The key that's associated with the tag.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.budgets#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.budgets#UpdateBudget": { "type": "operation", "input": { diff --git a/models/chatbot.json b/models/chatbot.json index a1f088d5c7..874c61f60e 100644 --- a/models/chatbot.json +++ b/models/chatbot.json @@ -49,6 +49,16 @@ "smithy.api#documentation": "Preferences which apply for AWS Chatbot usage in the calling AWS account." } }, + "com.amazonaws.chatbot#AmazonResourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1011 + }, + "smithy.api#pattern": "^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + } + }, "com.amazonaws.chatbot#Arn": { "type": "string", "traits": { @@ -124,6 +134,12 @@ "traits": { "smithy.api#documentation": "Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.Logging levels include ERROR, INFO, or NONE." } + }, + "Tags": { + "target": "com.amazonaws.chatbot#Tags", + "traits": { + "smithy.api#documentation": "A list of tags applied to the configuration." + } } }, "traits": { @@ -302,6 +318,12 @@ "traits": { "smithy.api#documentation": "Logging levels include ERROR, INFO, or NONE." } + }, + "Tags": { + "target": "com.amazonaws.chatbot#Tags", + "traits": { + "smithy.api#documentation": "A list of tags to apply to the configuration." + } } }, "traits": { @@ -463,6 +485,12 @@ "traits": { "smithy.api#documentation": "Enables use of a user role requirement in your chat configuration." } + }, + "Tags": { + "target": "com.amazonaws.chatbot#Tags", + "traits": { + "smithy.api#documentation": "A list of tags to apply to the configuration." + } } }, "traits": { @@ -569,6 +597,12 @@ "traits": { "smithy.api#documentation": "Enables use of a user role requirement in your chat configuration." } + }, + "Tags": { + "target": "com.amazonaws.chatbot#Tags", + "traits": { + "smithy.api#documentation": "A list of tags to apply to the configuration." 
+ } } }, "traits": { @@ -1574,6 +1608,19 @@ "target": "com.amazonaws.chatbot#GuardrailPolicyArn" } }, + "com.amazonaws.chatbot#InternalServiceError": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.chatbot#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "Customer/consumer-facing internal service exception.\n https://w.amazon.com/index.php/AWS/API_Standards/Exceptions#InternalServiceError", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, "com.amazonaws.chatbot#InvalidParameterException": { "type": "structure", "members": { @@ -1824,6 +1871,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.chatbot#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.chatbot#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.chatbot#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chatbot#InternalServiceError" + }, + { + "target": "com.amazonaws.chatbot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.chatbot#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves the list of tags applied to a configuration.", + "smithy.api#http": { + "method": "POST", + "uri": "/list-tags-for-resource", + "code": 200 + } + } + }, + "com.amazonaws.chatbot#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.chatbot#AmazonResourceName", + "traits": { + "smithy.api#documentation": "The ARN of the configuration.", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chatbot#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.chatbot#TagList", + "traits": { + "smithy.api#documentation": "A list of tags applied to the configuration." + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.chatbot#ListTeamsChannelConfigurationsException": { "type": "structure", "members": { @@ -1915,6 +2019,19 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.chatbot#ServiceUnavailableException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.chatbot#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "We can’t process your request right now because of a server issue. Try again later.", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, "com.amazonaws.chatbot#SlackChannelConfiguration": { "type": "structure", "members": { @@ -1990,6 +2107,12 @@ "traits": { "smithy.api#documentation": "Enables use of a user role requirement in your chat configuration." } + }, + "Tags": { + "target": "com.amazonaws.chatbot#Tags", + "traits": { + "smithy.api#documentation": "A list of tags applied to the configuration." + } } }, "traits": { @@ -2135,6 +2258,136 @@ "com.amazonaws.chatbot#String": { "type": "string" }, + "com.amazonaws.chatbot#Tag": { + "type": "structure", + "members": { + "TagKey": { + "target": "com.amazonaws.chatbot#TagKey", + "traits": { + "smithy.api#documentation": "The tag key.", + "smithy.api#required": {} + } + }, + "TagValue": { + "target": "com.amazonaws.chatbot#TagValue", + "traits": { + "smithy.api#documentation": "The tag value.", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "A tag applied to the configuration." 
+ } + }, + "com.amazonaws.chatbot#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.chatbot#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.chatbot#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.chatbot#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.chatbot#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.chatbot#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.chatbot#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.chatbot#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chatbot#InternalServiceError" + }, + { + "target": "com.amazonaws.chatbot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.chatbot#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.chatbot#TooManyTagsException" + } + ], + "traits": { + "smithy.api#documentation": "Applies the supplied tags to a configuration.", + "smithy.api#http": { + "method": "POST", + "uri": "/tag-resource", + "code": 200 + } + } + }, + "com.amazonaws.chatbot#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.chatbot#AmazonResourceName", + "traits": { + "smithy.api#documentation": "The ARN of the configuration.", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.chatbot#TagList", + "traits": { + "smithy.api#documentation": "A list of tags to apply to the configuration.", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chatbot#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.chatbot#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.chatbot#Tags": { + "type": "list", + "member": { + "target": "com.amazonaws.chatbot#Tag" + } + }, "com.amazonaws.chatbot#TeamChannelConfigurationsList": { "type": "list", "member": { @@ -2231,6 +2484,12 @@ "traits": { "smithy.api#documentation": "Enables use of a user role requirement in your chat configuration." } + }, + "Tags": { + "target": "com.amazonaws.chatbot#Tags", + "traits": { + "smithy.api#documentation": "A list of tags applied to the configuration." + } } }, "traits": { @@ -2316,6 +2575,19 @@ "smithy.api#documentation": "Identifes a user level permission for a channel configuration." 
} }, + "com.amazonaws.chatbot#TooManyTagsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.chatbot#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "The supplied list of tags contains too many tags.", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.chatbot#UUID": { "type": "string", "traits": { @@ -2326,6 +2598,63 @@ "smithy.api#pattern": "^[0-9A-Fa-f]{8}(?:-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$" } }, + "com.amazonaws.chatbot#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.chatbot#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.chatbot#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.chatbot#InternalServiceError" + }, + { + "target": "com.amazonaws.chatbot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.chatbot#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "Removes the supplied tags from a configuration", + "smithy.api#http": { + "method": "POST", + "uri": "/untag-resource", + "code": 200 + } + } + }, + "com.amazonaws.chatbot#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.chatbot#AmazonResourceName", + "traits": { + "smithy.api#documentation": "The ARN of the configuration.", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.chatbot#TagKeyList", + "traits": { + "smithy.api#documentation": "A list of tag keys to remove from the configuration.", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.chatbot#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.chatbot#UpdateAccountPreferences": { "type": "operation", "input": { @@ -2797,6 +3126,15 @@ { "target": "com.amazonaws.chatbot#ListMicrosoftTeamsUserIdentities" }, + { + "target": "com.amazonaws.chatbot#ListTagsForResource" + }, + { + "target": "com.amazonaws.chatbot#TagResource" + }, + { + "target": "com.amazonaws.chatbot#UntagResource" + }, { "target": "com.amazonaws.chatbot#UpdateAccountPreferences" }, diff --git a/models/cloudformation.json b/models/cloudformation.json index 6ea959868a..a2160e7f7e 100644 --- a/models/cloudformation.json +++ b/models/cloudformation.json @@ -3417,6 +3417,12 @@ "traits": { "smithy.api#documentation": "

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests\n so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry\n DeleteStack requests to ensure that CloudFormation successfully received them.

\n

All events initiated by a given stack operation are assigned the same client request token, which you can use to\n track operations. For example, if you execute a CreateStack operation with the token\n token1, then all the StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack operations that are\n initiated from the console use the token format Console-StackOperation-ID, which helps you\n easily identify the stack operation. For example, if you create a stack using the console, each stack event would be\n assigned the same token in the following format:\n Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } + }, + "DeletionMode": { + "target": "com.amazonaws.cloudformation#DeletionMode", + "traits": { + "smithy.api#documentation": "

Specifies the deletion mode for the stack. Possible values are:

\n
    \n
  • \n

    \n STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this\n parameter.

    \n
  • \n
  • \n

    \n FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to\n resource deletion failure.

    \n
  • \n
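For illustration, a boto3 sketch of retrying a failed deletion with the new parameter described in the list above; the stack name is a placeholder.

import boto3

cloudformation = boto3.client("cloudformation")
# Retry a stack that is stuck in DELETE_FAILED because a resource could not be deleted.
cloudformation.delete_stack(
    StackName="my-stuck-stack",         # placeholder stack name
    DeletionMode="FORCE_DELETE_STACK",
)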
" + } } }, "traits": { @@ -3579,6 +3585,23 @@ "smithy.api#output": {} } }, + "com.amazonaws.cloudformation#DeletionMode": { + "type": "enum", + "members": { + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STANDARD" + } + }, + "FORCE_DELETE_STACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FORCE_DELETE_STACK" + } + } + } + }, "com.amazonaws.cloudformation#DeletionTime": { "type": "timestamp" }, @@ -7934,7 +7957,7 @@ "Summaries": { "target": "com.amazonaws.cloudformation#StackInstanceResourceDriftsSummaries", "traits": { - "smithy.api#documentation": "

A list of StackInstanceResourceDriftSummary structures that contain information about the specified\n stack instances.

" + "smithy.api#documentation": "

A list of StackInstanceResourceDriftsSummary structures that contain information about the\n specified stack instances.

" } }, "NextToken": { @@ -11505,6 +11528,12 @@ "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes\n newly created resources marked with a deletion policy of Retain.

\n

Default: false\n

" } }, + "DeletionMode": { + "target": "com.amazonaws.cloudformation#DeletionMode", + "traits": { + "smithy.api#documentation": "

Specifies the deletion mode for the stack. Possible values are:

\n
    \n
  • \n

    \n STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this\n parameter.

    \n
  • \n
  • \n

    \n FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to\n resource deletion failure.

    \n
  • \n
" + } + }, "DetailedStatus": { "target": "com.amazonaws.cloudformation#DetailedStatus", "traits": { @@ -13119,7 +13148,7 @@ "ConcurrencyMode": { "target": "com.amazonaws.cloudformation#ConcurrencyMode", "traits": { - "smithy.api#documentation": "

Specifies how the concurrency level behaves during the operation execution.

\n
    \n
  • \n

    \n STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the\n number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual\n concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of\n MaxConcurrentCount +1. The actual concurrency is then reduced proportionally by the number of\n failures. This is the default behavior.

    \n

    If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar.

    \n
  • \n
  • \n

    \n SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual\n concurrency. This allows stack set operations to run at the concurrency level set by the\n MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of\n failures.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies how the concurrency level behaves during the operation execution.

\n
    \n
  • \n

    \n STRICT_FAILURE_TOLERANCE: This option dynamically lowers the concurrency level to ensure the\n number of failed accounts never exceeds the value of FailureToleranceCount +1. The initial actual\n concurrency is set to the lower of either the value of the MaxConcurrentCount, or the value of\n FailureToleranceCount +1. The actual concurrency is then reduced proportionally by the number of\n failures. This is the default behavior.

    \n

    If failure tolerance or Maximum concurrent accounts are set to percentages, the behavior is similar.

    \n
  • \n
  • \n

    \n SOFT_FAILURE_TOLERANCE: This option decouples FailureToleranceCount from the actual\n concurrency. This allows stack set operations to run at the concurrency level set by the\n MaxConcurrentCount value, or MaxConcurrentPercentage, regardless of the number of\n failures.

    \n
  • \n
" } } }, diff --git a/models/cloudfront.json b/models/cloudfront.json index fd6713f953..c57eb3ac9b 100644 --- a/models/cloudfront.json +++ b/models/cloudfront.json @@ -10755,7 +10755,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of cache policies that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -10820,7 +10820,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of origin access identities you want in the response body.

", "smithy.api#httpQuery": "MaxItems" @@ -10965,7 +10965,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of continuous deployment policies that you want returned in the\n\t\t\tresponse.

", "smithy.api#httpQuery": "MaxItems" @@ -11058,7 +11058,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distribution IDs that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11128,7 +11128,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distribution IDs that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11200,7 +11200,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distribution IDs that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11266,7 +11266,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distributions that you want in the response.

" } @@ -11341,7 +11341,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distribution IDs that you want to get in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11410,7 +11410,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distributions that you want CloudFront to return in the response body.\n\t\t\tThe maximum and default values are both 100.

", "smithy.api#httpQuery": "MaxItems" @@ -11457,7 +11457,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of distributions you want in the response body.

", "smithy.api#httpQuery": "MaxItems" @@ -11518,7 +11518,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of field-level encryption configurations you want in the response\n\t\t\tbody.

", "smithy.api#httpQuery": "MaxItems" @@ -11577,7 +11577,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of field-level encryption profiles you want in the response body.\n\t\t

", "smithy.api#httpQuery": "MaxItems" @@ -11639,7 +11639,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of functions that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11725,7 +11725,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of invalidation batches that you want in the response body.

", "smithy.api#httpQuery": "MaxItems" @@ -11786,7 +11786,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of key groups that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11857,7 +11857,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of items in the key value stores list.

", "smithy.api#httpQuery": "MaxItems" @@ -11923,7 +11923,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of origin access controls that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -11995,7 +11995,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of origin request policies that you want in the response.

", "smithy.api#httpQuery": "MaxItems" @@ -12054,7 +12054,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of public keys you want in the response body.

", "smithy.api#httpQuery": "MaxItems" @@ -12112,7 +12112,7 @@ "type": "structure", "members": { "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of real-time log configurations that you want in the\n\t\t\tresponse.

", "smithy.api#httpQuery": "MaxItems" @@ -12191,7 +12191,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The maximum number of response headers policies that you want to get in the\n\t\t\tresponse.

", "smithy.api#httpQuery": "MaxItems" @@ -12256,7 +12256,7 @@ } }, "MaxItems": { - "target": "smithy.api#Integer", + "target": "com.amazonaws.cloudfront#integer", "traits": { "smithy.api#documentation": "

The value that you provided for the MaxItems request parameter.

", "smithy.api#httpQuery": "MaxItems" diff --git a/models/cloudhsm-v2.json b/models/cloudhsm-v2.json index aa16b18c41..a4bc48bc05 100644 --- a/models/cloudhsm-v2.json +++ b/models/cloudhsm-v2.json @@ -98,6 +98,18 @@ "traits": { "smithy.api#documentation": "

The list of tags for the backup.

" } + }, + "HsmType": { + "target": "com.amazonaws.cloudhsmv2#HsmType", + "traits": { + "smithy.api#documentation": "

The HSM type of the cluster that was backed up.

" + } + }, + "Mode": { + "target": "com.amazonaws.cloudhsmv2#ClusterMode", + "traits": { + "smithy.api#documentation": "

The mode of the cluster that was backed up.

" + } } }, "traits": { @@ -313,7 +325,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -356,7 +367,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -369,7 +381,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -383,7 +394,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -406,7 +416,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -441,7 +450,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -452,14 +460,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -473,14 +483,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -489,11 +497,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -504,14 +512,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -525,7 +535,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -545,7 +554,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -556,14 +564,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -624,9 +634,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1232,7 +1244,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 5000 + "max": 20000 }, "smithy.api#pattern": "^[a-zA-Z0-9+-/=\\s]*$" } @@ -1439,6 +1451,12 @@ "traits": { "smithy.api#documentation": "

The list of tags for the cluster.

" } + }, + "Mode": { + "target": "com.amazonaws.cloudhsmv2#ClusterMode", + "traits": { + "smithy.api#documentation": "

The mode of the cluster.

" + } } }, "traits": { @@ -1451,6 +1469,23 @@ "smithy.api#pattern": "^cluster-[2-7a-zA-Z]{11,16}$" } }, + "com.amazonaws.cloudhsmv2#ClusterMode": { + "type": "enum", + "members": { + "FIPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FIPS" + } + }, + "NON_FIPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NON_FIPS" + } + } + } + }, "com.amazonaws.cloudhsmv2#ClusterState": { "type": "enum", "members": { @@ -1643,7 +1678,7 @@ "HsmType": { "target": "com.amazonaws.cloudhsmv2#HsmType", "traits": { - "smithy.api#documentation": "

The type of HSM to use in the cluster. Currently the only allowed value is\n hsm1.medium.

", + "smithy.api#documentation": "

The type of HSM to use in the cluster. The allowed values are\n hsm1.medium and hsm2m.medium.

", "smithy.api#required": {} } }, @@ -1665,6 +1700,12 @@ "traits": { "smithy.api#documentation": "

Tags to apply to the CloudHSM cluster during creation.

" } + }, + "Mode": { + "target": "com.amazonaws.cloudhsmv2#ClusterMode", + "traits": { + "smithy.api#documentation": "

The mode to use in the cluster. The allowed values are\n FIPS and NON_FIPS.

" + } } }, "traits": { @@ -2186,6 +2227,12 @@ }, "value": { "target": "com.amazonaws.cloudhsmv2#Strings" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 30 + } } }, "com.amazonaws.cloudhsmv2#Hsm": { @@ -2289,7 +2336,11 @@ "com.amazonaws.cloudhsmv2#HsmType": { "type": "string", "traits": { - "smithy.api#pattern": "^(hsm1\\.medium)$" + "smithy.api#length": { + "min": 0, + "max": 32 + }, + "smithy.api#pattern": "^((p|)hsm[0-9][a-z.]*\\.[a-zA-Z]+)$" } }, "com.amazonaws.cloudhsmv2#Hsms": { diff --git a/models/cloudtrail.json b/models/cloudtrail.json index 20883be5d2..a14db05d24 100644 --- a/models/cloudtrail.json +++ b/models/cloudtrail.json @@ -266,7 +266,7 @@ "Field": { "target": "com.amazonaws.cloudtrail#SelectorField", "traits": { - "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For CloudTrail management events, supported fields include readOnly,\n eventCategory, and eventSource.

\n

For CloudTrail data events, supported fields include readOnly,\n eventCategory, eventName, resources.type, and resources.ARN.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is\n eventCategory.

\n
    \n
  • \n

    \n \n readOnly\n - Optional. Can be set to\n Equals a value of true or false. If you do\n not add this field, CloudTrail logs both read and\n write events. A value of true logs only\n read events. A value of false logs only\n write events.

    \n
  • \n
  • \n

    \n \n eventSource\n - For filtering\n management events only. This can be set to NotEquals\n kms.amazonaws.com or NotEquals\n rdsdata.amazonaws.com.

    \n
  • \n
  • \n

    \n \n eventName\n - Can use any operator.\n You can use it to filter in or filter out any data event logged to CloudTrail,\n such as PutBucket or GetSnapshotBlock. You can have\n multiple values for this field, separated by commas.

    \n
  • \n
  • \n

    \n \n eventCategory\n - This is required and\n must be set to Equals. \n

    \n
      \n
    • \n

      \n For CloudTrail management events, the value\n must be Management. \n

      \n
    • \n
    • \n

      \n For CloudTrail data events, the value\n must be Data. \n

      \n
    • \n
    \n

    The following are used only for event data stores:

    \n
      \n
    • \n

      \n For CloudTrail Insights events, the value\n must be Insight. \n

      \n
    • \n
    • \n

      \n For Config\n configuration items, the value must be ConfigurationItem.\n

      \n
    • \n
    • \n

      \n For Audit Manager evidence, the value must be Evidence.\n

      \n
    • \n
    • \n

      \n For non-Amazon Web Services events, the value must be ActivityAuditLog.\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n resources.type\n - This field is\n required for CloudTrail data events. resources.type can only\n use the Equals operator, and the value can be one of the\n following:

    \n
      \n
    • \n

      \n AWS::DynamoDB::Table\n

      \n
    • \n
    • \n

      \n AWS::Lambda::Function\n

      \n
    • \n
    • \n

      \n AWS::S3::Object\n

      \n
    • \n
    • \n

      \n AWS::AppConfig::Configuration\n

      \n
    • \n
    • \n

      \n AWS::B2BI::Transformer\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::AgentAlias\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::KnowledgeBase\n

      \n
    • \n
    • \n

      \n AWS::Cassandra::Table\n

      \n
    • \n
    • \n

      \n AWS::CloudFront::KeyValueStore\n

      \n
    • \n
    • \n

      \n AWS::CloudTrail::Channel\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Customization\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Profile\n

      \n
    • \n
    • \n

      \n AWS::Cognito::IdentityPool\n

      \n
    • \n
    • \n

      \n AWS::DynamoDB::Stream\n

      \n
    • \n
    • \n

      \n AWS::EC2::Snapshot\n

      \n
    • \n
    • \n

      \n AWS::EMRWAL::Workspace\n

      \n
    • \n
    • \n

      \n AWS::FinSpace::Environment\n

      \n
    • \n
    • \n

      \n AWS::Glue::Table\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::ComponentVersion\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::Deployment\n

      \n
    • \n
    • \n

      \n AWS::GuardDuty::Detector\n

      \n
    • \n
    • \n

      \n AWS::IoT::Certificate\n

      \n
    • \n
    • \n

      \n AWS::IoT::Thing\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::Asset\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::TimeSeries\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Entity\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Workspace\n

      \n
    • \n
    • \n

      \n AWS::KendraRanking::ExecutionPlan\n

      \n
    • \n
    • \n

      \n AWS::KinesisVideo::Stream\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Network\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Node\n

      \n
    • \n
    • \n

      \n AWS::MedicalImaging::Datastore\n

      \n
    • \n
    • \n

      \n AWS::NeptuneGraph::Graph\n

      \n
    • \n
    • \n

      \n AWS::PCAConnectorAD::Connector\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Application\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::DataSource\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Index\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::WebExperience\n

      \n
    • \n
    • \n

      \n AWS::RDS::DBCluster\n

      \n
    • \n
    • \n

      \n AWS::S3::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3ObjectLambda::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3Outposts::Object\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::Endpoint\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::ExperimentTrialComponent\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::FeatureGroup\n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Namespace \n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Service\n

      \n
    • \n
    • \n

      \n AWS::SCN::Instance\n

      \n
    • \n
    • \n

      \n AWS::SNS::PlatformEndpoint\n

      \n
    • \n
    • \n

      \n AWS::SNS::Topic\n

      \n
    • \n
    • \n

      \n AWS::SWF::Domain\n

      \n
    • \n
    • \n

      \n AWS::SQS::Queue\n

      \n
    • \n
    • \n

      \n AWS::SSMMessages::ControlChannel\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Device\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Environment\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Database\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Table\n

      \n
    • \n
    • \n

      \n AWS::VerifiedPermissions::PolicyStore\n

      \n
    • \n
    \n

    You can have only one resources.type field per selector. To log data\n events on more than one resource type, add another selector.

    \n
  • \n
  • \n

    \n \n resources.ARN\n - You can use any\n operator with resources.ARN, but if you use Equals or\n NotEquals, the value must exactly match the ARN of a valid resource\n of the type you've specified in the template as the value of resources.type. For\n example, if resources.type equals AWS::S3::Object, the ARN must be in\n one of the following formats. To log all data events for all objects in a specific S3\n bucket, use the StartsWith operator, and include only the bucket ARN as\n the matching value.

    \n

    The trailing slash is intentional; do not exclude it. Replace the text between\n less than and greater than symbols (<>) with resource-specific information.

    \n
      \n
    • \n

      \n arn::s3:::/\n

      \n
    • \n
    • \n

      \n arn::s3::://\n

      \n
    • \n
    \n

    When resources.type equals AWS::DynamoDB::Table, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::dynamodb:::table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Lambda::Function, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::lambda:::function:\n

      \n
    • \n
    \n

    When resources.type equals AWS::AppConfig::Configuration, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::appconfig:::application//environment//configuration/\n

      \n
    • \n
    \n

    When resources.type equals AWS::B2BI::Transformer, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::b2bi:::transformer/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Bedrock::AgentAlias, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::bedrock:::agent-alias//\n

      \n
    • \n
    \n

    When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::bedrock:::knowledge-base/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Cassandra::Table, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cassandra:::/keyspace//table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cloudfront:::key-value-store/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CloudTrail::Channel, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cloudtrail:::channel/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CodeWhisperer::Customization, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::codewhisperer:::customization/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::codewhisperer:::profile/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cognito-identity:::identitypool/\n

      \n
    • \n
    \n

    When resources.type equals AWS::DynamoDB::Stream, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::dynamodb:::table//stream/\n

      \n
    • \n
    \n

    When resources.type equals AWS::EC2::Snapshot, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::ec2:::snapshot/\n

      \n
    • \n
    \n

    When resources.type equals AWS::EMRWAL::Workspace, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::emrwal:::workspace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::FinSpace::Environment,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::finspace:::environment/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Glue::Table, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::glue:::table//\n

      \n
    • \n
    \n

    When resources.type equals AWS::GreengrassV2::ComponentVersion, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::greengrass:::components/\n

      \n
    • \n
    \n

    When resources.type equals AWS::GreengrassV2::Deployment, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::greengrass:::deployments/\n

      \n
    • \n
    \n

    When resources.type equals AWS::GuardDuty::Detector, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::guardduty:::detector/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoT::Certificate,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iot:::cert/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoT::Thing,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iot:::thing/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTSiteWise::Asset,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iotsitewise:::asset/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTSiteWise::TimeSeries,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iotsitewise:::timeseries/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTTwinMaker::Entity,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iottwinmaker:::workspace//entity/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTTwinMaker::Workspace,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iottwinmaker:::workspace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::kendra-ranking:::rescore-execution-plan/\n

      \n
    • \n
    \n

    When resources.type equals AWS::KinesisVideo::Stream, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::kinesisvideo:::stream//\n

      \n
    • \n
    \n

    When resources.type equals AWS::ManagedBlockchain::Network,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::managedblockchain:::networks/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ManagedBlockchain::Node,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::managedblockchain:::nodes/\n

      \n
    • \n
    \n

    When resources.type equals AWS::MedicalImaging::Datastore,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::medical-imaging:::datastore/\n

      \n
    • \n
    \n

    When resources.type equals AWS::NeptuneGraph::Graph,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::neptune-graph:::graph/\n

      \n
    • \n
    \n

    When resources.type equals AWS::PCAConnectorAD::Connector,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::pca-connector-ad:::connector/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::Application,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::DataSource,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//index//data-source/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::Index,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//index/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::WebExperience,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//web-experience/\n

      \n
    • \n
    \n

    When resources.type equals AWS::RDS::DBCluster,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::rds:::cluster/\n

      \n
    • \n
    \n

    When resources.type equals AWS::S3::AccessPoint, and the\n operator is set to Equals or NotEquals, the ARN must be in\n one of the following formats. To log events on all objects in an S3 access point, we\n recommend that you use only the access point ARN, don’t include the object path, and\n use the StartsWith or NotStartsWith operators.

    \n
      \n
    • \n

      \n arn::s3:::accesspoint/\n

      \n
    • \n
    • \n

      \n arn::s3:::accesspoint//object/\n

      \n
    • \n
    \n

    When resources.type equals\n AWS::S3ObjectLambda::AccessPoint, and the operator is set to\n Equals or NotEquals, the ARN must be in the following\n format:

    \n
      \n
    • \n

      \n arn::s3-object-lambda:::accesspoint/\n

      \n
    • \n
    \n

    When resources.type equals AWS::S3Outposts::Object, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::s3-outposts:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::endpoint/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::experiment-trial-component/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::feature-group/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SCN::Instance, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::scn:::instance/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::servicediscovery:::namespace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::servicediscovery:::service/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SNS::PlatformEndpoint,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sns:::endpoint///\n

      \n
    • \n
    \n

    When resources.type equals AWS::SNS::Topic,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sns:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SWF::Domain,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::swf:::domain/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SQS::Queue,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sqs:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SSMMessages::ControlChannel, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::ssmmessages:::control-channel/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ThinClient::Device, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::thinclient:::device/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ThinClient::Environment, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::thinclient:::environment/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Timestream::Database,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::timestream:::database/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Timestream::Table,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::timestream:::database//table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::verifiedpermissions:::policy-store/\n

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For CloudTrail management events, supported fields include readOnly,\n eventCategory, and eventSource.

\n

For CloudTrail data events, supported fields include readOnly,\n eventCategory, eventName, resources.type, and resources.ARN.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is\n eventCategory.

\n
    \n
  • \n

    \n \n readOnly\n - Optional. Can be set to\n Equals a value of true or false. If you do\n not add this field, CloudTrail logs both read and\n write events. A value of true logs only\n read events. A value of false logs only\n write events.

    \n
  • \n
  • \n

    \n \n eventSource\n - For filtering\n management events only. This can be set to NotEquals\n kms.amazonaws.com or NotEquals\n rdsdata.amazonaws.com.

    \n
  • \n
  • \n

    \n \n eventName\n - Can use any operator.\n You can use it to filter in or filter out any data event logged to CloudTrail,\n such as PutBucket or GetSnapshotBlock. You can have\n multiple values for this field, separated by commas.

    \n
  • \n
  • \n

    \n \n eventCategory\n - This is required and\n must be set to Equals. \n

    \n
      \n
    • \n

      \n For CloudTrail management events, the value\n must be Management. \n

      \n
    • \n
    • \n

      \n For CloudTrail data events, the value\n must be Data. \n

      \n
    • \n
    \n

    The following are used only for event data stores:

    \n
      \n
    • \n

      \n For CloudTrail Insights events, the value\n must be Insight. \n

      \n
    • \n
    • \n

      \n For Config\n configuration items, the value must be ConfigurationItem.\n

      \n
    • \n
    • \n

      \n For Audit Manager evidence, the value must be Evidence.\n

      \n
    • \n
    • \n

      \n For non-Amazon Web Services events, the value must be ActivityAuditLog.\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n resources.type\n - This field is\n required for CloudTrail data events. resources.type can only\n use the Equals operator, and the value can be one of the\n following:

    \n
      \n
    • \n

      \n AWS::DynamoDB::Table\n

      \n
    • \n
    • \n

      \n AWS::Lambda::Function\n

      \n
    • \n
    • \n

      \n AWS::S3::Object\n

      \n
    • \n
    • \n

      \n AWS::AppConfig::Configuration\n

      \n
    • \n
    • \n

      \n AWS::B2BI::Transformer\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::AgentAlias\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::KnowledgeBase\n

      \n
    • \n
    • \n

      \n AWS::Cassandra::Table\n

      \n
    • \n
    • \n

      \n AWS::CloudFront::KeyValueStore\n

      \n
    • \n
    • \n

      \n AWS::CloudTrail::Channel\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Customization\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Profile\n

      \n
    • \n
    • \n

      \n AWS::Cognito::IdentityPool\n

      \n
    • \n
    • \n

      \n AWS::DynamoDB::Stream\n

      \n
    • \n
    • \n

      \n AWS::EC2::Snapshot\n

      \n
    • \n
    • \n

      \n AWS::EMRWAL::Workspace\n

      \n
    • \n
    • \n

      \n AWS::FinSpace::Environment\n

      \n
    • \n
    • \n

      \n AWS::Glue::Table\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::ComponentVersion\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::Deployment\n

      \n
    • \n
    • \n

      \n AWS::GuardDuty::Detector\n

      \n
    • \n
    • \n

      \n AWS::IoT::Certificate\n

      \n
    • \n
    • \n

      \n AWS::IoT::Thing\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::Asset\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::TimeSeries\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Entity\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Workspace\n

      \n
    • \n
    • \n

      \n AWS::KendraRanking::ExecutionPlan\n

      \n
    • \n
    • \n

      \n AWS::KinesisVideo::Stream\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Network\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Node\n

      \n
    • \n
    • \n

      \n AWS::MedicalImaging::Datastore\n

      \n
    • \n
    • \n

      \n AWS::NeptuneGraph::Graph\n

      \n
    • \n
    • \n

      \n AWS::PCAConnectorAD::Connector\n

      \n
    • \n
    • \n

      \n AWS::QApps:QApp\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Application\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::DataSource\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Index\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::WebExperience\n

      \n
    • \n
    • \n

      \n AWS::RDS::DBCluster\n

      \n
    • \n
    • \n

      \n AWS::S3::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3ObjectLambda::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3Outposts::Object\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::Endpoint\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::ExperimentTrialComponent\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::FeatureGroup\n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Namespace \n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Service\n

      \n
    • \n
    • \n

      \n AWS::SCN::Instance\n

      \n
    • \n
    • \n

      \n AWS::SNS::PlatformEndpoint\n

      \n
    • \n
    • \n

      \n AWS::SNS::Topic\n

      \n
    • \n
    • \n

      \n AWS::SQS::Queue\n

      \n
    • \n
    • \n

      \n AWS::SSM::ManagedNode\n

      \n
    • \n
    • \n

      \n AWS::SSMMessages::ControlChannel\n

      \n
    • \n
    • \n

      \n AWS::SWF::Domain\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Device\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Environment\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Database\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Table\n

      \n
    • \n
    • \n

      \n AWS::VerifiedPermissions::PolicyStore\n

      \n
    • \n
    • \n

      \n AWS::XRay::Trace\n

      \n
    • \n
    \n

    You can have only one resources.type field per selector. To log data\n events on more than one resource type, add another selector.

    \n
  • \n
  • \n

    \n \n resources.ARN\n - You can use any\n operator with resources.ARN, but if you use Equals or\n NotEquals, the value must exactly match the ARN of a valid resource\n of the type you've specified in the template as the value of resources.type.

    \n \n

    You can't use the resources.ARN field to filter resource types that do not have ARNs.

    \n
    \n

    The resources.ARN field can be set one of the following.

    \n

    If resources.type equals AWS::S3::Object, the ARN must be in\n one of the following formats. To log all data events for all objects in a specific S3\n bucket, use the StartsWith operator, and include only the bucket ARN as\n the matching value.

    \n

    The trailing slash is intentional; do not exclude it. Replace the text between\n less than and greater than symbols (<>) with resource-specific information.

    \n
      \n
    • \n

      \n arn::s3:::/\n

      \n
    • \n
    • \n

      \n arn::s3::://\n

      \n
    • \n
    \n

    When resources.type equals AWS::DynamoDB::Table, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::dynamodb:::table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Lambda::Function, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::lambda:::function:\n

      \n
    • \n
    \n

    When resources.type equals AWS::AppConfig::Configuration, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::appconfig:::application//environment//configuration/\n

      \n
    • \n
    \n

    When resources.type equals AWS::B2BI::Transformer, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::b2bi:::transformer/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Bedrock::AgentAlias, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::bedrock:::agent-alias//\n

      \n
    • \n
    \n

    When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::bedrock:::knowledge-base/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Cassandra::Table, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cassandra:::/keyspace//table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cloudfront:::key-value-store/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CloudTrail::Channel, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cloudtrail:::channel/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CodeWhisperer::Customization, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::codewhisperer:::customization/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::codewhisperer:::profile/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cognito-identity:::identitypool/\n

      \n
    • \n
    \n

    When resources.type equals AWS::DynamoDB::Stream, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::dynamodb:::table//stream/\n

      \n
    • \n
    \n

    When resources.type equals AWS::EC2::Snapshot, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::ec2:::snapshot/\n

      \n
    • \n
    \n

    When resources.type equals AWS::EMRWAL::Workspace, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::emrwal:::workspace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::FinSpace::Environment,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::finspace:::environment/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Glue::Table, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::glue:::table//\n

      \n
    • \n
    \n

    When resources.type equals AWS::GreengrassV2::ComponentVersion, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::greengrass:::components/\n

      \n
    • \n
    \n

    When resources.type equals AWS::GreengrassV2::Deployment, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::greengrass:::deployments/\n

      \n
    • \n
    \n

    When resources.type equals AWS::GuardDuty::Detector, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::guardduty:::detector/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoT::Certificate,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iot:::cert/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoT::Thing,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iot:::thing/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTSiteWise::Asset,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iotsitewise:::asset/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTSiteWise::TimeSeries,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iotsitewise:::timeseries/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTTwinMaker::Entity,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iottwinmaker:::workspace//entity/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTTwinMaker::Workspace,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iottwinmaker:::workspace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::kendra-ranking:::rescore-execution-plan/\n

      \n
    • \n
    \n

    When resources.type equals AWS::KinesisVideo::Stream, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::kinesisvideo:::stream//\n

      \n
    • \n
    \n

    When resources.type equals AWS::ManagedBlockchain::Network,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::managedblockchain:::networks/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ManagedBlockchain::Node,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::managedblockchain:::nodes/\n

      \n
    • \n
    \n

    When resources.type equals AWS::MedicalImaging::Datastore,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::medical-imaging:::datastore/\n

      \n
    • \n
    \n

    When resources.type equals AWS::NeptuneGraph::Graph,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::neptune-graph:::graph/\n

      \n
    • \n
    \n

    When resources.type equals AWS::PCAConnectorAD::Connector,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::pca-connector-ad:::connector/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QApps:QApp,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qapps:::application//qapp/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::Application,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::DataSource,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//index//data-source/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::Index,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//index/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::WebExperience,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//web-experience/\n

      \n
    • \n
    \n

    When resources.type equals AWS::RDS::DBCluster,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::rds:::cluster/\n

      \n
    • \n
    \n

    When resources.type equals AWS::S3::AccessPoint, and the\n operator is set to Equals or NotEquals, the ARN must be in\n one of the following formats. To log events on all objects in an S3 access point, we\n recommend that you use only the access point ARN, don’t include the object path, and\n use the StartsWith or NotStartsWith operators.

    \n
      \n
    • \n

      \n arn::s3:::accesspoint/\n

      \n
    • \n
    • \n

      \n arn::s3:::accesspoint//object/\n

      \n
    • \n
    \n

    When resources.type equals\n AWS::S3ObjectLambda::AccessPoint, and the operator is set to\n Equals or NotEquals, the ARN must be in the following\n format:

    \n
      \n
    • \n

      \n arn::s3-object-lambda:::accesspoint/\n

      \n
    • \n
    \n

    When resources.type equals AWS::S3Outposts::Object, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::s3-outposts:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::endpoint/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::experiment-trial-component/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::feature-group/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SCN::Instance, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::scn:::instance/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::servicediscovery:::namespace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::servicediscovery:::service/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SNS::PlatformEndpoint,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sns:::endpoint///\n

      \n
    • \n
    \n

    When resources.type equals AWS::SNS::Topic,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sns:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SQS::Queue,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sqs:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SSM::ManagedNode, and\n the operator is set to Equals or NotEquals, the ARN must be\n in one of the following formats:

    \n
      \n
    • \n

      \n arn::ssm:::managed-instance/\n

      \n
    • \n
    • \n

      \n arn::ec2:::instance/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SSMMessages::ControlChannel, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::ssmmessages:::control-channel/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SWF::Domain,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::swf:::domain/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ThinClient::Device, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::thinclient:::device/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ThinClient::Environment, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::thinclient:::environment/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Timestream::Database,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::timestream:::database/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Timestream::Table,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::timestream:::database//table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::verifiedpermissions:::policy-store/\n

      \n
    • \n
    \n
  • \n
", "smithy.api#required": {} } }, @@ -637,7 +637,7 @@ "code": "CloudTrailAccessNotEnabled", "httpResponseCode": 400 }, - "smithy.api#documentation": "

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see Enabling Trusted Access with Other Amazon Web Services Services and Prepare For Creating a Trail For Your Organization.

", + "smithy.api#documentation": "

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -2121,7 +2121,7 @@ "AdvancedEventSelectors": { "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", "traits": { - "smithy.api#documentation": "

The advanced event selectors to use to select the events for the data store. You can\n configure up to five advanced event selectors for each event data store.

\n

For more information about how to use advanced event selectors to log CloudTrail\n events, see Log events by using advanced event selectors in the CloudTrail User Guide.

\n

For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration\n items in the CloudTrail User Guide.

\n

For more information about how to use advanced event selectors to include non-Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.

" + "smithy.api#documentation": "

The advanced event selectors to use to select the events for the data store. You can\n configure up to five advanced event selectors for each event data store.

\n

For more information about how to use advanced event selectors to log CloudTrail\n events, see Log events by using advanced event selectors in the CloudTrail User Guide.

\n

For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration\n items in the CloudTrail User Guide.

\n

For more information about how to use advanced event selectors to include events outside of Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.

" } }, "MultiRegionEnabled": { @@ -2384,14 +2384,14 @@ "S3BucketName": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the name of the Amazon S3 bucket designated for publishing log files.\n See Amazon S3\n Bucket Naming Requirements.

", + "smithy.api#documentation": "

Specifies the name of the Amazon S3 bucket designated for publishing log files. \n For information about bucket naming rules, see Bucket naming rules \n in the Amazon Simple Storage Service User Guide.\n

", "smithy.api#required": {} } }, "S3KeyPrefix": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200\n characters.

" + "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200\n characters.

" } }, "SnsTopicName": { @@ -2469,7 +2469,7 @@ "S3KeyPrefix": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" + "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" } }, "SnsTopicName": { @@ -2551,12 +2551,12 @@ "Values": { "target": "com.amazonaws.cloudtrail#DataResourceValues", "traits": { - "smithy.api#documentation": "

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified\n objects.

\n
    \n
  • \n

    To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3.

    \n \n

    This also enables logging of data event activity performed by any user or role\n in your Amazon Web Services account, even if that activity is performed on a bucket\n that belongs to another Amazon Web Services account.

    \n
    \n
  • \n
  • \n

    To log data events for all objects in an S3 bucket, specify the bucket and an\n empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data\n events for all objects in this S3 bucket.

    \n
  • \n
  • \n

    To log data events for specific objects, specify the S3 bucket and object prefix\n such as arn:aws:s3:::bucket-1/example-images. The trail logs data events\n for objects in this S3 bucket that match the prefix.

    \n
  • \n
  • \n

    To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda.

    \n \n

    This also enables logging of Invoke activity performed by any user\n or role in your Amazon Web Services account, even if that activity is performed on\n a function that belongs to another Amazon Web Services account.

    \n
    \n
  • \n
  • \n

    To log data events for a specific Lambda function, specify the\n function ARN.

    \n \n

    Lambda function ARNs are exact. For example, if you specify a\n function ARN\n arn:aws:lambda:us-west-2:111111111111:function:helloworld,\n data events will only be logged for\n arn:aws:lambda:us-west-2:111111111111:function:helloworld.\n They will not be logged for\n arn:aws:lambda:us-west-2:111111111111:function:helloworld2.

    \n
    \n
  • \n
  • \n

    To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb.

    \n
  • \n
" + "smithy.api#documentation": "

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified\n resource type.

\n
    \n
  • \n

    To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3.

    \n \n

    This also enables logging of data event activity performed by any user or role\n in your Amazon Web Services account, even if that activity is performed on a bucket\n that belongs to another Amazon Web Services account.

    \n
    \n
  • \n
  • \n

    To log data events for all objects in an S3 bucket, specify the bucket and an\n empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data\n events for all objects in this S3 bucket.

    \n
  • \n
  • \n

    To log data events for specific objects, specify the S3 bucket and object prefix\n such as arn:aws:s3:::bucket-1/example-images. The trail logs data events\n for objects in this S3 bucket that match the prefix.

    \n
  • \n
  • \n

    To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda.

    \n \n

    This also enables logging of Invoke activity performed by any user\n or role in your Amazon Web Services account, even if that activity is performed on\n a function that belongs to another Amazon Web Services account.

    \n
    \n
  • \n
  • \n

    To log data events for a specific Lambda function, specify the\n function ARN.

    \n \n

    Lambda function ARNs are exact. For example, if you specify a\n function ARN\n arn:aws:lambda:us-west-2:111111111111:function:helloworld,\n data events will only be logged for\n arn:aws:lambda:us-west-2:111111111111:function:helloworld.\n They will not be logged for\n arn:aws:lambda:us-west-2:111111111111:function:helloworld2.

    \n
    \n
  • \n
  • \n

    To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

The Amazon S3 buckets, Lambda functions, or Amazon DynamoDB tables that you specify in your event selectors for your trail to log data events. Data\n events provide information about the resource operations performed on or within a resource\n itself. These are also known as data plane operations. You can specify up to 250 data\n resources for a trail.

\n \n

The total number of allowed data resources is 250. This number can be distributed\n between 1 and 5 event selectors, but the total cannot exceed 250 across all\n selectors for the trail.

\n

If you are using advanced event selectors, the maximum total number of values for\n all conditions, across all advanced event selectors for the trail, is 500.

\n
\n

The following example demonstrates how logging works when you configure logging of all\n data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read\n and Write data events.

\n
    \n
  1. \n

    A user uploads an image file to bucket-1.

    \n
  2. \n
  3. \n

    The PutObject API operation is an Amazon S3 object-level API.\n It is recorded as a data event in CloudTrail. Because the CloudTrail\n user specified an S3 bucket with an empty prefix, events that occur on any object in\n that bucket are logged. The trail processes and logs the event.

    \n
  4. \n
  5. \n

    A user uploads an object to an Amazon S3 bucket named\n arn:aws:s3:::bucket-2.

    \n
  6. \n
  7. \n

    The PutObject API operation occurred for an object in an S3 bucket\n that the CloudTrail user didn't specify for the trail. The trail doesn’t log\n the event.

    \n
  8. \n
\n

The following example demonstrates how logging works when you configure logging of\n Lambda data events for a Lambda function named\n MyLambdaFunction, but not for all Lambda\n functions.

\n
    \n
  1. \n

    A user runs a script that includes a call to the\n MyLambdaFunction function and the\n MyOtherLambdaFunction function.

    \n
  2. \n
  3. \n

    The Invoke API operation on MyLambdaFunction is\n an Lambda API. It is recorded as a data event in CloudTrail.\n Because the CloudTrail user specified logging data events for\n MyLambdaFunction, any invocations of that function are\n logged. The trail processes and logs the event.

    \n
  4. \n
  5. \n

    The Invoke API operation on\n MyOtherLambdaFunction is an Lambda API. Because\n the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for\n MyOtherLambdaFunction does not match the function specified\n for the trail. The trail doesn’t log the event.

    \n
  6. \n
" + "smithy.api#documentation": "

Data events provide information about the resource operations performed on or within a resource\n itself. These are also known as data plane operations. You can specify up to 250 data\n resources for a trail.

\n

Configure the DataResource to specify the resource type and resource ARNs for which you want to log data events.

\n

You can specify the following resource types in your event selectors for your trail:

\n
    \n
  • \n

    \n AWS::DynamoDB::Table\n

    \n
  • \n
  • \n

    \n AWS::Lambda::Function\n

    \n
  • \n
  • \n

    \n AWS::S3::Object\n

    \n
  • \n
\n \n

The total number of allowed data resources is 250. This number can be distributed\n between 1 and 5 event selectors, but the total cannot exceed 250 across all\n selectors for the trail.

\n

If you are using advanced event selectors, the maximum total number of values for\n all conditions, across all advanced event selectors for the trail, is 500.

\n
\n

The following example demonstrates how logging works when you configure logging of all\n data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read\n and Write data events.

\n
    \n
  1. \n

    A user uploads an image file to bucket-1.

    \n
  2. \n
  3. \n

    The PutObject API operation is an Amazon S3 object-level API.\n It is recorded as a data event in CloudTrail. Because the CloudTrail\n user specified an S3 bucket with an empty prefix, events that occur on any object in\n that bucket are logged. The trail processes and logs the event.

    \n
  4. \n
  5. \n

    A user uploads an object to an Amazon S3 bucket named\n arn:aws:s3:::bucket-2.

    \n
  6. \n
  7. \n

    The PutObject API operation occurred for an object in an S3 bucket\n that the CloudTrail user didn't specify for the trail. The trail doesn’t log\n the event.

    \n
  8. \n
\n

The following example demonstrates how logging works when you configure logging of\n Lambda data events for a Lambda function named\n MyLambdaFunction, but not for all Lambda\n functions.

\n
    \n
  1. \n

    A user runs a script that includes a call to the\n MyLambdaFunction function and the\n MyOtherLambdaFunction function.

    \n
  2. \n
  3. \n

    The Invoke API operation on MyLambdaFunction is\n a Lambda API. It is recorded as a data event in CloudTrail.\n Because the CloudTrail user specified logging data events for\n MyLambdaFunction, any invocations of that function are\n logged. The trail processes and logs the event.

    \n
  4. \n
  5. \n

    The Invoke API operation on\n MyOtherLambdaFunction is a Lambda API. Because\n the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for\n MyOtherLambdaFunction does not match the function specified\n for the trail. The trail doesn’t log the event.

    \n
  6. \n
" } }, "com.amazonaws.cloudtrail#DataResourceValues": { @@ -3133,7 +3133,20 @@ ], "traits": { "smithy.api#documentation": "

Retrieves settings for one or more trails associated with the current Region for your\n account.

", - "smithy.api#idempotent": {} + "smithy.api#idempotent": {}, + "smithy.test#smokeTests": [ + { + "id": "DescribeTrailsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.cloudtrail#DescribeTrailsRequest": { @@ -3606,7 +3619,7 @@ } }, "traits": { - "smithy.api#documentation": "

A storage lake of event data against which you can run complex SQL-based queries. An\n event data store can include events that you have logged on your account. To select events for an event data\n store, use advanced event selectors.

" + "smithy.api#documentation": "

A storage lake of event data against which you can run complex SQL-based queries. An\n event data store can include events that you have logged on your account. To select events for an event data\n store, use advanced event selectors.

" } }, "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": { @@ -4155,6 +4168,12 @@ "traits": { "smithy.api#documentation": "

\n If Lake query federation is enabled, provides the ARN of the federation role used to access the resources for the federated event data store.\n

" } + }, + "PartitionKeys": { + "target": "com.amazonaws.cloudtrail#PartitionKeyList", + "traits": { + "smithy.api#documentation": "

The partition keys for the event data store. To improve query performance and efficiency, CloudTrail Lake organizes \n event data into partitions based on values derived from partition keys.

" + } } }, "traits": { @@ -4742,7 +4761,7 @@ "LatestDeliveryError": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Displays any Amazon S3 error that CloudTrail encountered when attempting\n to deliver log files to the designated bucket. For more information, see Error\n Responses in the Amazon S3 API Reference.

\n \n

This error occurs only when there is a problem with the destination S3 bucket, and\n does not occur for requests that time out. To resolve the issue, create a new bucket,\n and then call UpdateTrail to specify the new bucket; or fix the existing\n objects so that CloudTrail can again write to the bucket.

\n
" + "smithy.api#documentation": "

Displays any Amazon S3 error that CloudTrail encountered when attempting\n to deliver log files to the designated bucket. For more information, see Error\n Responses in the Amazon S3 API Reference.

\n \n

This error occurs only when there is a problem with the destination S3 bucket, and\n does not occur for requests that time out. To resolve the issue, \n fix the bucket policy so that CloudTrail \n can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket.

\n
" } }, "LatestNotificationError": { @@ -4796,7 +4815,7 @@ "LatestDigestDeliveryError": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Displays any Amazon S3 error that CloudTrail encountered when attempting\n to deliver a digest file to the designated bucket. For more information, see Error\n Responses in the Amazon S3 API Reference.

\n \n

This error occurs only when there is a problem with the destination S3 bucket, and\n does not occur for requests that time out. To resolve the issue, create a new bucket,\n and then call UpdateTrail to specify the new bucket; or fix the existing\n objects so that CloudTrail can again write to the bucket.

\n
" + "smithy.api#documentation": "

Displays any Amazon S3 error that CloudTrail encountered when attempting\n to deliver a digest file to the designated bucket. For more information, see Error\n Responses in the Amazon S3 API Reference.

\n \n

This error occurs only when there is a problem with the destination S3 bucket, and\n does not occur for requests that time out. To resolve the issue, \n fix the bucket policy so that CloudTrail \n can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket.

\n
" } }, "LatestDeliveryAttemptTime": { @@ -7112,7 +7131,7 @@ "code": "NotOrganizationMasterAccount", "httpResponseCode": 400 }, - "smithy.api#documentation": "

This exception is thrown when the Amazon Web Services account making the request to\n create or update an organization trail or event data store is not the management account\n for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Create an event data store.

", + "smithy.api#documentation": "

This exception is thrown when the Amazon Web Services account making the request to\n create or update an organization trail or event data store is not the management account\n for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -7208,6 +7227,60 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.cloudtrail#PartitionKey": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.cloudtrail#PartitionKeyName", + "traits": { + "smithy.api#documentation": "

The name of the partition key.

", + "smithy.api#required": {} + } + }, + "Type": { + "target": "com.amazonaws.cloudtrail#PartitionKeyType", + "traits": { + "smithy.api#documentation": "

The data type of the partition key. For example, bigint or string.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about a partition key for an event data store.

" + } + }, + "com.amazonaws.cloudtrail#PartitionKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#PartitionKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2 + } + } + }, + "com.amazonaws.cloudtrail#PartitionKeyName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" + } + }, + "com.amazonaws.cloudtrail#PartitionKeyType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" + } + }, "com.amazonaws.cloudtrail#PublicKey": { "type": "structure", "members": { @@ -7293,7 +7366,7 @@ } ], "traits": { - "smithy.api#documentation": "

Configures an event selector or advanced event selectors for your trail. Use event\n selectors or advanced event selectors to specify management and data event settings for\n your trail. If you want your trail to log Insights events, be sure the event selector \n enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events for trails in the CloudTrail User Guide.\n By default, trails created without specific event selectors are configured to\n log all read and write management events, and no data events.

\n

When an event occurs in your account, CloudTrail evaluates the event selectors or\n advanced event selectors in all trails. For each trail, if the event matches any event\n selector, the trail processes and logs the event. If the event doesn't match any event\n selector, the trail doesn't log the event.

\n

Example

\n
    \n
  1. \n

    You create an event selector for a trail and specify that you want write-only\n events.

    \n
  2. \n
  3. \n

    The EC2 GetConsoleOutput and RunInstances API operations\n occur in your account.

    \n
  4. \n
  5. \n

    CloudTrail evaluates whether the events match your event selectors.

    \n
  6. \n
  7. \n

    The RunInstances is a write-only event and it matches your event\n selector. The trail logs the event.

    \n
  8. \n
  9. \n

    The GetConsoleOutput is a read-only event that doesn't match your\n event selector. The trail doesn't log the event.

    \n
  10. \n
\n

The PutEventSelectors operation must be called from the Region in which the\n trail was created; otherwise, an InvalidHomeRegionException exception is\n thrown.

\n

You can configure up to five event selectors for each trail. For more information, see\n Logging management events, Logging\n data events, and Quotas in CloudTrail in the CloudTrail User\n Guide.

\n

You can add advanced event selectors, and conditions for your advanced event selectors,\n up to a maximum of 500 values for all conditions and selectors on a trail. You can use\n either AdvancedEventSelectors or EventSelectors, but not both. If\n you apply AdvancedEventSelectors to a trail, any existing\n EventSelectors are overwritten. For more information about advanced event\n selectors, see Logging data events in the CloudTrail User Guide.

", + "smithy.api#documentation": "

Configures an event selector or advanced event selectors for your trail. Use event\n selectors or advanced event selectors to specify management and data event settings for\n your trail. If you want your trail to log Insights events, be sure the event selector \n enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide.\n By default, trails created without specific event selectors are configured to\n log all read and write management events, and no data events.

\n

When an event occurs in your account, CloudTrail evaluates the event selectors or\n advanced event selectors in all trails. For each trail, if the event matches any event\n selector, the trail processes and logs the event. If the event doesn't match any event\n selector, the trail doesn't log the event.

\n

Example

\n
    \n
  1. \n

    You create an event selector for a trail and specify that you want write-only\n events.

    \n
  2. \n
  3. \n

    The EC2 GetConsoleOutput and RunInstances API operations\n occur in your account.

    \n
  4. \n
  5. \n

    CloudTrail evaluates whether the events match your event selectors.

    \n
  6. \n
  7. \n

    The RunInstances is a write-only event and it matches your event\n selector. The trail logs the event.

    \n
  8. \n
  9. \n

    The GetConsoleOutput is a read-only event that doesn't match your\n event selector. The trail doesn't log the event.

    \n
  10. \n
\n

The PutEventSelectors operation must be called from the Region in which the\n trail was created; otherwise, an InvalidHomeRegionException exception is\n thrown.

\n

You can configure up to five event selectors for each trail. For more information, see\n Logging management events, Logging\n data events, and Quotas in CloudTrail in the CloudTrail User\n Guide.

\n

You can add advanced event selectors, and conditions for your advanced event selectors,\n up to a maximum of 500 values for all conditions and selectors on a trail. You can use\n either AdvancedEventSelectors or EventSelectors, but not both. If\n you apply AdvancedEventSelectors to a trail, any existing\n EventSelectors are overwritten. For more information about advanced event\n selectors, see Logging data events in the CloudTrail User Guide.

", "smithy.api#idempotent": {} } }, @@ -8517,7 +8590,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts an import of logged trail events from a source S3 bucket to a destination event\n data store. By default, CloudTrail only imports events contained in the S3 bucket's\n CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services\n services. If you want to import CloudTrail events contained in another prefix, you\n must include the prefix in the S3LocationUri. For more considerations about\n importing trail events, see Considerations.

\n

When you start a new import, the Destinations and\n ImportSource parameters are required. Before starting a new import, disable\n any access control lists (ACLs) attached to the source S3 bucket. For more information\n about disabling ACLs, see Controlling ownership of\n objects and disabling ACLs for your bucket.

\n

When you retry an import, the ImportID parameter is required.

\n \n

If the destination event data store is for an organization, you must use the\n management account to import trail events. You cannot use the delegated administrator\n account for the organization.

\n
" + "smithy.api#documentation": "

Starts an import of logged trail events from a source S3 bucket to a destination event\n data store. By default, CloudTrail only imports events contained in the S3 bucket's\n CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services\n services. If you want to import CloudTrail events contained in another prefix, you\n must include the prefix in the S3LocationUri. For more considerations about\n importing trail events, see Considerations for copying trail events in the CloudTrail User Guide.

\n

When you start a new import, the Destinations and\n ImportSource parameters are required. Before starting a new import, disable\n any access control lists (ACLs) attached to the source S3 bucket. For more information\n about disabling ACLs, see Controlling ownership of\n objects and disabling ACLs for your bucket.

\n

When you retry an import, the ImportID parameter is required.

\n \n

If the destination event data store is for an organization, you must use the\n management account to import trail events. You cannot use the delegated administrator\n account for the organization.

\n
" } }, "com.amazonaws.cloudtrail#StartImportRequest": { @@ -9146,13 +9219,13 @@ "S3BucketName": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Name of the Amazon S3 bucket into which CloudTrail delivers your trail\n files. See Amazon S3\n Bucket Naming Requirements.

" + "smithy.api#documentation": "

Name of the Amazon S3 bucket into which CloudTrail delivers your trail\n files. See Amazon S3\n Bucket naming rules.

" } }, "S3KeyPrefix": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200\n characters.

" + "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200\n characters.

" } }, "SnsTopicName": { @@ -9834,13 +9907,13 @@ "S3BucketName": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the name of the Amazon S3 bucket designated for publishing log files.\n See Amazon S3\n Bucket Naming Requirements.

" + "smithy.api#documentation": "

Specifies the name of the Amazon S3 bucket designated for publishing log files.\n See Amazon S3\n Bucket naming rules.

" } }, "S3KeyPrefix": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200\n characters.

" + "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200\n characters.

" } }, "SnsTopicName": { @@ -9915,7 +9988,7 @@ "S3KeyPrefix": { "target": "com.amazonaws.cloudtrail#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your IAM Log Files.

" + "smithy.api#documentation": "

Specifies the Amazon S3 key prefix that comes after the name of the bucket you\n have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" } }, "SnsTopicName": { diff --git a/models/codeartifact.json b/models/codeartifact.json index 5149e07c29..7605c3772a 100644 --- a/models/codeartifact.json +++ b/models/codeartifact.json @@ -226,7 +226,7 @@ "externalConnection": { "target": "com.amazonaws.codeartifact#ExternalConnectionName", "traits": { - "smithy.api#documentation": "

\n The name of the external connection to add to the repository. The following values are supported:\n

\n
    \n
  • \n

    \n public:npmjs - for the npm public repository.\n

    \n
  • \n
  • \n

    \n public:nuget-org - for the NuGet Gallery.\n

    \n
  • \n
  • \n

    \n public:pypi - for the Python Package Index.\n

    \n
  • \n
  • \n

    \n public:maven-central - for Maven Central.\n

    \n
  • \n
  • \n

    \n public:maven-googleandroid - for the Google Android repository.\n

    \n
  • \n
  • \n

    \n public:maven-gradleplugins - for the Gradle plugins repository.\n

    \n
  • \n
  • \n

    \n public:maven-commonsware - for the CommonsWare Android repository.\n

    \n
  • \n
  • \n

    \n public:maven-clojars - for the Clojars repository.

    \n
  • \n
", + "smithy.api#documentation": "

\n The name of the external connection to add to the repository. The following values are supported:\n

\n
    \n
  • \n

    \n public:npmjs - for the npm public repository.\n

    \n
  • \n
  • \n

    \n public:nuget-org - for the NuGet Gallery.\n

    \n
  • \n
  • \n

    \n public:pypi - for the Python Package Index.\n

    \n
  • \n
  • \n

    \n public:maven-central - for Maven Central.\n

    \n
  • \n
  • \n

    \n public:maven-googleandroid - for the Google Android repository.\n

    \n
  • \n
  • \n

    \n public:maven-gradleplugins - for the Gradle plugins repository.\n

    \n
  • \n
  • \n

    \n public:maven-commonsware - for the CommonsWare Android repository.\n

    \n
  • \n
  • \n

    \n public:maven-clojars - for the Clojars repository.

    \n
  • \n
  • \n

    \n public:ruby-gems-org - for RubyGems.org.

    \n
  • \n
  • \n

    \n public:crates-io - for Crates.io.

    \n
  • \n
", "smithy.api#httpQuery": "external-connection", "smithy.api#required": {} } @@ -262,7 +262,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the associated package. The package component that specifies its \n namespace depends on its type. For example:

\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
" + "smithy.api#documentation": "

The namespace of the associated package. The package component that specifies its \n namespace depends on its type. For example:

\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
" } }, "package": { @@ -461,7 +461,7 @@ "name": "codeartifact" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

CodeArtifact is a fully managed artifact repository compatible with language-native\n package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to\n share packages with development teams and pull packages. Packages can be pulled from both\n public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact\n repository and another repository, which effectively merges their contents from the point of\n view of a package manager client.

\n

\n CodeArtifact concepts\n

\n
    \n
  • \n

    \n Repository: A CodeArtifact repository contains a set of package\n versions, each of which maps to a set of assets, or files. Repositories are\n polyglot, so a single repository can contain packages of any supported type. Each\n repository exposes endpoints for fetching and publishing packages using tools such as the \n npm\n CLI or the Maven CLI (\n mvn\n ). For a list of supported package managers, see the \n CodeArtifact User Guide.

    \n
  • \n
  • \n

    \n Domain: Repositories are aggregated into a higher-level entity known as a\n domain. All package assets and metadata are stored in the domain,\n but are consumed through repositories. A given package asset, such as a Maven JAR file, is\n stored once per domain, no matter how many repositories it's present in. All of the assets\n and metadata in a domain are encrypted with the same customer master key (CMK) stored in\n Key Management Service (KMS).

    \n

    Each repository is a member of a single domain and can't be moved to a\n different domain.

    \n

    The domain allows organizational policy to be applied across multiple\n repositories, such as which accounts can access repositories in the domain, and\n which public repositories can be used as sources of packages.

    \n

    Although an organization can have multiple domains, we recommend a single production\n domain that contains all published artifacts so that teams can find and share packages\n across their organization.

    \n
  • \n
  • \n

    \n Package: A package is a bundle of software and the metadata required to\n resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, and generic package formats. \n For more information about the supported package formats and how to use CodeArtifact with them, see the \n CodeArtifact User Guide.

    \n

    In CodeArtifact, a package consists of:

    \n
      \n
    • \n

      A name (for example, webpack is the name of a\n popular npm package)

      \n
    • \n
    • \n

      An optional namespace (for example, @types in @types/node)

      \n
    • \n
    • \n

      A set of versions (for example, 1.0.0, 1.0.1,\n 1.0.2, etc.)

      \n
    • \n
    • \n

      Package-level metadata (for example, npm tags)

      \n
    • \n
    \n
  • \n
  • \n

    \n Package group: A group of packages that match a specified definition. Package \n groups can be used to apply configuration to multiple packages that match a defined pattern using \n package format, package namespace, and package name. You can use package groups to more conveniently \n configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing \n of new package versions, which protects users from malicious actions known as dependency substitution attacks.

    \n
  • \n
  • \n

    \n Package version: A version of a package, such as @types/node 12.6.9. The version number\n format and semantics vary for different package formats. For example, npm package versions\n must conform to the Semantic Versioning\n specification. In CodeArtifact, a package version consists of the version identifier,\n metadata at the package version level, and a set of assets.

    \n
  • \n
  • \n

    \n Upstream repository: One repository is upstream of another when the package versions in\n it can be accessed from the repository endpoint of the downstream repository, effectively\n merging the contents of the two repositories from the point of view of a client. CodeArtifact\n allows creating an upstream relationship between two repositories.

    \n
  • \n
  • \n

    \n Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm\n .tgz file or Maven POM and JAR files.

    \n
  • \n
\n

\n CodeArtifact supported API operations\n

\n
    \n
  • \n

    \n AssociateExternalConnection: Adds an existing external \n connection to a repository.\n

    \n
  • \n
  • \n

    \n CopyPackageVersions: Copies package versions from one \n repository to another repository in the same domain.

    \n
  • \n
  • \n

    \n CreateDomain: Creates a domain.

    \n
  • \n
  • \n

    \n CreatePackageGroup: Creates a package group.

    \n
  • \n
  • \n

    \n CreateRepository: Creates a CodeArtifact repository in a domain.

    \n
  • \n
  • \n

    \n DeleteDomain: Deletes a domain. You cannot delete a domain that contains\n repositories.

    \n
  • \n
  • \n

    \n DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

    \n
  • \n
  • \n

    \n DeletePackage: Deletes a package and all associated package versions.

    \n
  • \n
  • \n

    \n DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group.

    \n
  • \n
  • \n

    \n DeletePackageVersions: Deletes versions of a package. After a package has\n been deleted, it can be republished, but its assets and metadata cannot be restored\n because they have been permanently removed from storage.

    \n
  • \n
  • \n

    \n DeleteRepository: Deletes a repository. \n

    \n
  • \n
  • \n

    \n DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

    \n
  • \n
  • \n

    \n DescribeDomain: Returns a DomainDescription object that\n contains information about the requested domain.

    \n
  • \n
  • \n

    \n DescribePackage: Returns a PackageDescription\n object that contains details about a package.

    \n
  • \n
  • \n

    \n DescribePackageGroup: Returns a PackageGroup\n object that contains details about a package group.

    \n
  • \n
  • \n

    \n DescribePackageVersion: Returns a PackageVersionDescription\n object that contains details about a package version.

    \n
  • \n
  • \n

    \n DescribeRepository: Returns a RepositoryDescription object\n that contains detailed information about the requested repository.

    \n
  • \n
  • \n

    \n DisposePackageVersions: Disposes versions of a package. A package version\n with the status Disposed cannot be restored because they have been\n permanently removed from storage.

    \n
  • \n
  • \n

    \n DisassociateExternalConnection: Removes an existing external connection from a repository. \n

    \n
  • \n
  • \n

    \n GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package.

    \n
  • \n
  • \n

    \n GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires the authorization period has passed. \n The default authorization period is 12 hours and can be customized to any length with a maximum of 12 hours.

    \n
  • \n
  • \n

    \n GetDomainPermissionsPolicy: Returns the policy of a resource\n that is attached to the specified domain.

    \n
  • \n
  • \n

    \n GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

    \n
  • \n
  • \n

    \n GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

    \n
  • \n
  • \n

    \n GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each \n package format:\n

    \n
      \n
    • \n

      \n generic\n

      \n
    • \n
    • \n

      \n maven\n

      \n
    • \n
    • \n

      \n npm\n

      \n
    • \n
    • \n

      \n nuget\n

      \n
    • \n
    • \n

      \n pypi\n

      \n
    • \n
    • \n

      \n ruby\n

      \n
    • \n
    • \n

      \n swift\n

      \n
    • \n
    \n
  • \n
  • \n

    \n GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository. \n

    \n
  • \n
  • \n

    \n ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES.

    \n
  • \n
  • \n

    \n ListAssociatedPackages: Returns a list of packages associated with the requested package group.

    \n
  • \n
  • \n

    \n ListDomains: Returns a list of DomainSummary objects. Each\n returned DomainSummary object contains information about a domain.

    \n
  • \n
  • \n

    \n ListPackages: Lists the packages in a repository.

    \n
  • \n
  • \n

    \n ListPackageGroups: Returns a list of package groups in the requested domain.

    \n
  • \n
  • \n

    \n ListPackageVersionAssets: Lists the assets for a given package version.

    \n
  • \n
  • \n

    \n ListPackageVersionDependencies: Returns a list of the direct dependencies for a\n package version.

    \n
  • \n
  • \n

    \n ListPackageVersions: Returns a list of package versions for a specified\n package in a repository.

    \n
  • \n
  • \n

    \n ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method.

    \n
  • \n
  • \n

    \n ListRepositoriesInDomain: Returns a list of the repositories in a domain.

    \n
  • \n
  • \n

    \n ListSubPackageGroups: Returns a list of direct children of the specified package group.

    \n
  • \n
  • \n

    \n PublishPackageVersion: Creates a new package version containing one or more assets.

    \n
  • \n
  • \n

    \n PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

    \n
  • \n
  • \n

    \n PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determine \n how new versions of the package can be added to a specific repository.

    \n
  • \n
  • \n

    \n PutRepositoryPermissionsPolicy: Sets the resource policy on a repository\n that specifies permissions to access it.

    \n
  • \n
  • \n

    \n UpdatePackageGroup: Updates a package group. This API cannot be used to update a package group's origin configuration or pattern.

    \n
  • \n
  • \n

    \n UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group.

    \n
  • \n
  • \n

    \n UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

    \n
  • \n
  • \n

    \n UpdateRepository: Updates the properties of a repository.

    \n
  • \n
", + "smithy.api#documentation": "

CodeArtifact is a fully managed artifact repository compatible with language-native\n package managers and build tools such as npm, Apache Maven, pip, and dotnet. You can use CodeArtifact to\n share packages with development teams and pull packages. Packages can be pulled from both\n public and CodeArtifact repositories. You can also create an upstream relationship between a CodeArtifact\n repository and another repository, which effectively merges their contents from the point of\n view of a package manager client.

\n

\n CodeArtifact concepts\n

\n
    \n
  • \n

    \n Repository: A CodeArtifact repository contains a set of package\n versions, each of which maps to a set of assets, or files. Repositories are\n polyglot, so a single repository can contain packages of any supported type. Each\n repository exposes endpoints for fetching and publishing packages using tools such as the \n npm\n CLI or the Maven CLI (\n mvn\n ). For a list of supported package managers, see the \n CodeArtifact User Guide.

    \n
  • \n
  • \n

    \n Domain: Repositories are aggregated into a higher-level entity known as a\n domain. All package assets and metadata are stored in the domain,\n but are consumed through repositories. A given package asset, such as a Maven JAR file, is\n stored once per domain, no matter how many repositories it's present in. All of the assets\n and metadata in a domain are encrypted with the same customer master key (CMK) stored in\n Key Management Service (KMS).

    \n

    Each repository is a member of a single domain and can't be moved to a\n different domain.

    \n

    The domain allows organizational policy to be applied across multiple\n repositories, such as which accounts can access repositories in the domain, and\n which public repositories can be used as sources of packages.

    \n

    Although an organization can have multiple domains, we recommend a single production\n domain that contains all published artifacts so that teams can find and share packages\n across their organization.

    \n
  • \n
  • \n

    \n Package: A package is a bundle of software and the metadata required to\n resolve dependencies and install the software. CodeArtifact supports npm, PyPI, Maven, NuGet, Swift, Ruby, Cargo, and generic package formats. \n For more information about the supported package formats and how to use CodeArtifact with them, see the \n CodeArtifact User Guide.

    \n

    In CodeArtifact, a package consists of:

    \n
      \n
    • \n

      A name (for example, webpack is the name of a\n popular npm package)

      \n
    • \n
    • \n

      An optional namespace (for example, @types in @types/node)

      \n
    • \n
    • \n

      A set of versions (for example, 1.0.0, 1.0.1,\n 1.0.2, etc.)

      \n
    • \n
    • \n

      Package-level metadata (for example, npm tags)

      \n
    • \n
    \n
  • \n
  • \n

    \n Package group: A group of packages that match a specified definition. Package \n groups can be used to apply configuration to multiple packages that match a defined pattern using \n package format, package namespace, and package name. You can use package groups to more conveniently \n configure package origin controls for multiple packages. Package origin controls are used to block or allow ingestion or publishing \n of new package versions, which protects users from malicious actions known as dependency substitution attacks.

    \n
  • \n
  • \n

    \n Package version: A version of a package, such as @types/node 12.6.9. The version number\n format and semantics vary for different package formats. For example, npm package versions\n must conform to the Semantic Versioning\n specification. In CodeArtifact, a package version consists of the version identifier,\n metadata at the package version level, and a set of assets.

    \n
  • \n
  • \n

    \n Upstream repository: One repository is upstream of another when the package versions in\n it can be accessed from the repository endpoint of the downstream repository, effectively\n merging the contents of the two repositories from the point of view of a client. CodeArtifact\n allows creating an upstream relationship between two repositories.

    \n
  • \n
  • \n

    \n Asset: An individual file stored in CodeArtifact associated with a package version, such as an npm\n .tgz file or Maven POM and JAR files.

    \n
  • \n
\n

\n CodeArtifact supported API operations\n

\n
    \n
  • \n

    \n AssociateExternalConnection: Adds an existing external \n connection to a repository.\n

    \n
  • \n
  • \n

    \n CopyPackageVersions: Copies package versions from one \n repository to another repository in the same domain.

    \n
  • \n
  • \n

    \n CreateDomain: Creates a domain.

    \n
  • \n
  • \n

    \n CreatePackageGroup: Creates a package group.

    \n
  • \n
  • \n

    \n CreateRepository: Creates a CodeArtifact repository in a domain.

    \n
  • \n
  • \n

    \n DeleteDomain: Deletes a domain. You cannot delete a domain that contains\n repositories.

    \n
  • \n
  • \n

    \n DeleteDomainPermissionsPolicy: Deletes the resource policy that is set on a domain.

    \n
  • \n
  • \n

    \n DeletePackage: Deletes a package and all associated package versions.

    \n
  • \n
  • \n

    \n DeletePackageGroup: Deletes a package group. Does not delete packages or package versions that are associated with a package group.

    \n
  • \n
  • \n

    \n DeletePackageVersions: Deletes versions of a package. After a package has\n been deleted, it can be republished, but its assets and metadata cannot be restored\n because they have been permanently removed from storage.

    \n
  • \n
  • \n

    \n DeleteRepository: Deletes a repository. \n

    \n
  • \n
  • \n

    \n DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a repository.

    \n
  • \n
  • \n

    \n DescribeDomain: Returns a DomainDescription object that\n contains information about the requested domain.

    \n
  • \n
  • \n

    \n DescribePackage: Returns a PackageDescription\n object that contains details about a package.

    \n
  • \n
  • \n

    \n DescribePackageGroup: Returns a PackageGroup\n object that contains details about a package group.

    \n
  • \n
  • \n

    \n DescribePackageVersion: Returns a PackageVersionDescription\n object that contains details about a package version.

    \n
  • \n
  • \n

    \n DescribeRepository: Returns a RepositoryDescription object\n that contains detailed information about the requested repository.

    \n
  • \n
  • \n

    \n DisposePackageVersions: Disposes versions of a package. A package version\n with the status Disposed cannot be restored because it has been\n permanently removed from storage.

    \n
  • \n
  • \n

    \n DisassociateExternalConnection: Removes an existing external connection from a repository. \n

    \n
  • \n
  • \n

    \n GetAssociatedPackageGroup: Returns the most closely associated package group to the specified package.

    \n
  • \n
  • \n

    \n GetAuthorizationToken: Generates a temporary authorization token for accessing repositories in the domain. The token expires when the authorization period has passed. \n The default authorization period is 12 hours and can be customized to any length up to a maximum of 12 hours.

    \n
  • \n
  • \n

    \n GetDomainPermissionsPolicy: Returns the policy of a resource\n that is attached to the specified domain.

    \n
  • \n
  • \n

    \n GetPackageVersionAsset: Returns the contents of an asset that is in a package version.

    \n
  • \n
  • \n

    \n GetPackageVersionReadme: Gets the readme file or descriptive text for a package version.

    \n
  • \n
  • \n

    \n GetRepositoryEndpoint: Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each \n package format:\n

    \n
      \n
    • \n

      \n cargo\n

      \n
    • \n
    • \n

      \n generic\n

      \n
    • \n
    • \n

      \n maven\n

      \n
    • \n
    • \n

      \n npm\n

      \n
    • \n
    • \n

      \n nuget\n

      \n
    • \n
    • \n

      \n pypi\n

      \n
    • \n
    • \n

      \n ruby\n

      \n
    • \n
    • \n

      \n swift\n

      \n
    • \n
    \n
  • \n
  • \n

    \n GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a repository. \n

    \n
  • \n
  • \n

    \n ListAllowedRepositoriesForGroup: Lists the allowed repositories for a package group that has origin configuration set to ALLOW_SPECIFIC_REPOSITORIES.

    \n
  • \n
  • \n

    \n ListAssociatedPackages: Returns a list of packages associated with the requested package group.

    \n
  • \n
  • \n

    \n ListDomains: Returns a list of DomainSummary objects. Each\n returned DomainSummary object contains information about a domain.

    \n
  • \n
  • \n

    \n ListPackages: Lists the packages in a repository.

    \n
  • \n
  • \n

    \n ListPackageGroups: Returns a list of package groups in the requested domain.

    \n
  • \n
  • \n

    \n ListPackageVersionAssets: Lists the assets for a given package version.

    \n
  • \n
  • \n

    \n ListPackageVersionDependencies: Returns a list of the direct dependencies for a\n package version.

    \n
  • \n
  • \n

    \n ListPackageVersions: Returns a list of package versions for a specified\n package in a repository.

    \n
  • \n
  • \n

    \n ListRepositories: Returns a list of repositories owned by the Amazon Web Services account that called this method.

    \n
  • \n
  • \n

    \n ListRepositoriesInDomain: Returns a list of the repositories in a domain.

    \n
  • \n
  • \n

    \n ListSubPackageGroups: Returns a list of direct children of the specified package group.

    \n
  • \n
  • \n

    \n PublishPackageVersion: Creates a new package version containing one or more assets.

    \n
  • \n
  • \n

    \n PutDomainPermissionsPolicy: Attaches a resource policy to a domain.

    \n
  • \n
  • \n

    \n PutPackageOriginConfiguration: Sets the package origin configuration for a package, which determines \n how new versions of the package can be added to a specific repository.

    \n
  • \n
  • \n

    \n PutRepositoryPermissionsPolicy: Sets the resource policy on a repository\n that specifies permissions to access it.

    \n
  • \n
  • \n

    \n UpdatePackageGroup: Updates a package group. This API cannot be used to update a package group's origin configuration or pattern.

    \n
  • \n
  • \n

    \n UpdatePackageGroupOriginConfiguration: Updates the package origin configuration for a package group.

    \n
  • \n
  • \n

    \n UpdatePackageVersionsStatus: Updates the status of one or more versions of a package.

    \n
  • \n
  • \n

    \n UpdateRepository: Updates the properties of a repository.

    \n
  • \n
", "smithy.api#title": "CodeArtifact", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1362,7 +1362,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package versions to be copied. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when copying package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", + "smithy.api#documentation": "

The namespace of the package versions to be copied. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when copying package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", "smithy.api#httpQuery": "namespace" } }, @@ -2022,7 +2022,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package to delete. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when deleting packages of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", + "smithy.api#documentation": "

The namespace of the package to delete. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when deleting packages of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", "smithy.api#httpQuery": "namespace" } }, @@ -2124,7 +2124,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package versions to be deleted. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when deleting package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", + "smithy.api#documentation": "

The namespace of the package versions to be deleted. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when deleting package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", "smithy.api#httpQuery": "namespace" } }, @@ -2563,7 +2563,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the requested package. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when requesting packages of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", + "smithy.api#documentation": "

The namespace of the requested package. The package component that specifies its namespace depends on its type. For example:

\n \n

The namespace is required when requesting packages of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", "smithy.api#httpQuery": "namespace" } }, @@ -2669,7 +2669,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the requested package version. The package component that specifies its \n namespace depends on its type. For example:

\n \n

The namespace is required when requesting package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", + "smithy.api#documentation": "

The namespace of the requested package version. The package component that specifies its \n namespace depends on its type. For example:

\n \n

The namespace is required when requesting package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", "smithy.api#httpQuery": "namespace" } }, @@ -2965,7 +2965,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package versions to be disposed. The package component that specifies its \n namespace depends on its type. For example:

\n \n

The namespace is required when disposing package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, and Ruby package versions do not contain a corresponding component, package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", + "smithy.api#documentation": "

The namespace of the package versions to be disposed. The package component that specifies its \n namespace depends on its type. For example:

\n \n

The namespace is required when disposing package versions of the following formats:

\n
    \n
  • \n

    Maven

    \n
  • \n
  • \n

    Swift

    \n
  • \n
  • \n

    generic

    \n
  • \n
\n
\n
    \n
  • \n

    \n The namespace of a Maven package version is its groupId.\n

    \n
  • \n
  • \n

    \n The namespace of an npm or Swift package version is its scope.\n

    \n
  • \n
  • \n

    The namespace of a generic package is its namespace.

    \n
  • \n
  • \n

    \n Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component, so package versions \n of those formats do not have a namespace.\n

    \n
  • \n
", "smithy.api#httpQuery": "namespace" } }, @@ -3266,7 +3266,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package from which to get the associated package group. The package component that specifies its namespace depends on its type. For example:
The namespace is required when getting associated package groups from packages of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package from which to get the associated package group. The package component that specifies its namespace depends on its type. For example:
The namespace is required when getting associated package groups from packages of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -3533,7 +3533,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version with the requested asset file. The package component that specifies its namespace depends on its type. For example:
The namespace is required when requesting assets from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package version with the requested asset file. The package component that specifies its namespace depends on its type. For example:
The namespace is required when requesting assets from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -3681,7 +3681,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:
The namespace is required when requesting the readme from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:
The namespace is required when requesting the readme from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -3718,7 +3718,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package version with the requested readme file. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "package": { @@ -3776,7 +3776,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:
  • generic
  • maven
  • npm
  • nuget
  • pypi
  • ruby
  • swift
", + "smithy.api#documentation": "

Returns the endpoint of a repository for a specific package format. A repository has one endpoint for each package format:
  • cargo
  • generic
  • maven
  • npm
  • nuget
  • pypi
  • ruby
  • swift
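The hunk above adds cargo to the set of per-format repository endpoints. Purely as an illustration (not part of this patch), the following is a minimal Swift sketch of how the new format might be exercised through the Soto-generated CodeArtifact client; the AWSClient setup, the GetRepositoryEndpointRequest initializer, and the .cargo case are assumptions about Soto's code generation, and the domain and repository names are placeholders.

import SotoCodeArtifact

// Hypothetical sketch: resolve a repository's endpoint for the newly added
// "cargo" package format. Domain and repository names are placeholders.
func cargoEndpoint() async throws -> String? {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let codeArtifact = CodeArtifact(client: client, region: .useast1)
    let request = CodeArtifact.GetRepositoryEndpointRequest(
        domain: "my-domain",      // placeholder domain name
        format: .cargo,           // case assumed to be generated from the new CARGO enum value
        repository: "my-repo"     // placeholder repository name
    )
    return try await codeArtifact.getRepositoryEndpoint(request).repositoryEndpoint
}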
", "smithy.api#http": { "method": "GET", "uri": "/v1/repository/endpoint", @@ -4522,7 +4522,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:
The namespace is required when requesting assets from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:
The namespace is required when requesting assets from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -4573,7 +4573,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package version that contains the requested package version assets. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "package": { @@ -4682,7 +4682,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version with the requested dependencies. The package component that specifies its namespace depends on its type. For example:
The namespace is required when listing dependencies from package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package version with the requested dependencies. The package component that specifies its namespace depends on its type. For example:
The namespace is required when listing dependencies from package versions of the following formats: Maven.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm package version is its scope.
  • Python and NuGet package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -4726,7 +4726,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version that contains the returned dependencies. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package version that contains the returned dependencies. The package component that specifies its namespace depends on its type. For example:
The namespace is required when listing dependencies from package versions of the following formats: Maven.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm package version is its scope.
  • Python and NuGet package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "package": { @@ -4850,7 +4850,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:
The namespace is required when deleting package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:
The namespace is required when deleting package versions of the following formats: Maven, Swift, and generic.
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -4920,7 +4920,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package that contains the requested package versions. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "package": { @@ -5031,7 +5031,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace prefix used to filter requested packages. Only packages with a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior.
Each package format uses namespace as follows:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace prefix used to filter requested packages. Only packages with a namespace that starts with the provided string value are returned. Note that although this option is called --namespace and not --namespace-prefix, it has prefix-matching behavior.
Each package format uses namespace as follows:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -5500,7 +5500,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package that this package depends on. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package that this package depends on. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "package": { @@ -5544,7 +5544,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "name": { @@ -5608,6 +5608,12 @@ "traits": { "smithy.api#enumValue": "swift" } + }, + "CARGO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "cargo" + } } } }, @@ -6044,7 +6050,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "package": { @@ -6092,7 +6098,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" + "smithy.api#documentation": "

The namespace of the package version. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
" } }, "packageName": { @@ -6774,7 +6780,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package to be updated. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, @@ -7739,7 +7745,7 @@ "namespace": { "target": "com.amazonaws.codeartifact#PackageNamespace", "traits": { - "smithy.api#documentation": "

The namespace of the package version to be updated. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, and Ruby package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", + "smithy.api#documentation": "

The namespace of the package version to be updated. The package component that specifies its namespace depends on its type. For example:
  • The namespace of a Maven package version is its groupId.
  • The namespace of an npm or Swift package version is its scope.
  • The namespace of a generic package is its namespace.
  • Python, NuGet, Ruby, and Cargo package versions do not contain a corresponding component; package versions of those formats do not have a namespace.
", "smithy.api#httpQuery": "namespace" } }, diff --git a/models/codebuild.json b/models/codebuild.json index bbafcd7e7e..98f50908cd 100644 --- a/models/codebuild.json +++ b/models/codebuild.json @@ -686,7 +686,7 @@ "timeoutInMinutes": { "target": "com.amazonaws.codebuild#WrapperInt", "traits": { - "smithy.api#documentation": "

How long, in minutes, for CodeBuild to wait before timing out this build if it does not\n get marked as completed.

" + "smithy.api#documentation": "

How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out this build if it does not\n get marked as completed.
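This model change raises the per-build timeout ceiling from 480 to 2160 minutes (the new BuildTimeOut shape is constrained to the range 5 to 2160). As a rough, hypothetical illustration only, a Soto CodeBuild caller might now pass a longer override when starting a build; the StartBuildInput initializer and member names below are assumptions about Soto's generated API, and the project name is a placeholder.

import SotoCodeBuild

// Hypothetical sketch: start a build with a 24-hour timeout override, which
// the previous 480-minute (8-hour) limit would have rejected.
func startLongBuild() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let codeBuild = CodeBuild(client: client, region: .useast1)
    let input = CodeBuild.StartBuildInput(
        projectName: "my-project",       // placeholder project name
        timeoutInMinutesOverride: 1440   // 24 hours, within the new 5...2160 range
    )
    _ = try await codeBuild.startBuild(input)
}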

" } }, "queuedTimeoutInMinutes": { @@ -1394,6 +1394,15 @@ "smithy.api#documentation": "

Contains summary information about a batch build group.

" } }, + "com.amazonaws.codebuild#BuildTimeOut": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 5, + "max": 2160 + } + } + }, "com.amazonaws.codebuild#Builds": { "type": "list", "member": { @@ -2881,7 +2890,16 @@ "overflowBehavior": { "target": "com.amazonaws.codebuild#FleetOverflowBehavior", "traits": { - "smithy.api#documentation": "

The compute fleet overflow behavior.
  • For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available.
  • For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand.
" + "smithy.api#documentation": "

The compute fleet overflow behavior.

\n " + } + }, + "vpcConfig": { + "target": "com.amazonaws.codebuild#VpcConfig" + }, + "fleetServiceRole": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The service role associated with the compute fleet. For more information, see \n Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide.

" } }, "tags": { @@ -2964,7 +2982,7 @@ "sourceVersion": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "smithy.api#documentation": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For GitLab: the commit ID, branch, or Git tag to use.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" } }, "secondarySourceVersions": { @@ -3007,9 +3025,9 @@ } }, "timeoutInMinutes": { - "target": "com.amazonaws.codebuild#TimeOut", + "target": "com.amazonaws.codebuild#BuildTimeOut", "traits": { - "smithy.api#documentation": "

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before it times out\n any build that has not been marked as completed. The default is 60 minutes.

" + "smithy.api#documentation": "

How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before it times out\n any build that has not been marked as completed. The default is 60 minutes.

" } }, "queuedTimeoutInMinutes": { @@ -3033,7 +3051,7 @@ "vpcConfig": { "target": "com.amazonaws.codebuild#VpcConfig", "traits": { - "smithy.api#documentation": "

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

" + "smithy.api#documentation": "

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

Note: If you're using compute fleets during project creation, do not provide vpcConfig.
" } }, "badgeEnabled": { @@ -3210,6 +3228,18 @@ "traits": { "smithy.api#documentation": "

Specifies the type of build this webhook will trigger.

" } + }, + "manualCreation": { + "target": "com.amazonaws.codebuild#WrapperBoolean", + "traits": { + "smithy.api#documentation": "

If manualCreation is true, CodeBuild doesn't create a webhook in GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and secret values in the output can be used to manually create a webhook within GitHub.
Note: manualCreation is only available for GitHub webhooks.
" + } + }, + "scopeConfiguration": { + "target": "com.amazonaws.codebuild#ScopeConfiguration", + "traits": { + "smithy.api#documentation": "

The scope configuration for global or organization webhooks.
Note: Global or organization webhooks are only available for GitHub and GitHub Enterprise webhooks.
" + } } }, "traits": { @@ -4091,7 +4121,16 @@ "overflowBehavior": { "target": "com.amazonaws.codebuild#FleetOverflowBehavior", "traits": { - "smithy.api#documentation": "

The compute fleet overflow behavior.
  • For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available.
  • For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand.
" + "smithy.api#documentation": "

The compute fleet overflow behavior.

\n " + } + }, + "vpcConfig": { + "target": "com.amazonaws.codebuild#VpcConfig" + }, + "fleetServiceRole": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The service role associated with the compute fleet. For more information, see \n Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide.

" } }, "tags": { @@ -4139,6 +4178,12 @@ "traits": { "smithy.api#enumValue": "UPDATE_FAILED" } + }, + "ACTION_REQUIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTION_REQUIRED" + } } } }, @@ -4521,7 +4566,7 @@ "token": { "target": "com.amazonaws.codebuild#SensitiveNonEmptyString", "traits": { - "smithy.api#documentation": "

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket,\n this is either the access token or the app password.

", + "smithy.api#documentation": "

For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket,\n this is either the access token or the app password. For the authType CODECONNECTIONS, \n this is the connectionArn.

", "smithy.api#required": {} } }, @@ -4535,7 +4580,7 @@ "authType": { "target": "com.amazonaws.codebuild#AuthType", "traits": { - "smithy.api#documentation": "

The type of authentication used to connect to a GitHub, GitHub Enterprise, or\n Bitbucket repository. An OAUTH connection is not supported by the API and must be\n created using the CodeBuild console.

", + "smithy.api#documentation": "

The type of authentication used to connect to a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or\n Bitbucket repository. An OAUTH connection is not supported by the API and must be\n created using the CodeBuild console. Note that CODECONNECTIONS is only valid for \n GitLab and GitLab Self Managed.

", "smithy.api#required": {} } }, @@ -5834,7 +5879,7 @@ "sourceVersion": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "smithy.api#documentation": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For GitLab: the commit ID, branch, or Git tag to use.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" } }, "secondarySourceVersions": { @@ -5874,9 +5919,9 @@ } }, "timeoutInMinutes": { - "target": "com.amazonaws.codebuild#TimeOut", + "target": "com.amazonaws.codebuild#BuildTimeOut", "traits": { - "smithy.api#documentation": "

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any\n related build that did not get marked as completed. The default is 60 minutes.

" + "smithy.api#documentation": "

How long, in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before timing out any\n related build that did not get marked as completed. The default is 60 minutes.

" } }, "queuedTimeoutInMinutes": { @@ -6290,9 +6335,9 @@ "traits": { "smithy.api#length": { "min": 2, - "max": 255 + "max": 150 }, - "smithy.api#pattern": "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,254}$" + "smithy.api#pattern": "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,149}$" } }, "com.amazonaws.codebuild#ProjectNames": { @@ -6424,7 +6469,7 @@ "sourceVersion": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

The source version for the corresponding source identifier. If specified, must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub or GitLab: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", + "smithy.api#documentation": "

The source version for the corresponding source identifier. If specified, must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example, pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For GitLab: the commit ID, branch, or Git tag to use.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", "smithy.api#required": {} } } @@ -7355,6 +7400,34 @@ "smithy.api#documentation": "

The scaling configuration output of a compute fleet.

" } }, + "com.amazonaws.codebuild#ScopeConfiguration": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "

The name of either the enterprise or the organization that will send webhook events to CodeBuild, depending on whether the webhook is a global or an organization webhook, respectively.

", + "smithy.api#required": {} + } + }, + "domain": { + "target": "com.amazonaws.codebuild#String", + "traits": { + "smithy.api#documentation": "

The domain of the GitHub Enterprise organization. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE.

" + } + }, + "scope": { + "target": "com.amazonaws.codebuild#WebhookScopeType", + "traits": { + "smithy.api#documentation": "

The type of scope for a GitHub webhook.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration information about the scope for a webhook.
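The new ScopeConfiguration shape (name, domain, scope) models GitHub global and organization webhooks. The following is a speculative Swift sketch of how it might be used with the Soto-generated CodeBuild client; the CreateWebhookInput and ScopeConfiguration initializers and the .githubOrganization case are assumptions about what Soto will generate from this model, and the project and organization names are placeholders.

import SotoCodeBuild

// Hypothetical sketch: create an organization-scoped GitHub webhook for a
// CodeBuild project using the newly modeled scopeConfiguration member.
func createOrganizationWebhook() async throws {
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let codeBuild = CodeBuild(client: client, region: .useast1)
    let input = CodeBuild.CreateWebhookInput(
        projectName: "my-project",                 // placeholder project name
        scopeConfiguration: CodeBuild.ScopeConfiguration(
            name: "my-github-org",                 // placeholder organization name
            scope: .githubOrganization             // assumed WebhookScopeType case
        )
    )
    _ = try await codeBuild.createWebhook(input)
}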

" + } + }, "com.amazonaws.codebuild#SecurityGroupIds": { "type": "list", "member": { @@ -7766,7 +7839,7 @@ } }, "buildTimeoutInMinutesOverride": { - "target": "com.amazonaws.codebuild#TimeOut", + "target": "com.amazonaws.codebuild#BuildTimeOut", "traits": { "smithy.api#documentation": "

Overrides the build timeout specified in the batch build project.

" } @@ -7863,7 +7936,7 @@ "sourceVersion": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depend on the source provider:
  CodeCommit: the commit ID, branch, or Git tag to use.
  GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "smithy.api#documentation": "

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depend on the source provider:
  CodeCommit: the commit ID, branch, or Git tag to use.
  GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  GitLab: the commit ID, branch, or Git tag to use.
  Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" } }, "artifactsOverride": { @@ -7899,7 +7972,7 @@ "sourceAuthOverride": { "target": "com.amazonaws.codebuild#SourceAuth", "traits": { - "smithy.api#documentation": "

An authorization type for this build that overrides the one defined in the build\n project. This override applies only if the build project's source is BitBucket or\n GitHub.

" + "smithy.api#documentation": "

An authorization type for this build that overrides the one defined in the build\n project. This override applies only if the build project's source is BitBucket, GitHub, \n GitLab, or GitLab Self Managed.

" } }, "gitCloneDepthOverride": { @@ -7981,9 +8054,9 @@ } }, "timeoutInMinutesOverride": { - "target": "com.amazonaws.codebuild#TimeOut", + "target": "com.amazonaws.codebuild#BuildTimeOut", "traits": { - "smithy.api#documentation": "

The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this\n build only, the latest setting already defined in the build project.

" + "smithy.api#documentation": "

The number of build timeout minutes, from 5 to 2160 (36 hours), that overrides, for this\n build only, the latest setting already defined in the build project.

" } }, "queuedTimeoutInMinutesOverride": { @@ -8445,7 +8518,16 @@ "overflowBehavior": { "target": "com.amazonaws.codebuild#FleetOverflowBehavior", "traits": { - "smithy.api#documentation": "

The compute fleet overflow behavior.
  • For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available.
  • For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand.
" + "smithy.api#documentation": "

The compute fleet overflow behavior.

\n " + } + }, + "vpcConfig": { + "target": "com.amazonaws.codebuild#VpcConfig" + }, + "fleetServiceRole": { + "target": "com.amazonaws.codebuild#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The service role associated with the compute fleet. For more information, see \n Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide.

" } }, "tags": { @@ -8524,7 +8606,7 @@ "sourceVersion": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" + "smithy.api#documentation": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:
  • For CodeCommit: the commit ID, branch, or Git tag to use.
  • For GitHub: the commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For GitLab: the commit ID, branch, or Git tag to use.
  • For Bitbucket: the commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.
  • For Amazon S3: the version ID of the object that represents the build input ZIP file to use.
If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).
For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

" } }, "secondarySourceVersions": { @@ -8564,9 +8646,9 @@ } }, "timeoutInMinutes": { - "target": "com.amazonaws.codebuild#TimeOut", + "target": "com.amazonaws.codebuild#BuildTimeOut", "traits": { - "smithy.api#documentation": "

The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before\n timing out any related build that did not get marked as completed.

" + "smithy.api#documentation": "

The replacement value in minutes, from 5 to 2160 (36 hours), for CodeBuild to wait before\n timing out any related build that did not get marked as completed.

" } }, "queuedTimeoutInMinutes": { @@ -8922,11 +9004,23 @@ "smithy.api#documentation": "

Specifies the type of build this webhook will trigger.

" } }, + "manualCreation": { + "target": "com.amazonaws.codebuild#WrapperBoolean", + "traits": { + "smithy.api#documentation": "

If manualCreation is true, CodeBuild doesn't create a webhook in GitHub and instead returns payloadUrl and secret values for the webhook. The payloadUrl and secret values in the output can be used to manually create a webhook within GitHub.
Note: manualCreation is only available for GitHub webhooks.
" + } + }, "lastModifiedSecret": { "target": "com.amazonaws.codebuild#Timestamp", "traits": { "smithy.api#documentation": "

A timestamp that indicates the last time a repository's secret token was modified.\n

" } + }, + "scopeConfiguration": { + "target": "com.amazonaws.codebuild#ScopeConfiguration", + "traits": { + "smithy.api#documentation": "

The scope configuration for global or organization webhooks.
Note: Global or organization webhooks are only available for GitHub and GitHub Enterprise webhooks.
" + } } }, "traits": { @@ -8956,7 +9050,7 @@ "type": { "target": "com.amazonaws.codebuild#WebhookFilterType", "traits": { - "smithy.api#documentation": "

The type of webhook filter. There are nine webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, COMMIT_MESSAGE, TAG_NAME, RELEASE_NAME, and WORKFLOW_NAME.
  • EVENT: A webhook event triggers a build when the provided pattern matches one of nine event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, PULL_REQUEST_MERGED, RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events. Note: PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only; RELEASED, PRERELEASED, and WORKFLOW_JOB_QUEUED work with GitHub only.
  • ACTOR_ACCOUNT_ID: A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern.
  • HEAD_REF: A webhook event triggers a build when the head reference matches the regular expression pattern, for example refs/heads/branch-name and refs/tags/tag-name. Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.
  • BASE_REF: A webhook event triggers a build when the base reference matches the regular expression pattern, for example refs/heads/branch-name. Works with pull request events only.
  • FILE_PATH: A webhook triggers a build when the path of a changed file matches the regular expression pattern. Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
  • COMMIT_MESSAGE: A webhook triggers a build when the head commit message matches the regular expression pattern. Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
  • TAG_NAME: A webhook triggers a build when the tag name of the release matches the regular expression pattern. Works with RELEASED and PRERELEASED events only.
  • RELEASE_NAME: A webhook triggers a build when the release name matches the regular expression pattern. Works with RELEASED and PRERELEASED events only.
  • WORKFLOW_NAME: A webhook triggers a build when the workflow name matches the regular expression pattern. Works with WORKFLOW_JOB_QUEUED events only.
", + "smithy.api#documentation": "

The type of webhook filter. There are nine webhook filter types: EVENT,\n ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF,\n FILE_PATH, COMMIT_MESSAGE, TAG_NAME, RELEASE_NAME, \n and WORKFLOW_NAME.

\n
    \n
  • \n

    \n EVENT\n

    \n
      \n
    • \n

      A webhook event triggers a build when the provided pattern\n matches one of nine event types: PUSH,\n PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, \n PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, \n PULL_REQUEST_MERGED, RELEASED, PRERELEASED, \n and WORKFLOW_JOB_QUEUED. The EVENT patterns are\n specified as a comma-separated string. For example, PUSH,\n PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull\n request created, and pull request updated events.

      \n \n

      Types PULL_REQUEST_REOPENED and WORKFLOW_JOB_QUEUED \n work with GitHub and GitHub Enterprise only. Types RELEASED and \n PRERELEASED work with GitHub only.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    ACTOR_ACCOUNT_ID

    \n
      \n
    • \n

      A webhook event triggers a build when a GitHub, GitHub Enterprise, or\n Bitbucket account ID matches the regular expression pattern.\n

      \n
    • \n
    \n
  • \n
  • \n

    HEAD_REF

    \n
      \n
    • \n

      A webhook event triggers a build when the head reference matches the\n regular expression pattern. For example,\n refs/heads/branch-name and refs/tags/tag-name.

      \n \n

      Works with GitHub and GitHub Enterprise push, GitHub and GitHub\n Enterprise pull request, Bitbucket push, and Bitbucket pull request events.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    BASE_REF

    \n
      \n
    • \n

      A webhook event triggers a build when the base reference matches the\n regular expression pattern. For example,\n refs/heads/branch-name.

      \n \n

      Works with pull request events only.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    FILE_PATH

    \n
      \n
    • \n

      A webhook triggers a build when the path of a changed file matches the\n regular expression pattern.

      \n \n

      Works with GitHub and Bitbucket push and pull request events.\n Also works with GitHub Enterprise push events, but does not work with\n GitHub Enterprise pull request events.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    COMMIT_MESSAGE

    \n
      \n
    • \n

      A webhook triggers a build when the head commit message matches the\n regular expression pattern.

      \n \n

      Works with GitHub and Bitbucket push and pull request events.\n Also works with GitHub Enterprise push events, but does not work with\n GitHub Enterprise pull request events.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    TAG_NAME

    \n
      \n
    • \n

      A webhook triggers a build when the tag name of the release matches the \n regular expression pattern.

      \n \n

      Works with RELEASED and PRERELEASED events only.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    RELEASE_NAME

    \n
      \n
    • \n

      A webhook triggers a build when the release name matches the \n regular expression pattern.

      \n \n

      Works with RELEASED and PRERELEASED events only.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    REPOSITORY_NAME

    \n
      \n
    • \n

      A webhook triggers a build when the repository name matches the \n regular expression pattern.

      \n \n

      Works with GitHub global or organization webhooks only.

      \n
      \n
    • \n
    \n
  • \n
  • \n

    WORKFLOW_NAME

    \n
      \n
    • \n

      A webhook triggers a build when the workflow name matches the \n regular expression pattern.

      \n \n

      Works with WORKFLOW_JOB_QUEUED events only.

      \n
      \n
    • \n
    \n
  • \n
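To make the new REPOSITORY_NAME filter above concrete, here is a minimal sketch using the Soto CodeBuild client that would be generated from this model. Module, shape, and enum case names are assumed to follow Soto's usual code generation; the project name and patterns are placeholders.

    import SotoCodeBuild   // assumed module name for the generated service

    // Sketch: register a webhook whose filter group matches PUSH events from
    // repositories whose names start with "service-". REPOSITORY_NAME filters
    // only apply to GitHub organization or global webhooks.
    func createRepositoryFilteredWebhook(_ codeBuild: CodeBuild) async throws {
        _ = try await codeBuild.createWebhook(.init(
            filterGroups: [[
                .init(pattern: "PUSH", type: .event),
                .init(pattern: "^service-.*", type: .repositoryName)
            ]],
            projectName: "my-org-project"   // placeholder project name
        ))
    }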
", "smithy.api#required": {} } }, @@ -9037,6 +9131,23 @@ } } }, + "com.amazonaws.codebuild#WebhookScopeType": { + "type": "enum", + "members": { + "GITHUB_ORGANIZATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GITHUB_ORGANIZATION" + } + }, + "GITHUB_GLOBAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GITHUB_GLOBAL" + } + } + } + }, "com.amazonaws.codebuild#WrapperBoolean": { "type": "boolean" }, diff --git a/models/codeguru-security.json b/models/codeguru-security.json index 08cc347b4b..e9ed31793c 100644 --- a/models/codeguru-security.json +++ b/models/codeguru-security.json @@ -43,36 +43,36 @@ "date": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The date from which the finding metrics were retrieved.

" + "smithy.api#documentation": "

The date from which the findings metrics were retrieved.

" } }, "newFindings": { "target": "com.amazonaws.codegurusecurity#FindingMetricsValuePerSeverity", "traits": { - "smithy.api#documentation": "

The number of new findings of each severity in account on the specified date.

" + "smithy.api#documentation": "

The number of new findings of each severity on the specified date.

" } }, "closedFindings": { "target": "com.amazonaws.codegurusecurity#FindingMetricsValuePerSeverity", "traits": { - "smithy.api#documentation": "

The number of closed findings of each severity in an account on the specified date.

" + "smithy.api#documentation": "

The number of closed findings of each severity on the specified date.

" } }, "openFindings": { "target": "com.amazonaws.codegurusecurity#FindingMetricsValuePerSeverity", "traits": { - "smithy.api#documentation": "

The number of open findings of each severity in an account as of the specified date.

" + "smithy.api#documentation": "

The number of open findings of each severity as of the specified date.

" } }, "meanTimeToClose": { "target": "com.amazonaws.codegurusecurity#FindingMetricsValuePerSeverity", "traits": { - "smithy.api#documentation": "

The average time it takes to close findings of each severity in days.

" + "smithy.api#documentation": "

The average time in days it takes to close findings of each severity as of a specified\n date.

" } } }, "traits": { - "smithy.api#documentation": "

A summary of findings metrics in an account.

" + "smithy.api#documentation": "

A summary of findings metrics for an account on a specified date.

" } }, "com.amazonaws.codegurusecurity#AnalysisType": { @@ -167,7 +167,7 @@ ], "origin": "*" }, - "smithy.api#documentation": "\n

Amazon CodeGuru Security is in preview release and is subject to\n change.

\n
\n

This section provides documentation for the Amazon CodeGuru Security API operations.\n CodeGuru Security is a service that uses program analysis and machine learning to detect\n security policy violations and vulnerabilities, and recommends ways to address these security\n risks.

\n

By proactively detecting and providing recommendations for addressing security risks,\n CodeGuru Security improves the overall security of your application code. For more information\n about CodeGuru Security, see the \n Amazon CodeGuru Security User Guide.

", + "smithy.api#documentation": "\n

Amazon CodeGuru Security is in preview release and is subject to change.

\n
\n

This section provides documentation for the Amazon CodeGuru Security API operations.\n CodeGuru Security is a service that uses program analysis and machine learning to detect\n security policy violations and vulnerabilities, and recommends ways to address these security\n risks.

\n

By proactively detecting and providing recommendations for addressing security risks,\n CodeGuru Security improves the overall security of your application code. For more information\n about CodeGuru Security, see the \n Amazon CodeGuru Security User Guide.

", "smithy.api#title": "Amazon CodeGuru Security", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -211,7 +211,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -230,7 +229,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -258,13 +256,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -277,7 +276,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -291,7 +289,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -314,7 +311,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -349,11 +345,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -364,16 +358,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -387,14 +384,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -403,15 +398,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -422,16 +416,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -445,7 +442,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -465,11 +461,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -480,20 +474,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -504,18 +500,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -858,7 +858,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of all requested findings.

", + "smithy.api#documentation": "

Returns a list of requested findings from standard scans.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -930,7 +930,7 @@ "findings": { "target": "com.amazonaws.codegurusecurity#Findings", "traits": { - "smithy.api#documentation": "

A list of all requested findings.

", + "smithy.api#documentation": "

A list of all findings which were successfully fetched.

", "smithy.api#required": {} } }, @@ -1080,7 +1080,7 @@ } ], "traits": { - "smithy.api#documentation": "

Use to create a scan using code uploaded to an S3 bucket.

", + "smithy.api#documentation": "

Use to create a scan using code uploaded to an Amazon S3 bucket.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1101,14 +1101,14 @@ "resourceId": { "target": "com.amazonaws.codegurusecurity#ResourceId", "traits": { - "smithy.api#documentation": "

The identifier for an input resource used to create a scan.

", + "smithy.api#documentation": "

The identifier for the resource object to be scanned.

", "smithy.api#required": {} } }, "scanName": { "target": "com.amazonaws.codegurusecurity#ScanName", "traits": { - "smithy.api#documentation": "

The unique name that CodeGuru Security uses to track revisions across multiple\n scans of the same resource. Only allowed for a STANDARD scan type. If not\n specified, it will be auto generated.

", + "smithy.api#documentation": "

The unique name that CodeGuru Security uses to track revisions across multiple scans of\n the same resource. Only allowed for a STANDARD scan type.
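A minimal sketch of starting a STANDARD scan with the Soto client generated from this model, assuming the usual Soto naming (a CodeGuruSecurity service struct with request shapes nested inside it, and ResourceId generated as an enum with a codeArtifactId case). The scan name and code artifact identifier are placeholders.

    import SotoCodeGuruSecurity   // assumed module name for the generated service

    // Sketch: start a STANDARD scan for code previously uploaded with CreateUploadUrl.
    // The scanName tracks revisions across repeated scans of the same resource.
    func startScan(_ codeGuru: CodeGuruSecurity, codeArtifactId: String) async throws {
        let response = try await codeGuru.createScan(.init(
            resourceId: .codeArtifactId(codeArtifactId),   // id returned by CreateUploadUrl
            scanName: "my-service-scan",                    // placeholder scan name
            scanType: .standard
        ))
        print("scan state:", response.scanState)
    }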

", "smithy.api#required": {} } }, @@ -1200,7 +1200,7 @@ } ], "traits": { - "smithy.api#documentation": "

Generates a pre-signed URL and request headers used to upload a code resource.

\n

You can upload your code resource to the URL and add the request headers using any HTTP\n client.

", + "smithy.api#documentation": "

Generates a pre-signed URL, request headers used to upload a code resource, and code\n artifact identifier for the uploaded resource.

\n

You can upload your code resource to the URL with the request headers using any HTTP\n client.
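A hedged sketch of that upload flow: request the pre-signed URL through the generated Soto client, then PUT the code archive to the returned s3Url with the returned requestHeaders using Foundation's URLSession. Module and member names are assumed to follow Soto's normal code generation.

    import Foundation
    import SotoCodeGuruSecurity   // assumed module name for the generated service

    // Sketch: get a pre-signed upload URL, PUT the zipped source to it with the
    // headers CodeGuru Security requires, and return the code artifact identifier
    // that CreateScan expects.
    func uploadCodeResource(_ codeGuru: CodeGuruSecurity,
                            zipData: Data,
                            scanName: String) async throws -> String {
        let upload = try await codeGuru.createUploadUrl(.init(scanName: scanName))

        var request = URLRequest(url: URL(string: upload.s3Url)!)
        request.httpMethod = "PUT"
        for (name, value) in upload.requestHeaders {
            request.setValue(value, forHTTPHeaderField: name)
        }
        _ = try await URLSession.shared.upload(for: request, from: zipData)

        // Pass this identifier to CreateScan as the resource to scan.
        return upload.codeArtifactId
    }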

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1229,7 +1229,7 @@ "s3Url": { "target": "com.amazonaws.codegurusecurity#S3Url", "traits": { - "smithy.api#documentation": "

A pre-signed S3 URL. You can upload the code file you want to scan and add the required\n requestHeaders using any HTTP client.

", + "smithy.api#documentation": "

A pre-signed S3 URL. You can upload the code file you want to scan with the required\n requestHeaders using any HTTP client.

", "smithy.api#required": {} } }, @@ -1243,7 +1243,7 @@ "codeArtifactId": { "target": "com.amazonaws.codegurusecurity#Uuid", "traits": { - "smithy.api#documentation": "

The identifier for the uploaded code resource.

", + "smithy.api#documentation": "

The identifier for the uploaded code resource. Pass this to CreateScan to use\n the uploaded resources.

", "smithy.api#required": {} } } @@ -1264,12 +1264,12 @@ "kmsKeyArn": { "target": "com.amazonaws.codegurusecurity#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The KMS key ARN to use for encryption. This must be provided as a header when uploading\n your code resource.

" + "smithy.api#documentation": "

The KMS key ARN that is used for encryption. If an AWS-managed key is used for encryption,\n returns empty.

" } } }, "traits": { - "smithy.api#documentation": "

Information about account-level configuration.

" + "smithy.api#documentation": "

Information about the encryption configuration for an account. Required to call\n UpdateAccountConfiguration.

" } }, "com.amazonaws.codegurusecurity#ErrorCode": { @@ -1307,6 +1307,14 @@ } } }, + "com.amazonaws.codegurusecurity#ErrorMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, "com.amazonaws.codegurusecurity#FilePath": { "type": "structure", "members": { @@ -1363,7 +1371,7 @@ "generatorId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The identifier for the component that generated a finding such as AWSCodeGuruSecurity or\n AWSInspector.

" + "smithy.api#documentation": "

The identifier for the component that generated a finding such as AmazonCodeGuruSecurity.

" } }, "id": { @@ -1405,7 +1413,7 @@ "severity": { "target": "com.amazonaws.codegurusecurity#Severity", "traits": { - "smithy.api#documentation": "

The severity of the finding.

" + "smithy.api#documentation": "

The severity of the finding. Severity can be critical, high, medium, low, or\n informational. For information on severity levels, see \n Finding severity in the \n Amazon CodeGuru Security User Guide.

" } }, "remediation": { @@ -1489,36 +1497,36 @@ "info": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The finding is related to quality or readability improvements and not considered\n actionable.

" + "smithy.api#documentation": "

A numeric value corresponding to an informational finding.

" } }, "low": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The severity of the finding is low and does require action on its own.

" + "smithy.api#documentation": "

A numeric value corresponding to a low severity finding.

" } }, "medium": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The severity of the finding is medium and should be addressed as a mid-term priority.

" + "smithy.api#documentation": "

A numeric value corresponding to a medium severity finding.

" } }, "high": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The severity of the finding is high and should be addressed as a near-term priority.

" + "smithy.api#documentation": "

A numeric value corresponding to a high severity finding.

" } }, "critical": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The severity of the finding is critical and should be addressed immediately.

" + "smithy.api#documentation": "

A numeric value corresponding to a critical finding.

" } } }, "traits": { - "smithy.api#documentation": "

The severity of the issue in the code that generated a finding.

" + "smithy.api#documentation": "

A numeric value corresponding to the severity of a finding, such as the number of open\n findings or the average time it takes to close findings of a given severity.

" } }, "com.amazonaws.codegurusecurity#Findings": { @@ -1556,7 +1564,7 @@ } ], "traits": { - "smithy.api#documentation": "

Use to get account level configuration.

", + "smithy.api#documentation": "

Use to get the encryption configuration for an account.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -1578,7 +1586,7 @@ "encryptionConfig": { "target": "com.amazonaws.codegurusecurity#EncryptionConfig", "traits": { - "smithy.api#documentation": "

An EncryptionConfig object that contains the KMS key ARN to use for\n encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify\n your own key, call UpdateAccountConfiguration.

", + "smithy.api#documentation": "

An EncryptionConfig object that contains the KMS key ARN that is used for\n encryption. By default, CodeGuru Security uses an AWS-managed key for encryption. To specify\n your own key, call UpdateAccountConfiguration. If you do not specify a\n customer-managed key, returns empty.

", "smithy.api#required": {} } } @@ -1652,11 +1660,11 @@ "maxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. Use this parameter when\n paginating results. If additional results exist beyond the number you specify, the nextToken\n element is returned in the response. Use nextToken in a subsequent request to retrieve\n additional results.

", + "smithy.api#documentation": "

The maximum number of results to return in the response. Use this parameter when\n paginating results. If additional results exist beyond the number you specify, the nextToken\n element is returned in the response. Use nextToken in a subsequent request to retrieve\n additional results. If not specified, returns 1000 results.
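Soto also emits paginator helpers for operations like this, but a manual nextToken loop illustrates the paging contract; the names below are assumed from the usual code generation.

    import SotoCodeGuruSecurity   // assumed module name

    // Sketch: page through GetFindings until the service stops returning a token.
    // maxResults caps each page; the model now allows values up to 1000.
    func allFindings(_ codeGuru: CodeGuruSecurity,
                     scanName: String) async throws -> [CodeGuruSecurity.Finding] {
        var findings: [CodeGuruSecurity.Finding] = []
        var nextToken: String? = nil
        repeat {
            let page = try await codeGuru.getFindings(.init(
                maxResults: 100,
                nextToken: nextToken,
                scanName: scanName
            ))
            findings.append(contentsOf: page.findings)
            nextToken = page.nextToken
        } while nextToken != nil
        return findings
    }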

", "smithy.api#httpQuery": "maxResults", "smithy.api#range": { "min": 1, - "max": 100 + "max": 1000 } } }, @@ -1715,7 +1723,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns top level metrics about an account from a specified date, including number of open\n findings, the categories with most findings, the scans with most open findings, and scans with\n most open critical findings.

", + "smithy.api#documentation": "

Returns a summary of metrics for an account from a specified date, including number of open\n findings, the categories with most findings, the scans with most open findings, and scans with\n most open critical findings.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -1730,7 +1738,7 @@ "date": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The date you want to retrieve summary metrics from, rounded to the nearest day. The date\n must be within the past two years since metrics data is only stored for two years. If a date\n outside of this range is passed, the response will be empty.

", + "smithy.api#documentation": "

The date you want to retrieve summary metrics from, rounded to the nearest day. The date\n must be within the past two years.
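For illustration, fetching the summary for the start of the current day with the generated Soto client might look like the sketch below (names assumed from the usual code generation).

    import Foundation
    import SotoCodeGuruSecurity   // assumed module name

    // Sketch: pull the account metrics summary for today. The service rounds the
    // timestamp to the nearest day and keeps only two years of history.
    func todaysMetrics(_ codeGuru: CodeGuruSecurity) async throws -> CodeGuruSecurity.MetricsSummary? {
        let today = Calendar.current.startOfDay(for: Date())
        let response = try await codeGuru.getMetricsSummary(.init(date: today))
        return response.metricsSummary
    }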

", "smithy.api#httpQuery": "date", "smithy.api#required": {} } @@ -1774,6 +1782,9 @@ }, { "target": "com.amazonaws.codegurusecurity#ThrottlingException" + }, + { + "target": "com.amazonaws.codegurusecurity#ValidationException" } ], "traits": { @@ -1829,7 +1840,7 @@ "scanState": { "target": "com.amazonaws.codegurusecurity#ScanState", "traits": { - "smithy.api#documentation": "

The current state of the scan. Pass either InProgress,\n Successful, or Failed.

", + "smithy.api#documentation": "

The current state of the scan. Returns either InProgress,\n Successful, or Failed.

", "smithy.api#required": {} } }, @@ -1864,6 +1875,12 @@ "traits": { "smithy.api#documentation": "

The ARN for the scan name.

" } + }, + "errorMessage": { + "target": "com.amazonaws.codegurusecurity#ErrorMessage", + "traits": { + "smithy.api#documentation": "

Details about the error that caused the scan to fail.

" + } } }, "traits": { @@ -1970,7 +1987,7 @@ "maxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. Use this parameter when\n paginating results. If additional results exist beyond the number you specify, the nextToken\n element is returned in the response. Use nextToken in a subsequent request to retrieve\n additional results.

", + "smithy.api#documentation": "

The maximum number of results to return in the response. Use this parameter when\n paginating results. If additional results exist beyond the number you specify, the nextToken\n element is returned in the response. Use nextToken in a subsequent request to retrieve\n additional results. If not specified, returns 1000 results.

", "smithy.api#httpQuery": "maxResults", "smithy.api#range": { "min": 1, @@ -1981,7 +1998,7 @@ "startDate": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The start date of the interval which you want to retrieve metrics from.

", + "smithy.api#documentation": "

The start date of the interval which you want to retrieve metrics from. Rounds to the\n nearest day.

", "smithy.api#httpQuery": "startDate", "smithy.api#required": {} } @@ -1989,7 +2006,7 @@ "endDate": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The end date of the interval which you want to retrieve metrics from.

", + "smithy.api#documentation": "

The end date of the interval which you want to retrieve metrics from. Rounds to the nearest\n day.

", "smithy.api#httpQuery": "endDate", "smithy.api#required": {} } @@ -2042,7 +2059,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of all the standard scans in an account. Does not return express\n scans.

", + "smithy.api#documentation": "

Returns a list of all scans in an account. Does not return EXPRESS\n scans.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -2070,7 +2087,7 @@ "maxResults": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in the response. Use this parameter when\n paginating results. If additional results exist beyond the number you specify, the nextToken\n element is returned in the response. Use nextToken in a subsequent request to retrieve\n additional results.

", + "smithy.api#documentation": "

The maximum number of results to return in the response. Use this parameter when\n paginating results. If additional results exist beyond the number you specify, the nextToken\n element is returned in the response. Use nextToken in a subsequent request to retrieve\n additional results. If not specified, returns 100 results.

", "smithy.api#httpQuery": "maxResults", "smithy.api#range": { "min": 1, @@ -2147,7 +2164,7 @@ "resourceArn": { "target": "com.amazonaws.codegurusecurity#ScanNameArn", "traits": { - "smithy.api#documentation": "

The ARN of the ScanName object. You can retrieve this ARN by calling\n ListScans or GetScan.

", + "smithy.api#documentation": "

The ARN of the ScanName object. You can retrieve this ARN by calling\n CreateScan, ListScans, or GetScan.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2183,30 +2200,30 @@ "openFindings": { "target": "com.amazonaws.codegurusecurity#FindingMetricsValuePerSeverity", "traits": { - "smithy.api#documentation": "

The number of open findings of each severity in an account.

" + "smithy.api#documentation": "

The number of open findings of each severity.

" } }, "categoriesWithMostFindings": { "target": "com.amazonaws.codegurusecurity#CategoriesWithMostFindings", "traits": { - "smithy.api#documentation": "

A list of CategoryWithFindingNum objects for the top 5 finding categories\n with the most open findings in an account.

" + "smithy.api#documentation": "

A list of CategoryWithFindingNum objects for the top 5 finding categories\n with the most findings.

" } }, "scansWithMostOpenFindings": { "target": "com.amazonaws.codegurusecurity#ScansWithMostOpenFindings", "traits": { - "smithy.api#documentation": "

A list of ScanNameWithFindingNum objects for the top 3 scans with the most\n number of open critical findings in an account.

" + "smithy.api#documentation": "

A list of ScanNameWithFindingNum objects for the top 3 scans with the most\n number of open findings.

" } }, "scansWithMostOpenCriticalFindings": { "target": "com.amazonaws.codegurusecurity#ScansWithMostOpenCriticalFindings", "traits": { - "smithy.api#documentation": "

A list of ScanNameWithFindingNum objects for the top 3 scans with the most\n number of open findings in an account.

" + "smithy.api#documentation": "

A list of ScanNameWithFindingNum objects for the top 3 scans with the most\n number of open critical findings.

" } } }, "traits": { - "smithy.api#documentation": "

Information about summary metrics in an account.

" + "smithy.api#documentation": "

A summary of metrics for an account as of a specified date.

" } }, "com.amazonaws.codegurusecurity#NextToken": { @@ -2289,18 +2306,18 @@ "id": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The identifier for the resource.

" + "smithy.api#documentation": "

The scanName of the scan that was run on the resource.

" } }, "subResourceId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The identifier for a section of the resource, such as an AWS Lambda layer.

" + "smithy.api#documentation": "

The identifier for a section of the resource.

" } } }, "traits": { - "smithy.api#documentation": "

Information about a resource, such as an Amazon S3 bucket or AWS Lambda function, that\n contains a finding.

" + "smithy.api#documentation": "

Information about a resource that contains a finding.

" } }, "com.amazonaws.codegurusecurity#ResourceId": { @@ -2309,12 +2326,12 @@ "codeArtifactId": { "target": "com.amazonaws.codegurusecurity#Uuid", "traits": { - "smithy.api#documentation": "

The identifier for the code file uploaded to the resource where a finding was detected.

" + "smithy.api#documentation": "

The identifier for the code file uploaded to the resource object. Returned by\n CreateUploadUrl when you upload resources to be scanned.

" } } }, "traits": { - "smithy.api#documentation": "

The identifier for a resource object that contains resources where a finding was detected.

" + "smithy.api#documentation": "

The identifier for a resource object that contains resources to scan. Specifying a\n codeArtifactId is required to create a scan.

" } }, "com.amazonaws.codegurusecurity#ResourceNotFoundException": { @@ -2396,12 +2413,12 @@ "findingNumber": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The number of open findings generated by a scan.

" + "smithy.api#documentation": "

The number of findings generated by a scan.

" } } }, "traits": { - "smithy.api#documentation": "

Information about a scan with open findings.

" + "smithy.api#documentation": "

Information about the number of findings generated by a scan.

" } }, "com.amazonaws.codegurusecurity#ScanState": { @@ -2590,7 +2607,7 @@ "code": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The suggested code to add to your file.

" + "smithy.api#documentation": "

The suggested code fix. If applicable, includes a code patch to replace your source code.

" } } }, @@ -2683,7 +2700,7 @@ "resourceArn": { "target": "com.amazonaws.codegurusecurity#ScanNameArn", "traits": { - "smithy.api#documentation": "

The ARN of the ScanName object. You can retrieve this ARN by calling\n ListScans or GetScan.

", + "smithy.api#documentation": "

The ARN of the ScanName object. You can retrieve this ARN by calling\n CreateScan, ListScans, or GetScan.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2799,7 +2816,7 @@ "resourceArn": { "target": "com.amazonaws.codegurusecurity#ScanNameArn", "traits": { - "smithy.api#documentation": "

The ARN of the ScanName object. You can retrieve this ARN by calling\n ListScans or GetScan.

", + "smithy.api#documentation": "

The ARN of the ScanName object. You can retrieve this ARN by calling\n CreateScan, ListScans, or GetScan.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2850,7 +2867,7 @@ } ], "traits": { - "smithy.api#documentation": "

Use to update account-level configuration with an encryption key.

", + "smithy.api#documentation": "

Use to update the encryption configuration for an account.

", "smithy.api#http": { "code": 200, "method": "PUT", @@ -2864,7 +2881,7 @@ "encryptionConfig": { "target": "com.amazonaws.codegurusecurity#EncryptionConfig", "traits": { - "smithy.api#documentation": "

The KMS key ARN you want to use for encryption. Defaults to service-side encryption if missing.

", + "smithy.api#documentation": "

The customer-managed KMS key ARN you want to use for encryption. If not specified,\n CodeGuru Security will use an AWS-managed key for encryption. If you previously specified a\n customer-managed KMS key and want CodeGuru Security to use an AWS-managed key for encryption\n instead, pass nothing.
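A minimal sketch, assuming the usual Soto code generation, of switching an account to a customer-managed key; the key ARN is a placeholder.

    import SotoCodeGuruSecurity   // assumed module name

    // Sketch: switch scan-result encryption to a customer-managed KMS key.
    // Sending an EncryptionConfig without a key ARN would revert to the AWS-managed key.
    func useCustomerManagedKey(_ codeGuru: CodeGuruSecurity, kmsKeyArn: String) async throws {
        let response = try await codeGuru.updateAccountConfiguration(.init(
            encryptionConfig: .init(kmsKeyArn: kmsKeyArn)
        ))
        print("Encrypting with:", response.encryptionConfig.kmsKeyArn ?? "AWS-managed key")
    }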

", "smithy.api#required": {} } } @@ -2879,7 +2896,7 @@ "encryptionConfig": { "target": "com.amazonaws.codegurusecurity#EncryptionConfig", "traits": { - "smithy.api#documentation": "

An EncryptionConfig object that contains the KMS key ARN to use for\n encryption.

", + "smithy.api#documentation": "

An EncryptionConfig object that contains the KMS key ARN that is used for\n encryption. If you did not specify a customer-managed KMS key in the request, returns empty.\n

", "smithy.api#required": {} } } @@ -3024,7 +3041,10 @@ "itemCount": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The number of times the vulnerability appears in your code.

" + "smithy.api#deprecated": { + "message": "This shape is not used." + }, + "smithy.api#documentation": "

The number of times the vulnerability appears in your code.

" } } }, diff --git a/models/codepipeline.json b/models/codepipeline.json index fd5aa3e825..777d741a7d 100644 --- a/models/codepipeline.json +++ b/models/codepipeline.json @@ -3653,12 +3653,12 @@ "result": { "target": "com.amazonaws.codepipeline#Result", "traits": { - "smithy.api#documentation": "

The specified result for when the failure conditions are met, such as rolling back the stage.

" + "smithy.api#documentation": "

The specified result for when the failure conditions are met, such as rolling back the\n stage.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration that specifies the result, such as rollback, to occur upon stage failure.

" + "smithy.api#documentation": "

The configuration that specifies the result, such as rollback, to occur upon stage\n failure.

" } }, "com.amazonaws.codepipeline#FailureDetails": { @@ -3871,7 +3871,22 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the metadata, structure, stages, and actions of a pipeline. Can be used to\n return the entire structure of a pipeline in JSON format, which can then be modified and\n used to update the pipeline structure with UpdatePipeline.

" + "smithy.api#documentation": "

Returns the metadata, structure, stages, and actions of a pipeline. Can be used to\n return the entire structure of a pipeline in JSON format, which can then be modified and\n used to update the pipeline structure with UpdatePipeline.

", + "smithy.test#smokeTests": [ + { + "id": "GetPipelineFailure", + "params": { + "name": "fake-pipeline" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.codepipeline#GetPipelineExecution": { @@ -4953,7 +4968,7 @@ "maxResults": { "target": "com.amazonaws.codepipeline#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value. Action execution history\n is retained for up to 12 months, based on action execution start times. Default value is\n 100.

\n \n

Detailed execution history is available for executions run on or after February\n 21, 2019.

\n
" + "smithy.api#documentation": "

The maximum number of results to return in a single call. To retrieve the remaining\n results, make another call with the returned nextToken value. Action execution history\n is retained for up to 12 months, based on action execution start times. Default value is\n 100.

" } }, "nextToken": { @@ -5081,7 +5096,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets a summary of the most recent executions for a pipeline.

", + "smithy.api#documentation": "

Gets a summary of the most recent executions for a pipeline.

\n \n

When applying the filter for pipeline executions that have succeeded in the stage,\n the operation returns all executions in the current pipeline version beginning on\n February 1, 2024.

\n
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -5168,7 +5183,20 @@ "outputToken": "nextToken", "items": "pipelines", "pageSize": "maxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListPipelinesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.codepipeline#ListPipelinesInput": { @@ -5828,7 +5856,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified pipeline execution is outdated and cannot be used as a target pipeline execution for rollback.

", + "smithy.api#documentation": "

The specified pipeline execution is outdated and cannot be used as a target pipeline\n execution for rollback.

", "smithy.api#error": "client" } }, @@ -7425,7 +7453,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution.

" + "smithy.api#documentation": "

A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution.

\n \n

For the S3_OBJECT_VERSION_ID and S3_OBJECT_KEY types of source revisions, either\n of the types can be used independently, or they can be used together to override the\n source with a specific ObjectKey and VersionID.
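A hedged sketch with the generated Soto CodePipeline client, pinning an S3 source action to a specific object key when starting an execution; the pipeline name, action name, and object key are placeholders.

    import SotoCodePipeline   // assumed module name

    // Sketch: start a pipeline execution with the S3 source pinned to a specific
    // object key instead of the latest revision. A version id override could be
    // combined with this by adding a second entry of type .s3ObjectVersionId.
    func startWithPinnedSource(_ codePipeline: CodePipeline) async throws -> String? {
        let response = try await codePipeline.startPipelineExecution(.init(
            name: "my-pipeline",                          // placeholder pipeline name
            sourceRevisions: [
                .init(
                    actionName: "S3Source",               // placeholder source action name
                    revisionType: .s3ObjectKey,
                    revisionValue: "builds/app-1.2.3.zip" // placeholder object key
                )
            ]
        ))
        return response.pipelineExecutionId
    }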

\n
" } }, "com.amazonaws.codepipeline#SourceRevisionOverrideList": { @@ -7460,6 +7488,12 @@ "traits": { "smithy.api#enumValue": "S3_OBJECT_VERSION_ID" } + }, + "S3_OBJECT_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3_OBJECT_KEY" + } } } }, @@ -7515,7 +7549,7 @@ "onFailure": { "target": "com.amazonaws.codepipeline#FailureConditions", "traits": { - "smithy.api#documentation": "

The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage.

" + "smithy.api#documentation": "

The method to use when a stage has not completed successfully. For example,\n configuring this field for rollback will roll back a failed stage automatically to the\n last successful pipeline execution in the stage.
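For example, a stage declaration that opts in to automatic rollback could be built like the minimal sketch below, assuming the usual Soto code generation; deployAction stands in for a fully configured ActionDeclaration.

    import SotoCodePipeline   // assumed module name

    // Sketch: a stage whose failure handling automatically rolls the stage back
    // to the last successful pipeline execution in that stage.
    func deployStage(with deployAction: CodePipeline.ActionDeclaration) -> CodePipeline.StageDeclaration {
        CodePipeline.StageDeclaration(
            actions: [deployAction],
            name: "Deploy",
            onFailure: .init(result: .rollback)
        )
    }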

" } } }, @@ -7543,7 +7577,7 @@ "type": { "target": "com.amazonaws.codepipeline#ExecutionType", "traits": { - "smithy.api#documentation": "

The type of pipeline execution for the stage, such as a rollback pipeline execution.

" + "smithy.api#documentation": "

The type of pipeline execution for the stage, such as a rollback pipeline\n execution.

" } } }, @@ -7922,12 +7956,12 @@ "stageName": { "target": "com.amazonaws.codepipeline#StageName", "traits": { - "smithy.api#documentation": "

The name of the stage for filtering for pipeline executions where the stage was successful in the current pipeline\n version.

" + "smithy.api#documentation": "

The name of the stage for filtering for pipeline executions where the stage was\n successful in the current pipeline version.

" } } }, "traits": { - "smithy.api#documentation": "

Filter for pipeline executions that have successfully completed the stage in the current pipeline version.

" + "smithy.api#documentation": "

Filter for pipeline executions that have successfully completed the stage in the\n current pipeline version.

" } }, "com.amazonaws.codepipeline#Tag": { diff --git a/models/cognito-identity-provider.json b/models/cognito-identity-provider.json index 7889954dc8..a10adaa7e3 100644 --- a/models/cognito-identity-provider.json +++ b/models/cognito-identity-provider.json @@ -358,7 +358,7 @@ "sdkId": "Cognito Identity Provider", "arnNamespace": "cognito-idp", "cloudFormationName": "Cognito", - "cloudTrailEventSource": "cognitoidentityprovider.amazonaws.com", + "cloudTrailEventSource": "cognito-idp.amazonaws.com", "docId": "cognito-idp-2016-04-18", "endpointPrefix": "cognito-idp" }, @@ -10636,7 +10636,22 @@ "outputToken": "NextToken", "items": "UserPools", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListUserPoolsSuccess", + "params": { + "MaxResults": 10 + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.cognitoidentityprovider#ListUserPoolsRequest": { @@ -15700,6 +15715,12 @@ "traits": { "smithy.api#enumValue": "FORCE_CHANGE_PASSWORD" } + }, + "EXTERNAL_PROVIDER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXTERNAL_PROVIDER" + } } } }, diff --git a/models/compute-optimizer.json b/models/compute-optimizer.json index 6a3ad2cf7e..6e23f90145 100644 --- a/models/compute-optimizer.json +++ b/models/compute-optimizer.json @@ -89,6 +89,12 @@ "target": "com.amazonaws.computeoptimizer#AccountId" } }, + "com.amazonaws.computeoptimizer#AllocatedStorage": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, "com.amazonaws.computeoptimizer#AutoScalingConfiguration": { "type": "enum", "members": { @@ -220,6 +226,12 @@ "smithy.api#documentation": "

An array of objects that describe the current configuration of the Auto Scaling\n group.

" } }, + "currentInstanceGpuInfo": { + "target": "com.amazonaws.computeoptimizer#GpuInfo", + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the current instance type of the Auto Scaling group.\n

" + } + }, "recommendationOptions": { "target": "com.amazonaws.computeoptimizer#AutoScalingGroupRecommendationOptions", "traits": { @@ -249,12 +261,6 @@ "traits": { "smithy.api#documentation": "

The applications that might be running on the instances in the Auto Scaling group\n as inferred by Compute Optimizer.

\n

Compute Optimizer can infer if one of the following applications might be running on\n the instances:

\n
    \n
  • \n

    \n AmazonEmr - Infers that Amazon EMR might be running on\n the instances.

    \n
  • \n
  • \n

    \n ApacheCassandra - Infers that Apache Cassandra might be running\n on the instances.

    \n
  • \n
  • \n

    \n ApacheHadoop - Infers that Apache Hadoop might be running on the\n instances.

    \n
  • \n
  • \n

    \n Memcached - Infers that Memcached might be running on the\n instances.

    \n
  • \n
  • \n

    \n NGINX - Infers that NGINX might be running on the\n instances.

    \n
  • \n
  • \n

    \n PostgreSql - Infers that PostgreSQL might be running on the\n instances.

    \n
  • \n
  • \n

    \n Redis - Infers that Redis might be running on the\n instances.

    \n
  • \n
  • \n

    \n Kafka - Infers that Kafka might be running on the\n instance.

    \n
  • \n
  • \n

    \n SQLServer - Infers that SQLServer might be running on the\n instance.

    \n
  • \n
" } - }, - "currentInstanceGpuInfo": { - "target": "com.amazonaws.computeoptimizer#GpuInfo", - "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the current instance type of the Auto Scaling group.\n

" - } } }, "traits": { @@ -270,6 +276,12 @@ "smithy.api#documentation": "

An array of objects that describe an Auto Scaling group configuration.

" } }, + "instanceGpuInfo": { + "target": "com.amazonaws.computeoptimizer#GpuInfo", + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the recommended instance type of the Auto Scaling group.\n

" + } + }, "projectedUtilizationMetrics": { "target": "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics", "traits": { @@ -296,23 +308,17 @@ "smithy.api#documentation": "

An object that describes the savings opportunity for the Auto Scaling group\n recommendation option. Savings opportunity includes the estimated monthly savings amount\n and percentage.

" } }, - "migrationEffort": { - "target": "com.amazonaws.computeoptimizer#MigrationEffort", - "traits": { - "smithy.api#documentation": "

The level of effort required to migrate from the current instance type to the\n recommended instance type.

\n

For example, the migration effort is Low if Amazon EMR is the\n inferred workload type and an Amazon Web Services Graviton instance type is recommended.\n The migration effort is Medium if a workload type couldn't be inferred but\n an Amazon Web Services Graviton instance type is recommended. The migration effort is\n VeryLow if both the current and recommended instance types are of the\n same CPU architecture.

" - } - }, - "instanceGpuInfo": { - "target": "com.amazonaws.computeoptimizer#GpuInfo", - "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the recommended instance type of the Auto Scaling group.\n

" - } - }, "savingsOpportunityAfterDiscounts": { "target": "com.amazonaws.computeoptimizer#AutoScalingGroupSavingsOpportunityAfterDiscounts", "traits": { "smithy.api#documentation": "

\n An object that describes the savings opportunity for the Auto Scaling group recommendation option that includes Savings Plans and Reserved Instances discounts. \n Savings opportunity includes the estimated monthly savings and percentage.\n

" } + }, + "migrationEffort": { + "target": "com.amazonaws.computeoptimizer#MigrationEffort", + "traits": { + "smithy.api#documentation": "

The level of effort required to migrate from the current instance type to the\n recommended instance type.

\n

For example, the migration effort is Low if Amazon EMR is the\n inferred workload type and an Amazon Web Services Graviton instance type is recommended.\n The migration effort is Medium if a workload type couldn't be inferred but\n an Amazon Web Services Graviton instance type is recommended. The migration effort is\n VeryLow if both the current and recommended instance types are of the\n same CPU architecture.

" + } } }, "traits": { @@ -383,6 +389,9 @@ { "target": "com.amazonaws.computeoptimizer#ExportLicenseRecommendations" }, + { + "target": "com.amazonaws.computeoptimizer#ExportRDSDatabaseRecommendations" + }, { "target": "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendations" }, @@ -416,6 +425,12 @@ { "target": "com.amazonaws.computeoptimizer#GetLicenseRecommendations" }, + { + "target": "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationProjectedMetrics" + }, + { + "target": "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendations" + }, { "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferences" }, @@ -1397,6 +1412,9 @@ } } }, + "com.amazonaws.computeoptimizer#CurrentDBInstanceClass": { + "type": "string" + }, "com.amazonaws.computeoptimizer#CurrentInstanceType": { "type": "string" }, @@ -1554,6 +1572,48 @@ } } }, + "com.amazonaws.computeoptimizer#DBInstanceClass": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#DBStorageConfiguration": { + "type": "structure", + "members": { + "storageType": { + "target": "com.amazonaws.computeoptimizer#StorageType", + "traits": { + "smithy.api#documentation": "

\n The type of RDS storage.\n

" + } + }, + "allocatedStorage": { + "target": "com.amazonaws.computeoptimizer#AllocatedStorage", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The size of the RDS storage in gigabytes (GB).\n

" + } + }, + "iops": { + "target": "com.amazonaws.computeoptimizer#NullableIOPS", + "traits": { + "smithy.api#documentation": "

\n The provisioned IOPS of the RDS storage.\n

" + } + }, + "maxAllocatedStorage": { + "target": "com.amazonaws.computeoptimizer#NullableMaxAllocatedStorage", + "traits": { + "smithy.api#documentation": "

\n The maximum limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the RDS instance.\n

" + } + }, + "storageThroughput": { + "target": "com.amazonaws.computeoptimizer#NullableStorageThroughput", + "traits": { + "smithy.api#documentation": "

\n The storage throughput of the RDS storage.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The configuration of the recommended RDS storage.\n

" + } + }, "com.amazonaws.computeoptimizer#DeleteRecommendationPreferences": { "type": "operation", "input": { @@ -1598,7 +1658,7 @@ "resourceType": { "target": "com.amazonaws.computeoptimizer#ResourceType", "traits": { - "smithy.api#documentation": "

The target resource type of the recommendation preference to delete.

\n

The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.

\n \n

The valid values for this parameter are Ec2Instance and\n AutoScalingGroup.

\n
", + "smithy.api#documentation": "

The target resource type of the recommendation preference to delete.

\n

The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.

", "smithy.api#required": {} } }, @@ -2239,17 +2299,17 @@ "smithy.api#documentation": "

\n The risk of the current Amazon ECS service not meeting the performance needs of its workloads. \n The higher the risk, the more likely the current service can't meet the performance \n requirements of its workload.\n

" } }, - "tags": { - "target": "com.amazonaws.computeoptimizer#Tags", - "traits": { - "smithy.api#documentation": "

\n A list of tags assigned to your Amazon ECS service recommendations.\n

" - } - }, "effectiveRecommendationPreferences": { "target": "com.amazonaws.computeoptimizer#ECSEffectiveRecommendationPreferences", "traits": { "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Amazon ECS services.\n

" } + }, + "tags": { + "target": "com.amazonaws.computeoptimizer#Tags", + "traits": { + "smithy.api#documentation": "

\n A list of tags assigned to your Amazon ECS service recommendations.\n

" + } } }, "traits": { @@ -2375,6 +2435,12 @@ "savingsOpportunity": { "target": "com.amazonaws.computeoptimizer#SavingsOpportunity" }, + "savingsOpportunityAfterDiscounts": { + "target": "com.amazonaws.computeoptimizer#ECSSavingsOpportunityAfterDiscounts", + "traits": { + "smithy.api#documentation": "

\n Describes the savings opportunity for Amazon ECS service recommendations or for the recommendation option.\n

\n

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.

" + } + }, "projectedUtilizationMetrics": { "target": "com.amazonaws.computeoptimizer#ECSServiceProjectedUtilizationMetrics", "traits": { @@ -2386,12 +2452,6 @@ "traits": { "smithy.api#documentation": "

\n The CPU and memory size recommendations for the containers within the task of your Amazon ECS service.\n

" } - }, - "savingsOpportunityAfterDiscounts": { - "target": "com.amazonaws.computeoptimizer#ECSSavingsOpportunityAfterDiscounts", - "traits": { - "smithy.api#documentation": "

\n Describes the savings opportunity for Amazon ECS service recommendations or for the recommendation option.\n

\n

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.

" - } } }, "traits": { @@ -2521,7 +2581,7 @@ "cpuVendorArchitectures": { "target": "com.amazonaws.computeoptimizer#CpuVendorArchitectures", "traits": { - "smithy.api#documentation": "

Describes the CPU vendor and architecture for an instance or Auto Scaling group\n recommendations.

\n

For example, when you specify AWS_ARM64 with:

\n " + "smithy.api#documentation": "

Describes the CPU vendor and architecture for an instance or Auto Scaling group\n recommendations.

\n

For example, when you specify AWS_ARM64 with:

\n " } }, "enhancedInfrastructureMetrics": { @@ -2571,6 +2631,12 @@ "smithy.api#documentation": "

Describes the effective recommendation preferences for a resource.

" } }, + "com.amazonaws.computeoptimizer#Engine": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#EngineVersion": { + "type": "string" + }, "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics": { "type": "enum", "members": { @@ -3283,6 +3349,109 @@ "smithy.api#output": {} } }, + "com.amazonaws.computeoptimizer#ExportRDSDatabaseRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#ExportRDSDatabaseRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#ExportRDSDatabaseRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#LimitExceededException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Export optimization recommendations for your Amazon Relational Database Service (Amazon RDS).\n

\n

Recommendations are exported in a comma-separated values (CSV) file, and its metadata\n in a JavaScript Object Notation (JSON) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting\n Recommendations in the Compute Optimizer User\n Guide.

\n

You can have only one Amazon RDS export job in progress per Amazon Web Services Region.
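A minimal sketch of starting such an export with the Soto client generated from this model; the bucket name and key prefix are placeholders, and the method and shape names assume Soto's usual code generation.

    import SotoComputeOptimizer   // assumed module name

    // Sketch: kick off a CSV export of Amazon RDS recommendations into an existing
    // S3 bucket, then poll DescribeRecommendationExportJobs with the returned job ID.
    // Only one RDS export job can be in progress per Region.
    func exportRDSRecommendations(_ computeOptimizer: ComputeOptimizer) async throws -> String? {
        let response = try await computeOptimizer.exportRDSDatabaseRecommendations(.init(
            fileFormat: .csv,
            s3DestinationConfig: .init(
                bucket: "my-compute-optimizer-exports",   // placeholder, bucket must already exist
                keyPrefix: "rds"
            )
        ))
        return response.jobId
    }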

" + } + }, + "com.amazonaws.computeoptimizer#ExportRDSDatabaseRecommendationsRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", + "traits": { + "smithy.api#documentation": "

\n The Amazon Web Services account IDs for the export of Amazon RDS recommendations.\n

\n

If your account is the management account or the delegated administrator \n of an organization, use this parameter to specify the member account for which you want to \n export recommendations.

\n

This parameter can't be specified together with the include member accounts \n parameter. The parameters are mutually exclusive.

\n

If this parameter or the include member accounts parameter is omitted,\n the recommendations for member accounts aren't included in the export.

\n

You can specify multiple account IDs per request.

" + } + }, + "filters": { + "target": "com.amazonaws.computeoptimizer#RDSDBRecommendationFilters", + "traits": { + "smithy.api#documentation": "

\n An array of objects to specify a filter that exports a more specific set of \n Amazon RDS recommendations.\n

" + } + }, + "fieldsToExport": { + "target": "com.amazonaws.computeoptimizer#ExportableRDSDBFields", + "traits": { + "smithy.api#documentation": "

The recommendations data to include in the export file. For more information about the\n fields that can be exported, see Exported files in the Compute Optimizer User\n Guide.

" + } + }, + "s3DestinationConfig": { + "target": "com.amazonaws.computeoptimizer#S3DestinationConfig", + "traits": { + "smithy.api#required": {} + } + }, + "fileFormat": { + "target": "com.amazonaws.computeoptimizer#FileFormat", + "traits": { + "smithy.api#documentation": "

\n The format of the export file. \n

\n

The CSV file is the only export file format currently supported.

" + } + }, + "includeMemberAccounts": { + "target": "com.amazonaws.computeoptimizer#IncludeMemberAccounts", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

If your account is the management account or the delegated administrator of an organization,\n this parameter indicates whether to include recommendations for resources in all member accounts of\n the organization.

\n

The member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.

\n

If this parameter is omitted, recommendations for member accounts of the \n organization aren't included in the export file.

\n

If this parameter or the account ID parameter is omitted, recommendations for \n member accounts aren't included in the export.

" + } + }, + "recommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferences" + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#ExportRDSDatabaseRecommendationsResponse": { + "type": "structure", + "members": { + "jobId": { + "target": "com.amazonaws.computeoptimizer#JobId", + "traits": { + "smithy.api#documentation": "

\n The identification number of the export job.\n

\n

To view the status of an export job, use the \n DescribeRecommendationExportJobs action and specify the job ID.\n

" + } + }, + "s3Destination": { + "target": "com.amazonaws.computeoptimizer#S3Destination" + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.computeoptimizer#ExportableAutoScalingGroupField": { "type": "enum", "members": { @@ -3598,6 +3767,18 @@ "smithy.api#enumValue": "EffectiveRecommendationPreferencesInferredWorkloadTypes" } }, + "EFFECTIVE_RECOMMENDATION_PREFERENCES_PREFERRED_RESOURCES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EffectiveRecommendationPreferencesPreferredResources" + } + }, + "EFFECTIVE_RECOMMENDATION_PREFERENCES_LOOKBACK_PERIOD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EffectiveRecommendationPreferencesLookBackPeriod" + } + }, "INFERRED_WORKLOAD_TYPES": { "target": "smithy.api#Unit", "traits": { @@ -3669,18 +3850,6 @@ "traits": { "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" } - }, - "EFFECTIVE_RECOMMENDATION_PREFERENCES_PREFERRED_RESOURCES": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EffectiveRecommendationPreferencesPreferredResources" - } - }, - "EFFECTIVE_RECOMMENDATION_PREFERENCES_LOOKBACK_PERIOD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EffectiveRecommendationPreferencesLookBackPeriod" - } } } }, @@ -4190,16 +4359,16 @@ "smithy.api#enumValue": "EffectiveRecommendationPreferencesExternalMetricsSource" } }, - "INSTANCE_STATE": { + "TAGS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "InstanceState" + "smithy.api#enumValue": "Tags" } }, - "TAGS": { + "INSTANCE_STATE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Tags" + "smithy.api#enumValue": "InstanceState" } }, "EXTERNAL_METRIC_STATUS_CODE": { @@ -4640,921 +4809,866 @@ "target": "com.amazonaws.computeoptimizer#ExportableLicenseField" } }, - "com.amazonaws.computeoptimizer#ExportableVolumeField": { + "com.amazonaws.computeoptimizer#ExportableRDSDBField": { "type": "enum", "members": { + "RESOURCE_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ResourceArn" + } + }, "ACCOUNT_ID": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "AccountId" } }, - "VOLUME_ARN": { + "ENGINE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "VolumeArn" + "smithy.api#enumValue": "Engine" } }, - "FINDING": { + "ENGINE_VERSION": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Finding" + "smithy.api#enumValue": "EngineVersion" } }, - "UTILIZATION_METRICS_VOLUME_READ_OPS_PER_SECOND_MAXIMUM": { + "IDLE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "UtilizationMetricsVolumeReadOpsPerSecondMaximum" + "smithy.api#enumValue": "Idle" } }, - "UTILIZATION_METRICS_VOLUME_WRITE_OPS_PER_SECOND_MAXIMUM": { + "MULTI_AZ_DB_INSTANCE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "UtilizationMetricsVolumeWriteOpsPerSecondMaximum" + "smithy.api#enumValue": "MultiAZDBInstance" } }, - "UTILIZATION_METRICS_VOLUME_READ_BYTES_PER_SECOND_MAXIMUM": { + "CURRENT_DB_INSTANCE_CLASS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "UtilizationMetricsVolumeReadBytesPerSecondMaximum" + "smithy.api#enumValue": "CurrentDBInstanceClass" } }, - "UTILIZATION_METRICS_VOLUME_WRITE_BYTES_PER_SECOND_MAXIMUM": { + "CURRENT_STORAGE_CONFIGURATION_STORAGE_TYPE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": 
"UtilizationMetricsVolumeWriteBytesPerSecondMaximum" + "smithy.api#enumValue": "CurrentStorageConfigurationStorageType" } }, - "LOOKBACK_PERIOD_IN_DAYS": { + "CURRENT_STORAGE_CONFIGURATION_ALLOCATED_STORAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "LookbackPeriodInDays" + "smithy.api#enumValue": "CurrentStorageConfigurationAllocatedStorage" } }, - "CURRENT_CONFIGURATION_VOLUME_TYPE": { + "CURRENT_STORAGE_CONFIGURATION_MAX_ALLOCATED_STORAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationVolumeType" + "smithy.api#enumValue": "CurrentStorageConfigurationMaxAllocatedStorage" } }, - "CURRENT_CONFIGURATION_VOLUME_BASELINE_IOPS": { + "CURRENT_STORAGE_CONFIGURATION_IOPS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationVolumeBaselineIOPS" + "smithy.api#enumValue": "CurrentStorageConfigurationIOPS" } }, - "CURRENT_CONFIGURATION_VOLUME_BASELINE_THROUGHPUT": { + "CURRENT_STORAGE_CONFIGURATION_STORAGE_THROUGHPUT": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationVolumeBaselineThroughput" + "smithy.api#enumValue": "CurrentStorageConfigurationStorageThroughput" } }, - "CURRENT_CONFIGURATION_VOLUME_BURST_IOPS": { + "CURRENT_INSTANCE_ON_DEMAND_HOURLY_PRICE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationVolumeBurstIOPS" + "smithy.api#enumValue": "CurrentInstanceOnDemandHourlyPrice" } }, - "CURRENT_CONFIGURATION_VOLUME_BURST_THROUGHPUT": { + "CURRENT_STORAGE_ON_DEMAND_MONTHLY_PRICE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationVolumeBurstThroughput" + "smithy.api#enumValue": "CurrentStorageOnDemandMonthlyPrice" } }, - "CURRENT_CONFIGURATION_VOLUME_SIZE": { + "LOOKBACK_PERIOD_IN_DAYS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationVolumeSize" + "smithy.api#enumValue": "LookbackPeriodInDays" } }, - "CURRENT_MONTHLY_PRICE": { + "UTILIZATION_METRICS_CPU_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentMonthlyPrice" + "smithy.api#enumValue": "UtilizationMetricsCpuMaximum" } }, - "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_TYPE": { + "UTILIZATION_METRICS_MEMORY_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeType" + "smithy.api#enumValue": "UtilizationMetricsMemoryMaximum" } }, - "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BASELINE_IOPS": { + "UTILIZATION_METRICS_EBS_VOLUME_STORAGE_SPACE_UTILIZATION_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBaselineIOPS" + "smithy.api#enumValue": "UtilizationMetricsEBSVolumeStorageSpaceUtilizationMaximum" } }, - "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BASELINE_THROUGHPUT": { + "UTILIZATION_METRICS_NETWORK_RECEIVE_THROUGHPUT_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBaselineThroughput" + "smithy.api#enumValue": "UtilizationMetricsNetworkReceiveThroughputMaximum" } }, - "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BURST_IOPS": { + "UTILIZATION_METRICS_NETWORK_TRANSMIT_THROUGHPUT_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBurstIOPS" + "smithy.api#enumValue": "UtilizationMetricsNetworkTransmitThroughputMaximum" } }, - 
"RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BURST_THROUGHPUT": { + "UTILIZATION_METRICS_EBS_VOLUME_READ_IOPS_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBurstThroughput" + "smithy.api#enumValue": "UtilizationMetricsEBSVolumeReadIOPSMaximum" } }, - "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_SIZE": { + "UTILIZATION_METRICS_EBS_VOLUME_WRITE_IOPS_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeSize" + "smithy.api#enumValue": "UtilizationMetricsEBSVolumeWriteIOPSMaximum" } }, - "RECOMMENDATION_OPTIONS_MONTHLY_PRICE": { + "UTILIZATION_METRICS_EBS_VOLUME_READ_THROUGHPUT_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsMonthlyPrice" + "smithy.api#enumValue": "UtilizationMetricsEBSVolumeReadThroughputMaximum" } }, - "RECOMMENDATION_OPTIONS_PERFORMANCE_RISK": { + "UTILIZATION_METRICS_EBS_VOLUME_WRITE_THROUGHPUT_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsPerformanceRisk" + "smithy.api#enumValue": "UtilizationMetricsEBSVolumeWriteThroughputMaximum" } }, - "LAST_REFRESH_TIMESTAMP": { + "UTILIZATION_METRICS_DATABASE_CONNECTIONS_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "LastRefreshTimestamp" + "smithy.api#enumValue": "UtilizationMetricsDatabaseConnectionsMaximum" } }, - "CURRENT_PERFORMANCE_RISK": { + "INSTANCE_FINDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentPerformanceRisk" + "smithy.api#enumValue": "InstanceFinding" } }, - "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE": { + "INSTANCE_FINDING_REASON_CODES": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsSavingsOpportunityPercentage" + "smithy.api#enumValue": "InstanceFindingReasonCodes" } }, - "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY": { + "STORAGE_FINDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsCurrency" + "smithy.api#enumValue": "StorageFinding" } }, - "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE": { + "STORAGE_FINDING_REASON_CODES": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsValue" + "smithy.api#enumValue": "StorageFindingReasonCodes" } }, - "ROOT_VOLUME": { + "INSTANCE_RECOMMENDATION_OPTIONS_DB_INSTANCE_CLASS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RootVolume" + "smithy.api#enumValue": "InstanceRecommendationOptionsDBInstanceClass" } }, - "TAGS": { + "INSTANCE_RECOMMENDATION_OPTIONS_RANK": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Tags" + "smithy.api#enumValue": "InstanceRecommendationOptionsRank" } }, - "CURRENT_CONFIGURATION_ROOT_VOLUME": { + "INSTANCE_RECOMMENDATION_OPTIONS_PERFORMANCE_RISK": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CurrentConfigurationRootVolume" + "smithy.api#enumValue": "InstanceRecommendationOptionsPerformanceRisk" } }, - "EFFECTIVE_RECOMMENDATION_PREFERENCES_SAVINGS_ESTIMATION_MODE": { + "INSTANCE_RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_CPU_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "EffectiveRecommendationPreferencesSavingsEstimationMode" + "smithy.api#enumValue": "InstanceRecommendationOptionsProjectedUtilizationMetricsCpuMaximum" 
} }, - "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_AFTER_DISCOUNTS_PERCENTAGE": { + "STORAGE_RECOMMENDATION_OPTIONS_STORAGE_TYPE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsSavingsOpportunityAfterDiscountsPercentage" + "smithy.api#enumValue": "StorageRecommendationOptionsStorageType" } }, - "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY_AFTER_DISCOUNTS": { + "STORAGE_RECOMMENDATION_OPTIONS_ALLOCATED_STORAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" + "smithy.api#enumValue": "StorageRecommendationOptionsAllocatedStorage" } }, - "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE_AFTER_DISCOUNTS": { + "STORAGE_RECOMMENDATION_OPTIONS_MAX_ALLOCATED_STORAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" - } - } - } - }, - "com.amazonaws.computeoptimizer#ExportableVolumeFields": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#ExportableVolumeField" - } - }, - "com.amazonaws.computeoptimizer#ExternalMetricStatus": { - "type": "structure", - "members": { - "statusCode": { - "target": "com.amazonaws.computeoptimizer#ExternalMetricStatusCode", - "traits": { - "smithy.api#documentation": "

\n The status code for Compute Optimizer's integration with an external metrics provider.\n

" + "smithy.api#enumValue": "StorageRecommendationOptionsMaxAllocatedStorage" } }, - "statusReason": { - "target": "com.amazonaws.computeoptimizer#ExternalMetricStatusReason", + "STORAGE_RECOMMENDATION_OPTIONS_IOPS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n The reason for Compute Optimizer's integration status with your external metric provider.\n

" + "smithy.api#enumValue": "StorageRecommendationOptionsIOPS" } - } - }, - "traits": { - "smithy.api#documentation": "

\n Describes Compute Optimizer's integration status with your chosen external metric provider. For example, Datadog.\n

" - } - }, - "com.amazonaws.computeoptimizer#ExternalMetricStatusCode": { - "type": "enum", - "members": { - "NO_EXTERNAL_METRIC_SET": { + }, + "STORAGE_RECOMMENDATION_OPTIONS_STORAGE_THROUGHPUT": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NO_EXTERNAL_METRIC_SET" + "smithy.api#enumValue": "StorageRecommendationOptionsStorageThroughput" } }, - "INTEGRATION_SUCCESS": { + "STORAGE_RECOMMENDATION_OPTIONS_RANK": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "INTEGRATION_SUCCESS" + "smithy.api#enumValue": "StorageRecommendationOptionsRank" } }, - "DATADOG_INTEGRATION_ERROR": { + "INSTANCE_RECOMMENDATION_OPTIONS_INSTANCE_ON_DEMAND_HOURLY_PRICE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "DATADOG_INTEGRATION_ERROR" + "smithy.api#enumValue": "InstanceRecommendationOptionsInstanceOnDemandHourlyPrice" } }, - "DYNATRACE_INTEGRATION_ERROR": { + "INSTANCE_RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "DYNATRACE_INTEGRATION_ERROR" + "smithy.api#enumValue": "InstanceRecommendationOptionsSavingsOpportunityPercentage" } }, - "NEWRELIC_INTEGRATION_ERROR": { + "INSTANCE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NEWRELIC_INTEGRATION_ERROR" + "smithy.api#enumValue": "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrency" } }, - "INSTANA_INTEGRATION_ERROR": { + "INSTANCE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "INSTANA_INTEGRATION_ERROR" + "smithy.api#enumValue": "InstanceRecommendationOptionsEstimatedMonthlySavingsValue" } }, - "INSUFFICIENT_DATADOG_METRICS": { + "INSTANCE_RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_AFTER_DISCOUNTS_PERCENTAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "INSUFFICIENT_DATADOG_METRICS" + "smithy.api#enumValue": "InstanceRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage" } }, - "INSUFFICIENT_DYNATRACE_METRICS": { + "INSTANCE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY_AFTER_DISCOUNTS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "INSUFFICIENT_DYNATRACE_METRICS" + "smithy.api#enumValue": "InstanceRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" } }, - "INSUFFICIENT_NEWRELIC_METRICS": { + "INSTANCE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE_AFTER_DISCOUNTS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "INSUFFICIENT_NEWRELIC_METRICS" + "smithy.api#enumValue": "InstanceRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" } }, - "INSUFFICIENT_INSTANA_METRICS": { + "STORAGE_RECOMMENDATION_OPTIONS_ON_DEMAND_MONTHLY_PRICE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "INSUFFICIENT_INSTANA_METRICS" + "smithy.api#enumValue": "StorageRecommendationOptionsOnDemandMonthlyPrice" } - } - } - }, - "com.amazonaws.computeoptimizer#ExternalMetricStatusReason": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#ExternalMetricsPreference": { - "type": "structure", - "members": { - "source": { - "target": "com.amazonaws.computeoptimizer#ExternalMetricsSource", + }, + "STORAGE_RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

Contains the source options for external metrics preferences.

" + "smithy.api#enumValue": "StorageRecommendationOptionsSavingsOpportunityPercentage" } - } - }, - "traits": { - "smithy.api#documentation": "

Describes the external metrics preferences for EC2 rightsizing recommendations.\n

" - } - }, - "com.amazonaws.computeoptimizer#ExternalMetricsSource": { - "type": "enum", - "members": { - "DATADOG": { + }, + "STORAGE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Datadog" + "smithy.api#enumValue": "StorageRecommendationOptionsEstimatedMonthlySavingsCurrency" } }, - "DYNATRACE": { + "STORAGE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Dynatrace" + "smithy.api#enumValue": "StorageRecommendationOptionsEstimatedMonthlySavingsValue" } }, - "NEWRELIC": { + "STORAGE_RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_AFTER_DISCOUNTS_PERCENTAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NewRelic" + "smithy.api#enumValue": "StorageRecommendationOptionsSavingsOpportunityAfterDiscountsPercentage" } }, - "INSTANA": { + "STORAGE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY_AFTER_DISCOUNTS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Instana" + "smithy.api#enumValue": "StorageRecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" } - } - } - }, - "com.amazonaws.computeoptimizer#FailureReason": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#FileFormat": { - "type": "enum", - "members": { - "CSV": { + }, + "STORAGE_RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE_AFTER_DISCOUNTS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Csv" + "smithy.api#enumValue": "StorageRecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" } - } - } - }, - "com.amazonaws.computeoptimizer#Filter": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.computeoptimizer#FilterName", + }, + "EFFECTIVE_RECOMMENDATION_PREFERENCES_CPU_VENDOR_ARCHITECTURES": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the filter.

Specify Finding to return recommendations with a specific finding classification. For example, Underprovisioned.

Specify RecommendationSourceType to return recommendations of a specific resource type. For example, Ec2Instance.

Specify FindingReasonCodes to return recommendations with a specific finding reason code. For example, CPUUnderprovisioned.

Specify InferredWorkloadTypes to return recommendations of a specific inferred workload. For example, Redis.

You can filter your EC2 instance recommendations by tag:key and tag-key tags.

A tag:key is a key and value combination of a tag assigned to your recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

A tag-key is the key of a tag assigned to your recommendations. Use this filter to find all of your recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your recommendations with a tag key value of Owner or without any tag keys assigned.

" + "smithy.api#enumValue": "EffectiveRecommendationPreferencesCpuVendorArchitectures" } }, - "values": { - "target": "com.amazonaws.computeoptimizer#FilterValues", + "EFFECTIVE_RECOMMENDATION_PREFERENCES_ENHANCED_INFRASTRUCTURE_METRICS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The value of the filter.

\n

The valid values for this parameter are as follows, depending on what you specify for\n the name parameter and the resource type that you wish to filter results\n for:

  • Specify Optimized or NotOptimized if you specify the name parameter as Finding and you want to filter results for Auto Scaling groups.
  • Specify Underprovisioned, Overprovisioned, or Optimized if you specify the name parameter as Finding and you want to filter results for EC2 instances.
  • Specify Ec2Instance or AutoScalingGroup if you specify the name parameter as RecommendationSourceType.
  • Specify one of the following options if you specify the name parameter as FindingReasonCodes:
      • CPUOverprovisioned — The instance’s CPU configuration can be sized down while still meeting the performance requirements of your workload.
      • CPUUnderprovisioned — The instance’s CPU configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better CPU performance.
      • MemoryOverprovisioned — The instance’s memory configuration can be sized down while still meeting the performance requirements of your workload.
      • MemoryUnderprovisioned — The instance’s memory configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better memory performance.
      • EBSThroughputOverprovisioned — The instance’s EBS throughput configuration can be sized down while still meeting the performance requirements of your workload.
      • EBSThroughputUnderprovisioned — The instance’s EBS throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS throughput performance.
      • EBSIOPSOverprovisioned — The instance’s EBS IOPS configuration can be sized down while still meeting the performance requirements of your workload.
      • EBSIOPSUnderprovisioned — The instance’s EBS IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS IOPS performance.
      • NetworkBandwidthOverprovisioned — The instance’s network bandwidth configuration can be sized down while still meeting the performance requirements of your workload.
      • NetworkBandwidthUnderprovisioned — The instance’s network bandwidth configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network bandwidth performance. This finding reason happens when the NetworkIn or NetworkOut performance of an instance is impacted.
      • NetworkPPSOverprovisioned — The instance’s network PPS (packets per second) configuration can be sized down while still meeting the performance requirements of your workload.
      • NetworkPPSUnderprovisioned — The instance’s network PPS (packets per second) configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network PPS performance.
      • DiskIOPSOverprovisioned — The instance’s disk IOPS configuration can be sized down while still meeting the performance requirements of your workload.
      • DiskIOPSUnderprovisioned — The instance’s disk IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk IOPS performance.
      • DiskThroughputOverprovisioned — The instance’s disk throughput configuration can be sized down while still meeting the performance requirements of your workload.
      • DiskThroughputUnderprovisioned — The instance’s disk throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk throughput performance.
" + "smithy.api#enumValue": "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics" } - } - }, - "traits": { - "smithy.api#documentation": "

Describes a filter that returns a more specific list of recommendations. Use this\n filter with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

\n

You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and JobFilter with\n the DescribeRecommendationExportJobs action.

" - } - }, - "com.amazonaws.computeoptimizer#FilterName": { - "type": "enum", - "members": { - "FINDING": { + }, + "EFFECTIVE_RECOMMENDATION_PREFERENCES_LOOKBACK_PERIOD": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Finding" + "smithy.api#enumValue": "EffectiveRecommendationPreferencesLookBackPeriod" } }, - "FINDING_REASON_CODES": { + "EFFECTIVE_RECOMMENDATION_PREFERENCES_SAVINGS_ESTIMATION_MODE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "FindingReasonCodes" + "smithy.api#enumValue": "EffectiveRecommendationPreferencesSavingsEstimationMode" } }, - "RECOMMENDATION_SOURCE_TYPE": { + "LAST_REFRESH_TIMESTAMP": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "RecommendationSourceType" + "smithy.api#enumValue": "LastRefreshTimestamp" } }, - "INFERRED_WORKLOAD_TYPES": { + "TAGS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "InferredWorkloadTypes" + "smithy.api#enumValue": "Tags" } } } }, - "com.amazonaws.computeoptimizer#FilterValue": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#FilterValues": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#FilterValue" - } - }, - "com.amazonaws.computeoptimizer#Filters": { + "com.amazonaws.computeoptimizer#ExportableRDSDBFields": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#Filter" + "target": "com.amazonaws.computeoptimizer#ExportableRDSDBField" } }, - "com.amazonaws.computeoptimizer#Finding": { + "com.amazonaws.computeoptimizer#ExportableVolumeField": { "type": "enum", "members": { - "UNDER_PROVISIONED": { + "ACCOUNT_ID": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Underprovisioned" + "smithy.api#enumValue": "AccountId" } }, - "OVER_PROVISIONED": { + "VOLUME_ARN": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Overprovisioned" + "smithy.api#enumValue": "VolumeArn" } }, - "OPTIMIZED": { + "FINDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Optimized" + "smithy.api#enumValue": "Finding" } }, - "NOT_OPTIMIZED": { + "UTILIZATION_METRICS_VOLUME_READ_OPS_PER_SECOND_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NotOptimized" + "smithy.api#enumValue": "UtilizationMetricsVolumeReadOpsPerSecondMaximum" } - } - } - }, - "com.amazonaws.computeoptimizer#FindingReasonCode": { - "type": "enum", - "members": { - "MEMORY_OVER_PROVISIONED": { + }, + "UTILIZATION_METRICS_VOLUME_WRITE_OPS_PER_SECOND_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "MemoryOverprovisioned" + "smithy.api#enumValue": "UtilizationMetricsVolumeWriteOpsPerSecondMaximum" } }, - "MEMORY_UNDER_PROVISIONED": { + "UTILIZATION_METRICS_VOLUME_READ_BYTES_PER_SECOND_MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "MemoryUnderprovisioned" + "smithy.api#enumValue": "UtilizationMetricsVolumeReadBytesPerSecondMaximum" } - } - } - }, - "com.amazonaws.computeoptimizer#FunctionArn": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#FunctionArns": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#FunctionArn" - } - }, - "com.amazonaws.computeoptimizer#FunctionVersion": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendations": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsRequest" - }, - "output": { - "target": 
"com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" + "UTILIZATION_METRICS_VOLUME_WRITE_BYTES_PER_SECOND_MAXIMUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UtilizationMetricsVolumeWriteBytesPerSecondMaximum" + } }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + "LOOKBACK_PERIOD_IN_DAYS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LookbackPeriodInDays" + } }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + "CURRENT_CONFIGURATION_VOLUME_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationVolumeType" + } }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + "CURRENT_CONFIGURATION_VOLUME_BASELINE_IOPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationVolumeBaselineIOPS" + } }, - { - "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + "CURRENT_CONFIGURATION_VOLUME_BASELINE_THROUGHPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationVolumeBaselineThroughput" + } }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + "CURRENT_CONFIGURATION_VOLUME_BURST_IOPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentConfigurationVolumeBurstIOPS" + } }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "

Returns Auto Scaling group recommendations.

\n

Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.

" - } - }, - "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsRequest": { - "type": "structure", - "members": { - "accountIds": { - "target": "com.amazonaws.computeoptimizer#AccountIds", + "CURRENT_CONFIGURATION_VOLUME_BURST_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return Auto Scaling group\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return Auto Scaling group\n recommendations.

\n

Only one account ID can be specified per request.

" + "smithy.api#enumValue": "CurrentConfigurationVolumeBurstThroughput" } }, - "autoScalingGroupArns": { - "target": "com.amazonaws.computeoptimizer#AutoScalingGroupArns", + "CURRENT_CONFIGURATION_VOLUME_SIZE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return\n recommendations.

" + "smithy.api#enumValue": "CurrentConfigurationVolumeSize" } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "CURRENT_MONTHLY_PRICE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of Auto Scaling group\n recommendations.

" + "smithy.api#enumValue": "CurrentMonthlyPrice" } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_TYPE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The maximum number of Auto Scaling group recommendations to return with a single\n request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeType" } }, - "filters": { - "target": "com.amazonaws.computeoptimizer#Filters", + "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BASELINE_IOPS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of Auto Scaling group recommendations.

" + "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBaselineIOPS" } }, - "recommendationPreferences": { - "target": "com.amazonaws.computeoptimizer#RecommendationPreferences", + "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BASELINE_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object to specify the preferences for the Auto Scaling group recommendations\n to return in the response.

" + "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBaselineThroughput" } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsResponse": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + }, + "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BURST_IOPS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of Auto Scaling group\n recommendations.

\n

This value is null when there are no more pages of Auto Scaling group\n recommendations to return.

" + "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBurstIOPS" } }, - "autoScalingGroupRecommendations": { - "target": "com.amazonaws.computeoptimizer#AutoScalingGroupRecommendations", + "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BURST_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe Auto Scaling group recommendations.

" + "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeBurstThroughput" } }, - "errors": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_SIZE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe errors of the request.

\n

For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group.

" + "smithy.api#enumValue": "RecommendationOptionsConfigurationVolumeSize" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendations": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" + "RECOMMENDATION_OPTIONS_MONTHLY_PRICE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsMonthlyPrice" + } }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + "RECOMMENDATION_OPTIONS_PERFORMANCE_RISK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsPerformanceRisk" + } }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + "LAST_REFRESH_TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LastRefreshTimestamp" + } }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + "CURRENT_PERFORMANCE_RISK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CurrentPerformanceRisk" + } }, - { - "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsSavingsOpportunityPercentage" + } }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsCurrency" + } }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "

Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

\n

Compute Optimizer generates recommendations for Amazon EBS volumes that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.

" - } - }, - "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsRequest": { - "type": "structure", - "members": { - "volumeArns": { - "target": "com.amazonaws.computeoptimizer#VolumeArns", + "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the volumes for which to return\n recommendations.

" + "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsValue" } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "TAGS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of volume recommendations.

" + "smithy.api#enumValue": "Tags" } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "ROOT_VOLUME": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The maximum number of volume recommendations to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + "smithy.api#enumValue": "RootVolume" } }, - "filters": { - "target": "com.amazonaws.computeoptimizer#EBSFilters", + "CURRENT_CONFIGURATION_ROOT_VOLUME": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of volume\n recommendations.

" + "smithy.api#enumValue": "CurrentConfigurationRootVolume" } }, - "accountIds": { - "target": "com.amazonaws.computeoptimizer#AccountIds", + "EFFECTIVE_RECOMMENDATION_PREFERENCES_SAVINGS_ESTIMATION_MODE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return volume\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return volume recommendations.

\n

Only one account ID can be specified per request.

" + "smithy.api#enumValue": "EffectiveRecommendationPreferencesSavingsEstimationMode" } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsResponse": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + }, + "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_AFTER_DISCOUNTS_PERCENTAGE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of volume recommendations.

\n

This value is null when there are no more pages of volume recommendations to\n return.

" + "smithy.api#enumValue": "RecommendationOptionsSavingsOpportunityAfterDiscountsPercentage" } }, - "volumeRecommendations": { - "target": "com.amazonaws.computeoptimizer#VolumeRecommendations", + "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY_AFTER_DISCOUNTS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe volume recommendations.

" + "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsCurrencyAfterDiscounts" } }, - "errors": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE_AFTER_DISCOUNTS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe errors of the request.

\n

For example, an error is returned if you request recommendations for an unsupported\n volume.

" + "smithy.api#enumValue": "RecommendationOptionsEstimatedMonthlySavingsValueAfterDiscounts" } } - }, - "traits": { - "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendations": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" - }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" - }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" - }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" - }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" - }, - { - "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + "com.amazonaws.computeoptimizer#ExportableVolumeFields": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#ExportableVolumeField" + } + }, + "com.amazonaws.computeoptimizer#ExternalMetricStatus": { + "type": "structure", + "members": { + "statusCode": { + "target": "com.amazonaws.computeoptimizer#ExternalMetricStatusCode", + "traits": { + "smithy.api#documentation": "

\n The status code for Compute Optimizer's integration with an external metrics provider.\n

" + } }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" + "statusReason": { + "target": "com.amazonaws.computeoptimizer#ExternalMetricStatusReason", + "traits": { + "smithy.api#documentation": "

\n The reason for Compute Optimizer's integration status with your external metric provider.\n

" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Returns Amazon EC2 instance recommendations.

\n

Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.

" + "smithy.api#documentation": "

\n Describes Compute Optimizer's integration status with your chosen external metric provider. For example, Datadog.\n

" } }, - "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsRequest": { - "type": "structure", + "com.amazonaws.computeoptimizer#ExternalMetricStatusCode": { + "type": "enum", "members": { - "instanceArns": { - "target": "com.amazonaws.computeoptimizer#InstanceArns", + "NO_EXTERNAL_METRIC_SET": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instances for which to return\n recommendations.

" + "smithy.api#enumValue": "NO_EXTERNAL_METRIC_SET" } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "INTEGRATION_SUCCESS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of instance recommendations.

" + "smithy.api#enumValue": "INTEGRATION_SUCCESS" } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "DATADOG_INTEGRATION_ERROR": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The maximum number of instance recommendations to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + "smithy.api#enumValue": "DATADOG_INTEGRATION_ERROR" } }, - "filters": { - "target": "com.amazonaws.computeoptimizer#Filters", + "DYNATRACE_INTEGRATION_ERROR": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of instance\n recommendations.

" + "smithy.api#enumValue": "DYNATRACE_INTEGRATION_ERROR" } }, - "accountIds": { - "target": "com.amazonaws.computeoptimizer#AccountIds", + "NEWRELIC_INTEGRATION_ERROR": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return instance\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return instance recommendations.

\n

Only one account ID can be specified per request.

" + "smithy.api#enumValue": "NEWRELIC_INTEGRATION_ERROR" } }, - "recommendationPreferences": { - "target": "com.amazonaws.computeoptimizer#RecommendationPreferences", + "INSTANA_INTEGRATION_ERROR": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object to specify the preferences for the Amazon EC2 instance\n recommendations to return in the response.

" + "smithy.api#enumValue": "INSTANA_INTEGRATION_ERROR" } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsResponse": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + }, + "INSUFFICIENT_DATADOG_METRICS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of instance recommendations.

\n

This value is null when there are no more pages of instance recommendations to\n return.

" + "smithy.api#enumValue": "INSUFFICIENT_DATADOG_METRICS" } }, - "instanceRecommendations": { - "target": "com.amazonaws.computeoptimizer#InstanceRecommendations", + "INSUFFICIENT_DYNATRACE_METRICS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe instance recommendations.

" + "smithy.api#enumValue": "INSUFFICIENT_DYNATRACE_METRICS" } }, - "errors": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "INSUFFICIENT_NEWRELIC_METRICS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe errors of the request.

\n

For example, an error is returned if you request recommendations for an instance of an\n unsupported instance family.

" + "smithy.api#enumValue": "INSUFFICIENT_NEWRELIC_METRICS" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetrics": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" - }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" - }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" - }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" - }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" - }, - { - "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" + "INSUFFICIENT_INSTANA_METRICS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INSUFFICIENT_INSTANA_METRICS" + } } - ], - "traits": { - "smithy.api#documentation": "

Returns the projected utilization metrics of Amazon EC2 instance\n recommendations.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run this action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
" } }, - "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsRequest": { + "com.amazonaws.computeoptimizer#ExternalMetricStatusReason": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#ExternalMetricsPreference": { "type": "structure", "members": { - "instanceArn": { - "target": "com.amazonaws.computeoptimizer#InstanceArn", + "source": { + "target": "com.amazonaws.computeoptimizer#ExternalMetricsSource", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instances for which to return recommendation\n projected metrics.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Contains the source options for external metrics preferences.

" } - }, - "stat": { - "target": "com.amazonaws.computeoptimizer#MetricStatistic", + } + }, + "traits": { + "smithy.api#documentation": "

Describes the external metrics preferences for EC2 rightsizing recommendations.\n

" + } + }, + "com.amazonaws.computeoptimizer#ExternalMetricsSource": { + "type": "enum", + "members": { + "DATADOG": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The statistic of the projected metrics.

", - "smithy.api#required": {} + "smithy.api#enumValue": "Datadog" } }, - "period": { - "target": "com.amazonaws.computeoptimizer#Period", + "DYNATRACE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The granularity, in seconds, of the projected metrics data points.

", - "smithy.api#required": {} + "smithy.api#enumValue": "Dynatrace" } }, - "startTime": { - "target": "com.amazonaws.computeoptimizer#Timestamp", + "NEWRELIC": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The timestamp of the first projected metrics data point to return.

", - "smithy.api#required": {} + "smithy.api#enumValue": "NewRelic" } }, - "endTime": { - "target": "com.amazonaws.computeoptimizer#Timestamp", + "INSTANA": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The timestamp of the last projected metrics data point to return.

", - "smithy.api#required": {} + "smithy.api#enumValue": "Instana" } - }, - "recommendationPreferences": { - "target": "com.amazonaws.computeoptimizer#RecommendationPreferences", + } + } + }, + "com.amazonaws.computeoptimizer#FailureReason": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#FileFormat": { + "type": "enum", + "members": { + "CSV": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object to specify the preferences for the Amazon EC2 recommendation\n projected metrics to return in the response.

" + "smithy.api#enumValue": "Csv" } } - }, - "traits": { - "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsResponse": { + "com.amazonaws.computeoptimizer#Filter": { "type": "structure", "members": { - "recommendedOptionProjectedMetrics": { - "target": "com.amazonaws.computeoptimizer#RecommendedOptionProjectedMetrics", + "name": { + "target": "com.amazonaws.computeoptimizer#FilterName", "traits": { - "smithy.api#documentation": "

An array of objects that describes projected metrics.

" + "smithy.api#documentation": "

The name of the filter.

Specify Finding to return recommendations with a specific finding classification. For example, Underprovisioned.

Specify RecommendationSourceType to return recommendations of a specific resource type. For example, Ec2Instance.

Specify FindingReasonCodes to return recommendations with a specific finding reason code. For example, CPUUnderprovisioned.

Specify InferredWorkloadTypes to return recommendations of a specific inferred workload. For example, Redis.

You can filter your EC2 instance recommendations by tag:key and tag-key tags.

A tag:key is a key and value combination of a tag assigned to your recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

A tag-key is the key of a tag assigned to your recommendations. Use this filter to find all of your recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your recommendations with a tag key value of Owner or without any tag keys assigned.

" + } + }, + "values": { + "target": "com.amazonaws.computeoptimizer#FilterValues", + "traits": { + "smithy.api#documentation": "

The value of the filter.

\n

The valid values for this parameter are as follows, depending on what you specify for\n the name parameter and the resource type that you wish to filter results\n for:

  • Specify Optimized or NotOptimized if you specify the name parameter as Finding and you want to filter results for Auto Scaling groups.
  • Specify Underprovisioned, Overprovisioned, or Optimized if you specify the name parameter as Finding and you want to filter results for EC2 instances.
  • Specify Ec2Instance or AutoScalingGroup if you specify the name parameter as RecommendationSourceType.
  • Specify one of the following options if you specify the name parameter as FindingReasonCodes:
      • CPUOverprovisioned — The instance’s CPU configuration can be sized down while still meeting the performance requirements of your workload.
      • CPUUnderprovisioned — The instance’s CPU configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better CPU performance.
      • MemoryOverprovisioned — The instance’s memory configuration can be sized down while still meeting the performance requirements of your workload.
      • MemoryUnderprovisioned — The instance’s memory configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better memory performance.
      • EBSThroughputOverprovisioned — The instance’s EBS throughput configuration can be sized down while still meeting the performance requirements of your workload.
      • EBSThroughputUnderprovisioned — The instance’s EBS throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS throughput performance.
      • EBSIOPSOverprovisioned — The instance’s EBS IOPS configuration can be sized down while still meeting the performance requirements of your workload.
      • EBSIOPSUnderprovisioned — The instance’s EBS IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS IOPS performance.
      • NetworkBandwidthOverprovisioned — The instance’s network bandwidth configuration can be sized down while still meeting the performance requirements of your workload.
      • NetworkBandwidthUnderprovisioned — The instance’s network bandwidth configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network bandwidth performance. This finding reason happens when the NetworkIn or NetworkOut performance of an instance is impacted.
      • NetworkPPSOverprovisioned — The instance’s network PPS (packets per second) configuration can be sized down while still meeting the performance requirements of your workload.
      • NetworkPPSUnderprovisioned — The instance’s network PPS (packets per second) configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network PPS performance.
      • DiskIOPSOverprovisioned — The instance’s disk IOPS configuration can be sized down while still meeting the performance requirements of your workload.
      • DiskIOPSUnderprovisioned — The instance’s disk IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk IOPS performance.
      • DiskThroughputOverprovisioned — The instance’s disk throughput configuration can be sized down while still meeting the performance requirements of your workload.
      • DiskThroughputUnderprovisioned — The instance’s disk throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk throughput performance.
" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

Describes a filter that returns a more specific list of recommendations. Use this\n filter with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

\n

You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and JobFilter with\n the DescribeRecommendationExportJobs action.

" } }, - "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetrics": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsRequest" - }, + "com.amazonaws.computeoptimizer#FilterName": { + "type": "enum", + "members": { + "FINDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Finding" + } + }, + "FINDING_REASON_CODES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FindingReasonCodes" + } + }, + "RECOMMENDATION_SOURCE_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RecommendationSourceType" + } + }, + "INFERRED_WORKLOAD_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InferredWorkloadTypes" + } + } + } + }, + "com.amazonaws.computeoptimizer#FilterValue": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#FilterValues": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#FilterValue" + } + }, + "com.amazonaws.computeoptimizer#Filters": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#Filter" + } + }, + "com.amazonaws.computeoptimizer#Finding": { + "type": "enum", + "members": { + "UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Underprovisioned" + } + }, + "OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Overprovisioned" + } + }, + "OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Optimized" + } + }, + "NOT_OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotOptimized" + } + } + } + }, + "com.amazonaws.computeoptimizer#FindingReasonCode": { + "type": "enum", + "members": { + "MEMORY_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MemoryOverprovisioned" + } + }, + "MEMORY_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MemoryUnderprovisioned" + } + } + } + }, + "com.amazonaws.computeoptimizer#FunctionArn": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#FunctionArns": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#FunctionArn" + } + }, + "com.amazonaws.computeoptimizer#FunctionVersion": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsRequest" + }, "output": { - "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsResponse" + "target": "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsResponse" }, "errors": [ { @@ -5583,46 +5697,46 @@ } ], "traits": { - "smithy.api#documentation": "

\n Returns the projected metrics of Amazon ECS service recommendations.\n

" + "smithy.api#documentation": "

Returns Auto Scaling group recommendations.

\n

Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.
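A hedged Soto (Swift) sketch of calling this action, assuming `computeOptimizer` is a configured Soto client and the account ID is a placeholder:

```swift
// Sketch: fetch Auto Scaling group recommendations for a single member account.
let asgResponse = try await computeOptimizer.getAutoScalingGroupRecommendations(
    .init(accountIds: ["111122223333"], maxResults: 20)
)
for recommendation in asgResponse.autoScalingGroupRecommendations ?? [] {
    print(recommendation.autoScalingGroupName ?? "-", recommendation.finding?.rawValue ?? "-")
}
```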

" } }, - "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsRequest": { + "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsRequest": { "type": "structure", "members": { - "serviceArn": { - "target": "com.amazonaws.computeoptimizer#ServiceArn", + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", "traits": { - "smithy.api#documentation": "

\n The ARN that identifies the Amazon ECS service.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name\n

", - "smithy.api#required": {} + "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return Auto Scaling group\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return Auto Scaling group\n recommendations.

\n

Only one account ID can be specified per request.

" } }, - "stat": { - "target": "com.amazonaws.computeoptimizer#MetricStatistic", + "autoScalingGroupArns": { + "target": "com.amazonaws.computeoptimizer#AutoScalingGroupArns", "traits": { - "smithy.api#documentation": "

\n The statistic of the projected metrics.\n

", - "smithy.api#required": {} + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Auto Scaling groups for which to return\n recommendations.

" } }, - "period": { - "target": "com.amazonaws.computeoptimizer#Period", + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The granularity, in seconds, of the projected metrics data points.\n

", - "smithy.api#required": {} + "smithy.api#documentation": "

The token to advance to the next page of Auto Scaling group\n recommendations.

" } }, - "startTime": { - "target": "com.amazonaws.computeoptimizer#Timestamp", + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

\n The timestamp of the first projected metrics data point to return.\n

", - "smithy.api#required": {} + "smithy.api#documentation": "

The maximum number of Auto Scaling group recommendations to return with a single\n request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.
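A sketch of the nextToken paging loop this describes, assuming the Soto-generated client (`computeOptimizer`) and a placeholder account ID:

```swift
// Keep requesting pages until nextToken comes back nil.
var nextToken: String? = nil
repeat {
    let page = try await computeOptimizer.getAutoScalingGroupRecommendations(
        .init(accountIds: ["111122223333"], maxResults: 50, nextToken: nextToken)
    )
    for recommendation in page.autoScalingGroupRecommendations ?? [] {
        print(recommendation.autoScalingGroupArn ?? "-")
    }
    nextToken = page.nextToken
} while nextToken != nil
```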

" } }, - "endTime": { - "target": "com.amazonaws.computeoptimizer#Timestamp", + "filters": { + "target": "com.amazonaws.computeoptimizer#Filters", "traits": { - "smithy.api#documentation": "

\n The timestamp of the last projected metrics data point to return.\n

", - "smithy.api#required": {} + "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of Auto Scaling group recommendations.

" + } + }, + "recommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferences", + "traits": { + "smithy.api#documentation": "

An object to specify the preferences for the Auto Scaling group recommendations\n to return in the response.

" } } }, @@ -5630,13 +5744,25 @@ "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsResponse": { + "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsResponse": { "type": "structure", "members": { - "recommendedOptionProjectedMetrics": { - "target": "com.amazonaws.computeoptimizer#ECSServiceRecommendedOptionProjectedMetrics", + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

\n An array of objects that describes the projected metrics.\n

" + "smithy.api#documentation": "

The token to use to advance to the next page of Auto Scaling group\n recommendations.

\n

This value is null when there are no more pages of Auto Scaling group\n recommendations to return.

" + } + }, + "autoScalingGroupRecommendations": { + "target": "com.amazonaws.computeoptimizer#AutoScalingGroupRecommendations", + "traits": { + "smithy.api#documentation": "

An array of objects that describe Auto Scaling group recommendations.

" + } + }, + "errors": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "traits": { + "smithy.api#documentation": "

An array of objects that describe errors of the request.

\n

For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group.

" } } }, @@ -5644,13 +5770,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#GetECSServiceRecommendations": { + "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendations": { "type": "operation", "input": { - "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsRequest" + "target": "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsRequest" }, "output": { - "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsResponse" + "target": "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsResponse" }, "errors": [ { @@ -5679,40 +5805,40 @@ } ], "traits": { - "smithy.api#documentation": "

\n Returns Amazon ECS service recommendations.\n

\n

\n Compute Optimizer generates recommendations for Amazon ECS services on \n Fargate that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.\n

" + "smithy.api#documentation": "

Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

\n

Compute Optimizer generates recommendations for Amazon EBS volumes that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.
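A minimal Soto (Swift) sketch for this action; the filter value is illustrative and `computeOptimizer` is assumed to be a configured Soto client:

```swift
// Sketch: EBS volume recommendations filtered to volumes flagged as NotOptimized.
let ebsResponse = try await computeOptimizer.getEBSVolumeRecommendations(
    .init(
        filters: [ComputeOptimizer.EBSFilter(name: .finding, values: ["NotOptimized"])],
        maxResults: 25
    )
)
for volume in ebsResponse.volumeRecommendations ?? [] {
    print(volume.volumeArn ?? "-", volume.finding?.rawValue ?? "-")
}
```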

" } }, - "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsRequest": { + "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsRequest": { "type": "structure", "members": { - "serviceArns": { - "target": "com.amazonaws.computeoptimizer#ServiceArns", + "volumeArns": { + "target": "com.amazonaws.computeoptimizer#VolumeArns", "traits": { - "smithy.api#documentation": "

\n The ARN that identifies the Amazon ECS service.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name\n

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the volumes for which to return\n recommendations.

" } }, "nextToken": { "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

\n The token to advance to the next page of Amazon ECS service recommendations.\n

" + "smithy.api#documentation": "

The token to advance to the next page of volume recommendations.

" } }, "maxResults": { "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

\n The maximum number of Amazon ECS service recommendations to return with a single request.\n

\n

To retrieve the remaining results, make another request with the returned \n nextToken value.

" + "smithy.api#documentation": "

The maximum number of volume recommendations to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" } }, "filters": { - "target": "com.amazonaws.computeoptimizer#ECSServiceRecommendationFilters", + "target": "com.amazonaws.computeoptimizer#EBSFilters", "traits": { - "smithy.api#documentation": "

\n An array of objects to specify a filter that returns a more specific list of Amazon ECS service recommendations.\n

" + "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of volume\n recommendations.

" } }, "accountIds": { "target": "com.amazonaws.computeoptimizer#AccountIds", "traits": { - "smithy.api#documentation": "

\n Return the Amazon ECS service recommendations to the specified Amazon Web Services account IDs.\n

\n

If your account is the management account or the delegated administrator \n of an organization, use this parameter to return the Amazon ECS service recommendations to specific\n member accounts.

\n

You can only specify one account ID per request.

" + "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return volume\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return volume recommendations.

\n

Only one account ID can be specified per request.

" } } }, @@ -5720,25 +5846,25 @@ "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsResponse": { + "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsResponse": { "type": "structure", "members": { "nextToken": { "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

\n The token to advance to the next page of Amazon ECS service recommendations.\n

" + "smithy.api#documentation": "

The token to use to advance to the next page of volume recommendations.

\n

This value is null when there are no more pages of volume recommendations to\n return.

" } }, - "ecsServiceRecommendations": { - "target": "com.amazonaws.computeoptimizer#ECSServiceRecommendations", + "volumeRecommendations": { + "target": "com.amazonaws.computeoptimizer#VolumeRecommendations", "traits": { - "smithy.api#documentation": "

\n An array of objects that describe the Amazon ECS service recommendations.\n

" + "smithy.api#documentation": "

An array of objects that describe volume recommendations.

" } }, "errors": { "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", "traits": { - "smithy.api#documentation": "

\n An array of objects that describe errors of the request.\n

" + "smithy.api#documentation": "

An array of objects that describe errors of the request.

\n

For example, an error is returned if you request recommendations for an unsupported\n volume.

" } } }, @@ -5746,13 +5872,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferences": { + "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendations": { "type": "operation", "input": { - "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesRequest" + "target": "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsRequest" }, "output": { - "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesResponse" + "target": "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsResponse" }, "errors": [ { @@ -5781,133 +5907,72 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the recommendation preferences that are in effect for a given resource, such\n as enhanced infrastructure metrics. Considers all applicable preferences that you might\n have set at the resource, account, and organization level.

\n

When you create a recommendation preference, you can set its status to\n Active or Inactive. Use this action to view the\n recommendation preferences that are in effect, or Active.

" - } - }, - "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.computeoptimizer#ResourceArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource for which to confirm effective\n recommendation preferences. Only EC2 instance and Auto Scaling group ARNs are\n currently supported.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

Returns Amazon EC2 instance recommendations.

\n

Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.
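A hedged Soto (Swift) sketch of this action; the instance ARN is a placeholder and the preference enum case is an assumption about the generated names:

```swift
// Sketch: EC2 instance recommendations for one instance, also asking Compute Optimizer
// to consider Graviton (AWS_ARM64) candidate instance types.
let ec2Response = try await computeOptimizer.getEC2InstanceRecommendations(
    .init(
        instanceArns: ["arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0"],
        recommendationPreferences: .init(cpuVendorArchitectures: [.awsArm64])
    )
)
for recommendation in ec2Response.instanceRecommendations ?? [] {
    print(
        recommendation.currentInstanceType ?? "-",
        "->",
        recommendation.recommendationOptions?.first?.instanceType ?? "-"
    )
}
```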

" } }, - "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesResponse": { + "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsRequest": { "type": "structure", "members": { - "enhancedInfrastructureMetrics": { - "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", + "instanceArns": { + "target": "com.amazonaws.computeoptimizer#InstanceArns", "traits": { - "smithy.api#documentation": "

The status of the enhanced infrastructure metrics recommendation preference. Considers\n all applicable preferences that you might have set at the resource, account, and\n organization level.

\n

A status of Active confirms that the preference is applied in the latest\n recommendation refresh, and a status of Inactive confirms that it's not yet\n applied to recommendations.

\n

To validate whether the preference is applied to your last generated set of\n recommendations, review the effectiveRecommendationPreferences value in the\n response of the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

\n

For more information, see Enhanced\n infrastructure metrics in the Compute Optimizer User\n Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instances for which to return\n recommendations.

" } }, - "externalMetricsPreference": { - "target": "com.amazonaws.computeoptimizer#ExternalMetricsPreference", + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

The provider of the external metrics recommendation preference. Considers all\n applicable preferences that you might have set at the account and organization\n level.

\n

If the preference is applied in the latest recommendation refresh, an object with a\n valid source value appears in the response. If the preference isn't applied\n to the recommendations already, then this object doesn't appear in the response.

\n

To validate whether the preference is applied to your last generated set of\n recommendations, review the effectiveRecommendationPreferences value in the\n response of the GetEC2InstanceRecommendations actions.

\n

For more information, see Enhanced\n infrastructure metrics in the Compute Optimizer User\n Guide.

" + "smithy.api#documentation": "

The token to advance to the next page of instance recommendations.

" } }, - "lookBackPeriod": { - "target": "com.amazonaws.computeoptimizer#LookBackPeriodPreference", + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

\n The number of days the utilization metrics of the Amazon Web Services resource are analyzed. \n

\n

To validate that the preference is applied to your last generated set of recommendations, review \n the effectiveRecommendationPreferences value in the response of the \n GetAutoScalingGroupRecommendations or GetEC2InstanceRecommendations actions.

" + "smithy.api#documentation": "

The maximum number of instance recommendations to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" } }, - "utilizationPreferences": { - "target": "com.amazonaws.computeoptimizer#UtilizationPreferences", + "filters": { + "target": "com.amazonaws.computeoptimizer#Filters", "traits": { - "smithy.api#documentation": "

\n The resource’s CPU and memory utilization preferences, such as threshold and headroom, \n that were used to generate rightsizing recommendations. It considers all applicable preferences \n that you set at the resource, account, and organization level.\n

\n

To validate that the preference is applied to your last generated set of recommendations, review the \n effectiveRecommendationPreferences value in the response of the \n GetAutoScalingGroupRecommendations or GetEC2InstanceRecommendations actions.

" + "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of instance\n recommendations.

" } }, - "preferredResources": { - "target": "com.amazonaws.computeoptimizer#EffectivePreferredResources", + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", "traits": { - "smithy.api#documentation": "

\n The resource type values that are considered as candidates when generating rightsizing recommendations. \n This object resolves any wildcard expressions and returns the effective list of candidate resource type \n values. It also considers all applicable preferences that you set at the resource, account, and \n organization level.\n

\n

To validate that the preference is applied to your last generated set of recommendations, review the \n effectiveRecommendationPreferences value in the response of the GetAutoScalingGroupRecommendations \n or GetEC2InstanceRecommendations actions.

" + "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return instance\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return instance recommendations.

\n

Only one account ID can be specified per request.

" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.computeoptimizer#GetEnrollmentStatus": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" - }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" - }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" - }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" + "recommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferences", + "traits": { + "smithy.api#documentation": "

An object to specify the preferences for the Amazon EC2 instance\n recommendations to return in the response.

" + } } - ], - "traits": { - "smithy.api#documentation": "

Returns the enrollment (opt in) status of an account to the Compute Optimizer\n service.

\n

If the account is the management account of an organization, this action also confirms\n the enrollment status of member accounts of the organization. Use the GetEnrollmentStatusesForOrganization action to get detailed information\n about the enrollment status of member accounts of an organization.

" - } - }, - "com.amazonaws.computeoptimizer#GetEnrollmentStatusRequest": { - "type": "structure", - "members": {}, + }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetEnrollmentStatusResponse": { + "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsResponse": { "type": "structure", "members": { - "status": { - "target": "com.amazonaws.computeoptimizer#Status", - "traits": { - "smithy.api#documentation": "

The enrollment status of the account.

" - } - }, - "statusReason": { - "target": "com.amazonaws.computeoptimizer#StatusReason", - "traits": { - "smithy.api#documentation": "

The reason for the enrollment status of the account.

\n

For example, an account might show a status of Pending because member\n accounts of an organization require more time to be enrolled in the service.

" - } - }, - "memberAccountsEnrolled": { - "target": "com.amazonaws.computeoptimizer#MemberAccountsEnrolled", + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

Confirms the enrollment status of member accounts of the organization, if the account\n is a management account of an organization.

" + "smithy.api#documentation": "

The token to use to advance to the next page of instance recommendations.

\n

This value is null when there are no more pages of instance recommendations to\n return.

" } }, - "lastUpdatedTimestamp": { - "target": "com.amazonaws.computeoptimizer#LastUpdatedTimestamp", + "instanceRecommendations": { + "target": "com.amazonaws.computeoptimizer#InstanceRecommendations", "traits": { - "smithy.api#documentation": "

The Unix epoch timestamp, in seconds, of when the account enrollment status was last\n updated.

" + "smithy.api#documentation": "

An array of objects that describe instance recommendations.

" } }, - "numberOfMemberAccountsOptedIn": { - "target": "com.amazonaws.computeoptimizer#NumberOfMemberAccountsOptedIn", + "errors": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", "traits": { - "smithy.api#documentation": "

The count of organization member accounts that are opted in to the service, if your\n account is an organization management account.

" + "smithy.api#documentation": "

An array of objects that describe errors of the request.

\n

For example, an error is returned if you request recommendations for an instance of an\n unsupported instance family.

" } } }, @@ -5915,13 +5980,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganization": { + "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetrics": { "type": "operation", "input": { - "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationRequest" + "target": "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsRequest" }, "output": { - "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationResponse" + "target": "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsResponse" }, "errors": [ { @@ -5936,6 +6001,12 @@ { "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, { "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" }, @@ -5944,34 +6015,52 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the Compute Optimizer enrollment (opt-in) status of organization member\n accounts, if your account is an organization management account.

\n

To get the enrollment status of standalone accounts, use the GetEnrollmentStatus action.

", - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "accountEnrollmentStatuses", - "pageSize": "maxResults" - } + "smithy.api#documentation": "

Returns the projected utilization metrics of Amazon EC2 instance\n recommendations.

\n \n

The Cpu and Memory metrics are the only projected\n utilization metrics returned when you run this action. Additionally, the\n Memory metric is returned only for resources that have the unified\n CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
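A minimal Soto (Swift) sketch of requesting these projected metrics; the ARN is a placeholder, the time range and period are illustrative, and `computeOptimizer` is assumed to be a configured Soto client:

```swift
import Foundation

// Sketch: projected metrics for one instance's recommendation options over the last
// three days at one-hour granularity.
let now = Date()
let projected = try await computeOptimizer.getEC2RecommendationProjectedMetrics(
    .init(
        endTime: now,
        instanceArn: "arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0",
        period: 3600,
        startTime: now.addingTimeInterval(-3 * 24 * 3600),
        stat: .average
    )
)
for option in projected.recommendedOptionProjectedMetrics ?? [] {
    print(option.recommendedInstanceType ?? "-", option.projectedMetrics?.count ?? 0)
}
```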

\n
" } }, - "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationRequest": { + "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsRequest": { "type": "structure", "members": { - "filters": { - "target": "com.amazonaws.computeoptimizer#EnrollmentFilters", + "instanceArn": { + "target": "com.amazonaws.computeoptimizer#InstanceArn", "traits": { - "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of account\n enrollment statuses.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instances for which to return recommendation\n projected metrics.

", + "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "stat": { + "target": "com.amazonaws.computeoptimizer#MetricStatistic", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of account enrollment statuses.

" + "smithy.api#documentation": "

The statistic of the projected metrics.

", + "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "period": { + "target": "com.amazonaws.computeoptimizer#Period", "traits": { - "smithy.api#documentation": "

The maximum number of account enrollment statuses to return with a single request. You\n can specify up to 100 statuses to return with each request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + "smithy.api#default": 0, + "smithy.api#documentation": "

The granularity, in seconds, of the projected metrics data points.

", + "smithy.api#required": {} + } + }, + "startTime": { + "target": "com.amazonaws.computeoptimizer#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of the first projected metrics data point to return.

", + "smithy.api#required": {} + } + }, + "endTime": { + "target": "com.amazonaws.computeoptimizer#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of the last projected metrics data point to return.

", + "smithy.api#required": {} + } + }, + "recommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferences", + "traits": { + "smithy.api#documentation": "

An object to specify the preferences for the Amazon EC2 recommendation\n projected metrics to return in the response.

" } } }, @@ -5979,19 +6068,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationResponse": { + "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsResponse": { "type": "structure", "members": { - "accountEnrollmentStatuses": { - "target": "com.amazonaws.computeoptimizer#AccountEnrollmentStatuses", - "traits": { - "smithy.api#documentation": "

An array of objects that describe the enrollment statuses of organization member\n accounts.

" - } - }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "recommendedOptionProjectedMetrics": { + "target": "com.amazonaws.computeoptimizer#RecommendedOptionProjectedMetrics", "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of account enrollment statuses.

\n

This value is null when there are no more pages of account enrollment statuses to\n return.

" + "smithy.api#documentation": "

An array of objects that describes projected metrics.

" } } }, @@ -5999,13 +6082,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendations": { + "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetrics": { "type": "operation", "input": { - "target": "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsRequest" + "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsRequest" }, "output": { - "target": "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsResponse" + "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsResponse" }, "errors": [ { @@ -6017,15 +6100,15 @@ { "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" }, - { - "target": "com.amazonaws.computeoptimizer#LimitExceededException" - }, { "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" }, { "target": "com.amazonaws.computeoptimizer#OptInRequiredException" }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, { "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" }, @@ -6034,46 +6117,46 @@ } ], "traits": { - "smithy.api#documentation": "

Returns Lambda function recommendations.

\n

Compute Optimizer generates recommendations for functions that meet a specific set\n of requirements. For more information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.

", - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "lambdaFunctionRecommendations", - "pageSize": "maxResults" - } + "smithy.api#documentation": "

\n Returns the projected metrics of Amazon ECS service recommendations.\n

" } }, - "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsRequest": { + "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsRequest": { "type": "structure", "members": { - "functionArns": { - "target": "com.amazonaws.computeoptimizer#FunctionArns", + "serviceArn": { + "target": "com.amazonaws.computeoptimizer#ServiceArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the functions for which to return\n recommendations.

\n

You can specify a qualified or unqualified ARN. If you specify an unqualified ARN\n without a function version suffix, Compute Optimizer will return recommendations for the\n latest ($LATEST) version of the function. If you specify a qualified ARN\n with a version suffix, Compute Optimizer will return recommendations for the specified\n function version. For more information about using function versions, see Using\n versions in the Lambda Developer\n Guide.

" + "smithy.api#documentation": "

\n The ARN that identifies the Amazon ECS service.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name\n

", + "smithy.api#required": {} } }, - "accountIds": { - "target": "com.amazonaws.computeoptimizer#AccountIds", + "stat": { + "target": "com.amazonaws.computeoptimizer#MetricStatistic", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return function\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return function recommendations.

\n

Only one account ID can be specified per request.

" + "smithy.api#documentation": "

\n The statistic of the projected metrics.\n

", + "smithy.api#required": {} } }, - "filters": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilters", + "period": { + "target": "com.amazonaws.computeoptimizer#Period", "traits": { - "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of function\n recommendations.

" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The granularity, in seconds, of the projected metrics data points.\n

", + "smithy.api#required": {} } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "startTime": { + "target": "com.amazonaws.computeoptimizer#Timestamp", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of function recommendations.

" + "smithy.api#documentation": "

\n The timestamp of the first projected metrics data point to return.\n

", + "smithy.api#required": {} } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "endTime": { + "target": "com.amazonaws.computeoptimizer#Timestamp", "traits": { - "smithy.api#documentation": "

The maximum number of function recommendations to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + "smithy.api#documentation": "

\n The timestamp of the last projected metrics data point to return.\n

", + "smithy.api#required": {} } } }, @@ -6081,19 +6164,13 @@ "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsResponse": { + "com.amazonaws.computeoptimizer#GetECSServiceRecommendationProjectedMetricsResponse": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", - "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of function recommendations.

\n

This value is null when there are no more pages of function recommendations to\n return.

" - } - }, - "lambdaFunctionRecommendations": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendations", + "recommendedOptionProjectedMetrics": { + "target": "com.amazonaws.computeoptimizer#ECSServiceRecommendedOptionProjectedMetrics", "traits": { - "smithy.api#documentation": "

An array of objects that describe function recommendations.

" + "smithy.api#documentation": "

\n An array of objects that describes the projected metrics.\n

" } } }, @@ -6101,13 +6178,13 @@ "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#GetLicenseRecommendations": { + "com.amazonaws.computeoptimizer#GetECSServiceRecommendations": { "type": "operation", "input": { - "target": "com.amazonaws.computeoptimizer#GetLicenseRecommendationsRequest" + "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsRequest" }, "output": { - "target": "com.amazonaws.computeoptimizer#GetLicenseRecommendationsResponse" + "target": "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsResponse" }, "errors": [ { @@ -6136,1204 +6213,2663 @@ } ], "traits": { - "smithy.api#documentation": "

Returns license recommendations for Amazon EC2 instances that run on a specific license.

\n

Compute Optimizer generates recommendations for licenses that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.

" + "smithy.api#documentation": "

\n Returns Amazon ECS service recommendations.\n

\n

\n Compute Optimizer generates recommendations for Amazon ECS services on \n Fargate that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.\n
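A hedged Soto (Swift) sketch for this action; the service ARN is a placeholder in the arn:aws:ecs:… format documented for serviceArns below, and `computeOptimizer` is assumed to be a configured Soto client:

```swift
// Sketch: Amazon ECS (Fargate) service recommendations for one service.
let ecsResponse = try await computeOptimizer.getECSServiceRecommendations(
    .init(serviceArns: ["arn:aws:ecs:us-east-1:111122223333:service/my-cluster/my-service"])
)
for recommendation in ecsResponse.ecsServiceRecommendations ?? [] {
    print(recommendation.serviceArn ?? "-", recommendation.finding?.rawValue ?? "-")
}
```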

" } }, - "com.amazonaws.computeoptimizer#GetLicenseRecommendationsRequest": { + "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsRequest": { "type": "structure", "members": { - "resourceArns": { - "target": "com.amazonaws.computeoptimizer#ResourceArns", + "serviceArns": { + "target": "com.amazonaws.computeoptimizer#ServiceArns", "traits": { - "smithy.api#documentation": "

\n The ARN that identifies the Amazon EC2 instance.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:ec2:region:aws_account_id:instance/instance-id\n

" + "smithy.api#documentation": "

\n The ARN that identifies the Amazon ECS service.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name\n

" } }, "nextToken": { "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

\n The token to advance to the next page of license recommendations.\n

" + "smithy.api#documentation": "

\n The token to advance to the next page of Amazon ECS service recommendations.\n

" } }, "maxResults": { "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

\n The maximum number of license recommendations to return with a single request.\n

\n

\n To retrieve the remaining results, make another request with the returned\n nextToken value.\n

" + "smithy.api#documentation": "

\n The maximum number of Amazon ECS service recommendations to return with a single request.\n

\n

To retrieve the remaining results, make another request with the returned \n nextToken value.

" } }, "filters": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendationFilters", + "target": "com.amazonaws.computeoptimizer#ECSServiceRecommendationFilters", "traits": { - "smithy.api#documentation": "

\n An array of objects to specify a filter that returns a more specific list of license recommendations.\n

" + "smithy.api#documentation": "

\n An array of objects to specify a filter that returns a more specific list of Amazon ECS service recommendations.\n

" } }, "accountIds": { "target": "com.amazonaws.computeoptimizer#AccountIds", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return license recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return license recommendations.

\n

Only one account ID can be specified per request.

" + "smithy.api#documentation": "

\n Return the Amazon ECS service recommendations to the specified Amazon Web Services account IDs.\n

\n

If your account is the management account or the delegated administrator \n of an organization, use this parameter to return the Amazon ECS service recommendations to specific\n member accounts.

\n

You can only specify one account ID per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetECSServiceRecommendationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

\n The token to advance to the next page of Amazon ECS service recommendations.\n

" + } + }, + "ecsServiceRecommendations": { + "target": "com.amazonaws.computeoptimizer#ECSServiceRecommendations", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe the Amazon ECS service recommendations.\n

" + } + }, + "errors": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe errors of the request.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferences": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the recommendation preferences that are in effect for a given resource, such\n as enhanced infrastructure metrics. Considers all applicable preferences that you might\n have set at the resource, account, and organization level.

\n

When you create a recommendation preference, you can set its status to\n Active or Inactive. Use this action to view the\n recommendation preferences that are in effect, or Active.
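A minimal Soto (Swift) sketch of checking the effective (Active) preferences for a single resource; the ARN is a placeholder and the member names are assumed from the generated response shape:

```swift
// Sketch: inspect the preferences currently in effect for one EC2 instance.
let effective = try await computeOptimizer.getEffectiveRecommendationPreferences(
    .init(resourceArn: "arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0")
)
print("enhanced infrastructure metrics:",
      effective.enhancedInfrastructureMetrics?.rawValue ?? "not set")
print("look-back period:", effective.lookBackPeriod?.rawValue ?? "default")
```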

" + } + }, + "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.computeoptimizer#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource for which to confirm effective\n recommendation preferences. Only EC2 instance and Auto Scaling group ARNs are\n currently supported.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesResponse": { + "type": "structure", + "members": { + "enhancedInfrastructureMetrics": { + "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", + "traits": { + "smithy.api#documentation": "

The status of the enhanced infrastructure metrics recommendation preference. Considers\n all applicable preferences that you might have set at the resource, account, and\n organization level.

\n

A status of Active confirms that the preference is applied in the latest\n recommendation refresh, and a status of Inactive confirms that it's not yet\n applied to recommendations.

\n

To validate whether the preference is applied to your last generated set of\n recommendations, review the effectiveRecommendationPreferences value in the\n response of the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

\n

For more information, see Enhanced\n infrastructure metrics in the Compute Optimizer User\n Guide.

" + } + }, + "externalMetricsPreference": { + "target": "com.amazonaws.computeoptimizer#ExternalMetricsPreference", + "traits": { + "smithy.api#documentation": "

The provider of the external metrics recommendation preference. Considers all\n applicable preferences that you might have set at the account and organization\n level.

\n

If the preference is applied in the latest recommendation refresh, an object with a\n valid source value appears in the response. If the preference isn't applied\n to the recommendations already, then this object doesn't appear in the response.

\n

To validate whether the preference is applied to your last generated set of\n recommendations, review the effectiveRecommendationPreferences value in the\n response of the GetEC2InstanceRecommendations actions.

\n

For more information, see Enhanced\n infrastructure metrics in the Compute Optimizer User\n Guide.

" + } + }, + "lookBackPeriod": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodPreference", + "traits": { + "smithy.api#documentation": "

\n The number of days the utilization metrics of the Amazon Web Services resource are analyzed. \n

\n

To validate that the preference is applied to your last generated set of recommendations, review \n the effectiveRecommendationPreferences value in the response of the \n GetAutoScalingGroupRecommendations or GetEC2InstanceRecommendations actions.

" + } + }, + "utilizationPreferences": { + "target": "com.amazonaws.computeoptimizer#UtilizationPreferences", + "traits": { + "smithy.api#documentation": "

\n The resource’s CPU and memory utilization preferences, such as threshold and headroom, \n that were used to generate rightsizing recommendations. It considers all applicable preferences \n that you set at the resource, account, and organization level.\n

\n

To validate that the preference is applied to your last generated set of recommendations, review the \n effectiveRecommendationPreferences value in the response of the \n GetAutoScalingGroupRecommendations or GetEC2InstanceRecommendations actions.

" + } + }, + "preferredResources": { + "target": "com.amazonaws.computeoptimizer#EffectivePreferredResources", + "traits": { + "smithy.api#documentation": "

\n The resource type values that are considered as candidates when generating rightsizing recommendations. \n This object resolves any wildcard expressions and returns the effective list of candidate resource type \n values. It also considers all applicable preferences that you set at the resource, account, and \n organization level.\n

\n

To validate that the preference is applied to your last generated set of recommendations, review the \n effectiveRecommendationPreferences value in the response of the GetAutoScalingGroupRecommendations \n or GetEC2InstanceRecommendations actions.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetEnrollmentStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the enrollment (opt in) status of an account to the Compute Optimizer\n service.

\n

If the account is the management account of an organization, this action also confirms\n the enrollment status of member accounts of the organization. Use the GetEnrollmentStatusesForOrganization action to get detailed information\n about the enrollment status of member accounts of an organization.
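A short Soto (Swift) sketch of this call, assuming `computeOptimizer` is a configured Soto client (the request shape has no members):

```swift
// Sketch: check the account's Compute Optimizer enrollment (opt-in) status.
let enrollment = try await computeOptimizer.getEnrollmentStatus(.init())
if let status = enrollment.status {
    print("enrollment status:", status.rawValue)
}
if let optedIn = enrollment.numberOfMemberAccountsOptedIn {
    print("member accounts opted in:", optedIn)
}
```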

" + } + }, + "com.amazonaws.computeoptimizer#GetEnrollmentStatusRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetEnrollmentStatusResponse": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.computeoptimizer#Status", + "traits": { + "smithy.api#documentation": "

The enrollment status of the account.

" + } + }, + "statusReason": { + "target": "com.amazonaws.computeoptimizer#StatusReason", + "traits": { + "smithy.api#documentation": "

The reason for the enrollment status of the account.

\n

For example, an account might show a status of Pending because member\n accounts of an organization require more time to be enrolled in the service.

" + } + }, + "memberAccountsEnrolled": { + "target": "com.amazonaws.computeoptimizer#MemberAccountsEnrolled", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Confirms the enrollment status of member accounts of the organization, if the account\n is a management account of an organization.

" + } + }, + "lastUpdatedTimestamp": { + "target": "com.amazonaws.computeoptimizer#LastUpdatedTimestamp", + "traits": { + "smithy.api#documentation": "

The Unix epoch timestamp, in seconds, of when the account enrollment status was last\n updated.

" + } + }, + "numberOfMemberAccountsOptedIn": { + "target": "com.amazonaws.computeoptimizer#NumberOfMemberAccountsOptedIn", + "traits": { + "smithy.api#documentation": "

The count of organization member accounts that are opted in to the service, if your\n account is an organization management account.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganization": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the Compute Optimizer enrollment (opt-in) status of organization member\n accounts, if your account is an organization management account.

\n

To get the enrollment status of standalone accounts, use the GetEnrollmentStatus action.

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "accountEnrollmentStatuses", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationRequest": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.computeoptimizer#EnrollmentFilters", + "traits": { + "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of account\n enrollment statuses.

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to advance to the next page of account enrollment statuses.

" + } + }, + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of account enrollment statuses to return with a single request. You\n can specify up to 100 statuses to return with each request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationResponse": { + "type": "structure", + "members": { + "accountEnrollmentStatuses": { + "target": "com.amazonaws.computeoptimizer#AccountEnrollmentStatuses", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the enrollment statuses of organization member\n accounts.

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use to advance to the next page of account enrollment statuses.

\n

This value is null when there are no more pages of account enrollment statuses to\n return.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#LimitExceededException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns Lambda function recommendations.

\n

Compute Optimizer generates recommendations for functions that meet a specific set\n of requirements. For more information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.
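A hedged Soto (Swift) sketch for this action; the filter value is illustrative and `computeOptimizer` is assumed to be a configured Soto client:

```swift
// Sketch: Lambda function recommendations filtered to functions flagged as NotOptimized.
let lambdaResponse = try await computeOptimizer.getLambdaFunctionRecommendations(
    .init(filters: [
        ComputeOptimizer.LambdaFunctionRecommendationFilter(name: .finding, values: ["NotOptimized"])
    ])
)
for recommendation in lambdaResponse.lambdaFunctionRecommendations ?? [] {
    print(recommendation.functionArn ?? "-", recommendation.finding?.rawValue ?? "-")
}
```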

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "lambdaFunctionRecommendations", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsRequest": { + "type": "structure", + "members": { + "functionArns": { + "target": "com.amazonaws.computeoptimizer#FunctionArns", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the functions for which to return\n recommendations.

\n

You can specify a qualified or unqualified ARN. If you specify an unqualified ARN\n without a function version suffix, Compute Optimizer will return recommendations for the\n latest ($LATEST) version of the function. If you specify a qualified ARN\n with a version suffix, Compute Optimizer will return recommendations for the specified\n function version. For more information about using function versions, see Using\n versions in the Lambda Developer\n Guide.

" + } + }, + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return function\n recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return function recommendations.

\n

Only one account ID can be specified per request.

" + } + }, + "filters": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilters", + "traits": { + "smithy.api#documentation": "

An array of objects to specify a filter that returns a more specific list of function\n recommendations.

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to advance to the next page of function recommendations.

" + } + }, + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of function recommendations to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use to advance to the next page of function recommendations.

\n

This value is null when there are no more pages of function recommendations to\n return.

" + } + }, + "lambdaFunctionRecommendations": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendations", + "traits": { + "smithy.api#documentation": "

An array of objects that describe function recommendations.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetLicenseRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetLicenseRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetLicenseRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns license recommendations for Amazon EC2 instances that run on a specific license.

\n

Compute Optimizer generates recommendations for licenses that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.
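A minimal Soto (Swift) sketch of this action; the instance ARN is a placeholder in the documented arn:aws:ec2:… format, and the response member names are assumptions about the generated shapes:

```swift
// Sketch: license recommendations for one EC2 instance.
let licenseResponse = try await computeOptimizer.getLicenseRecommendations(
    .init(resourceArns: ["arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0"])
)
for recommendation in licenseResponse.licenseRecommendations ?? [] {
    print(recommendation.resourceArn ?? "-", recommendation.finding?.rawValue ?? "-")
}
```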

" + } + }, + "com.amazonaws.computeoptimizer#GetLicenseRecommendationsRequest": { + "type": "structure", + "members": { + "resourceArns": { + "target": "com.amazonaws.computeoptimizer#ResourceArns", + "traits": { + "smithy.api#documentation": "

\n The ARN that identifies the Amazon EC2 instance.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:ec2:region:aws_account_id:instance/instance-id\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

\n The token to advance to the next page of license recommendations.\n

" + } + }, + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", + "traits": { + "smithy.api#documentation": "

\n The maximum number of license recommendations to return with a single request.\n

\n

\n To retrieve the remaining results, make another request with the returned\n nextToken value.\n

" + } + }, + "filters": { + "target": "com.amazonaws.computeoptimizer#LicenseRecommendationFilters", + "traits": { + "smithy.api#documentation": "

\n An array of objects to specify a filter that returns a more specific list of license recommendations.\n

" + } + }, + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return license recommendations.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return license recommendations.

\n

Only one account ID can be specified per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetLicenseRecommendationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

\n The token to use to advance to the next page of license recommendations.\n

" + } + }, + "licenseRecommendations": { + "target": "com.amazonaws.computeoptimizer#LicenseRecommendations", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe license recommendations.\n

" + } + }, + "errors": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe errors of the request.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationProjectedMetrics": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationProjectedMetricsRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationProjectedMetricsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Returns the projected metrics of Amazon RDS recommendations.\n

" + } + }, + "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationProjectedMetricsRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.computeoptimizer#ResourceArn", + "traits": { + "smithy.api#documentation": "

\n The ARN that identifies the Amazon RDS database.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:rds:{region}:{accountId}:db:{resourceName}\n

", + "smithy.api#required": {} + } + }, + "stat": { + "target": "com.amazonaws.computeoptimizer#MetricStatistic", + "traits": { + "smithy.api#documentation": "

\n The statistic of the projected metrics.\n

", + "smithy.api#required": {} + } + }, + "period": { + "target": "com.amazonaws.computeoptimizer#Period", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The granularity, in seconds, of the projected metrics data points.\n

", + "smithy.api#required": {} + } + }, + "startTime": { + "target": "com.amazonaws.computeoptimizer#Timestamp", + "traits": { + "smithy.api#documentation": "

\n The timestamp of the first projected metrics data point to return.\n

", + "smithy.api#required": {} + } + }, + "endTime": { + "target": "com.amazonaws.computeoptimizer#Timestamp", + "traits": { + "smithy.api#documentation": "

\n The timestamp of the last projected metrics data point to return.\n

", + "smithy.api#required": {} + } + }, + "recommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferences" + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationProjectedMetricsResponse": { + "type": "structure", + "members": { + "recommendedOptionProjectedMetrics": { + "target": "com.amazonaws.computeoptimizer#RDSDatabaseRecommendedOptionProjectedMetrics", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describes the projected metrics.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendations": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationsRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

\n Returns Amazon RDS recommendations.\n

\n

Compute Optimizer generates recommendations for Amazon RDS databases that \n meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.

" + } + }, + "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationsRequest": { + "type": "structure", + "members": { + "resourceArns": { + "target": "com.amazonaws.computeoptimizer#ResourceArns", + "traits": { + "smithy.api#documentation": "

\n The ARN that identifies the Amazon RDS database or DB cluster.\n

\n

\n The following is the format of the ARN:\n

\n

\n arn:aws:rds:{region}:{accountId}:db:{resourceName}\n

\n

The following is the format of a DB Cluster ARN:

\n

\n arn:aws:rds:{region}:{accountId}:cluster:{resourceName}\n

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

\n The token to advance to the next page of Amazon RDS recommendations.\n

" + } + }, + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of Amazon RDS recommendations to return with a single\n request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + } + }, + "filters": { + "target": "com.amazonaws.computeoptimizer#RDSDBRecommendationFilters", + "traits": { + "smithy.api#documentation": "

\n An array of objects to specify a filter that returns a more specific list of Amazon RDS recommendations.\n

" + } + }, + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", + "traits": { + "smithy.api#documentation": "

\n Returns the Amazon RDS recommendations for the specified Amazon Web Services account IDs.\n

\n

If your account is the management account or the delegated administrator \n of an organization, use this parameter to return the Amazon RDS recommendations for specific\n member accounts.

\n

You can only specify one account ID per request.

" + } + }, + "recommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferences" + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetRDSDatabaseRecommendationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

\n The token to advance to the next page of Amazon RDS recommendations.\n

" + } + }, + "rdsDBRecommendations": { + "target": "com.amazonaws.computeoptimizer#RDSDBRecommendations", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe the Amazon RDS recommendations.\n

" + } + }, + "errors": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe errors of the request.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationError": { + "type": "structure", + "members": { + "identifier": { + "target": "com.amazonaws.computeoptimizer#Identifier", + "traits": { + "smithy.api#documentation": "

The ID of the error.

" + } + }, + "code": { + "target": "com.amazonaws.computeoptimizer#Code", + "traits": { + "smithy.api#documentation": "

The error code.

" + } + }, + "message": { + "target": "com.amazonaws.computeoptimizer#Message", + "traits": { + "smithy.api#documentation": "

The message, or reason, for the error.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an error experienced when getting recommendations.

\n

For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group, or if you request recommendations for an instance of an\n unsupported instance family.

" + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationErrors": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationError" + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationPreferences": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferencesRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferencesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns existing recommendation preferences, such as enhanced infrastructure\n metrics.

\n

Use the scope parameter to specify which preferences to return. You can\n specify to return preferences for an organization, a specific account ID, or a specific\n EC2 instance or Auto Scaling group Amazon Resource Name (ARN).

\n

For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "recommendationPreferencesDetails", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationPreferencesRequest": { + "type": "structure", + "members": { + "resourceType": { + "target": "com.amazonaws.computeoptimizer#ResourceType", + "traits": { + "smithy.api#documentation": "

The target resource type of the recommendation preference for which to return\n preferences.

\n

The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.

", + "smithy.api#required": {} + } + }, + "scope": { + "target": "com.amazonaws.computeoptimizer#Scope", + "traits": { + "smithy.api#documentation": "

An object that describes the scope of the recommendation preference to return.

\n

You can return recommendation preferences that are created at the organization level\n (for management accounts of an organization only), account level, and resource level.\n For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to advance to the next page of recommendation preferences.

" + } + }, + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of recommendation preferences to return with a single\n request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationPreferencesResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use to advance to the next page of recommendation preferences.

\n

This value is null when there are no more pages of recommendation preferences to\n return.

" + } + }, + "recommendationPreferencesDetails": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferencesDetails", + "traits": { + "smithy.api#documentation": "

An array of objects that describe recommendation preferences.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationSummaries": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationSummariesRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#GetRecommendationSummariesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the optimization findings for an account.

\n

It returns the number of:

  • Amazon EC2 instances in an account that are Underprovisioned, Overprovisioned, or Optimized.
  • Auto Scaling groups in an account that are NotOptimized, or Optimized.
  • Amazon EBS volumes in an account that are NotOptimized, or Optimized.
  • Lambda functions in an account that are NotOptimized, or Optimized.
  • Amazon ECS services in an account that are Underprovisioned, Overprovisioned, or Optimized.
", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "recommendationSummaries", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationSummariesRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.computeoptimizer#AccountIds", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return recommendation\n summaries.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return recommendation summaries.

\n

Only one account ID can be specified per request.

" + } + }, + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to advance to the next page of recommendation summaries.

" + } + }, + "maxResults": { + "target": "com.amazonaws.computeoptimizer#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of recommendation summaries to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.computeoptimizer#GetRecommendationSummariesResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.computeoptimizer#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use to advance to the next page of recommendation summaries.

\n

This value is null when there are no more pages of recommendation summaries to\n return.

" + } + }, + "recommendationSummaries": { + "target": "com.amazonaws.computeoptimizer#RecommendationSummaries", + "traits": { + "smithy.api#documentation": "

An array of objects that summarize a recommendation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.computeoptimizer#Gpu": { + "type": "structure", + "members": { + "gpuCount": { + "target": "com.amazonaws.computeoptimizer#GpuCount", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The number of GPUs for the instance type.\n

" + } + }, + "gpuMemorySizeInMiB": { + "target": "com.amazonaws.computeoptimizer#GpuMemorySizeInMiB", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The total size of the memory for the GPU accelerators for the instance type, in MiB.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerators for the instance type.\n

" + } + }, + "com.amazonaws.computeoptimizer#GpuCount": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#GpuInfo": { + "type": "structure", + "members": { + "gpus": { + "target": "com.amazonaws.computeoptimizer#Gpus", + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerators for the instance type.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the instance type.\n

" + } + }, + "com.amazonaws.computeoptimizer#GpuMemorySizeInMiB": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#Gpus": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#Gpu" + } + }, + "com.amazonaws.computeoptimizer#High": { + "type": "long", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#Identifier": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#Idle": { + "type": "enum", + "members": { + "TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "True" + } + }, + "FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "False" + } + } + } + }, + "com.amazonaws.computeoptimizer#IncludeMemberAccounts": { + "type": "boolean", + "traits": { + "smithy.api#default": false + } + }, + "com.amazonaws.computeoptimizer#InferredWorkloadSaving": { + "type": "structure", + "members": { + "inferredWorkloadTypes": { + "target": "com.amazonaws.computeoptimizer#InferredWorkloadTypes", + "traits": { + "smithy.api#documentation": "

The applications that might be running on the instance as inferred by Compute Optimizer.

\n

Compute Optimizer can infer if one of the following applications might be running on\n the instance:

  • AmazonEmr - Infers that Amazon EMR might be running on the instance.
  • ApacheCassandra - Infers that Apache Cassandra might be running on the instance.
  • ApacheHadoop - Infers that Apache Hadoop might be running on the instance.
  • Memcached - Infers that Memcached might be running on the instance.
  • NGINX - Infers that NGINX might be running on the instance.
  • PostgreSql - Infers that PostgreSQL might be running on the instance.
  • Redis - Infers that Redis might be running on the instance.
  • Kafka - Infers that Kafka might be running on the instance.
  • SQLServer - Infers that SQLServer might be running on the instance.
" + } + }, + "estimatedMonthlySavings": { + "target": "com.amazonaws.computeoptimizer#EstimatedMonthlySavings", + "traits": { + "smithy.api#documentation": "

An object that describes the estimated monthly savings amount possible by adopting Compute Optimizer recommendations for a given\n resource. This is based on the On-Demand instance pricing.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The estimated monthly savings after you adjust the configurations of your instances running on the \n inferred workload types to the recommended configurations. If the inferredWorkloadTypes\n list contains multiple entries, then the savings are the sum of the monthly savings from instances \n that run the exact combination of the inferred workload types.\n

" + } + }, + "com.amazonaws.computeoptimizer#InferredWorkloadSavings": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#InferredWorkloadSaving" + } + }, + "com.amazonaws.computeoptimizer#InferredWorkloadType": { + "type": "enum", + "members": { + "AMAZON_EMR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AmazonEmr" + } + }, + "APACHE_CASSANDRA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ApacheCassandra" + } + }, + "APACHE_HADOOP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ApacheHadoop" + } + }, + "MEMCACHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Memcached" + } + }, + "NGINX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Nginx" + } + }, + "POSTGRE_SQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PostgreSql" + } + }, + "REDIS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Redis" + } + }, + "KAFKA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Kafka" + } + }, + "SQLSERVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SQLServer" + } + } + } + }, + "com.amazonaws.computeoptimizer#InferredWorkloadTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#InferredWorkloadType" + } + }, + "com.amazonaws.computeoptimizer#InferredWorkloadTypesPreference": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Inactive" + } + } + } + }, + "com.amazonaws.computeoptimizer#InstanceArn": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#InstanceArns": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#InstanceArn" + } + }, + "com.amazonaws.computeoptimizer#InstanceEstimatedMonthlySavings": { + "type": "structure", + "members": { + "currency": { + "target": "com.amazonaws.computeoptimizer#Currency", + "traits": { + "smithy.api#documentation": "

\n The currency of the estimated monthly savings.\n

" + } + }, + "value": { + "target": "com.amazonaws.computeoptimizer#Value", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The value of the estimated monthly savings.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n An object that describes the estimated monthly savings possible by adopting Compute Optimizer’s Amazon EC2 instance \n recommendations. This is based on the Savings Plans and Reserved Instances pricing discounts.\n

" + } + }, + "com.amazonaws.computeoptimizer#InstanceIdle": { + "type": "enum", + "members": { + "TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "True" + } + }, + "FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "False" + } + } + } + }, + "com.amazonaws.computeoptimizer#InstanceName": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#InstanceRecommendation": { + "type": "structure", + "members": { + "instanceArn": { + "target": "com.amazonaws.computeoptimizer#InstanceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the current instance.

" + } + }, + "accountId": { + "target": "com.amazonaws.computeoptimizer#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID of the instance.

" + } + }, + "instanceName": { + "target": "com.amazonaws.computeoptimizer#InstanceName", + "traits": { + "smithy.api#documentation": "

The name of the current instance.

" + } + }, + "currentInstanceType": { + "target": "com.amazonaws.computeoptimizer#CurrentInstanceType", + "traits": { + "smithy.api#documentation": "

The instance type of the current instance.

" + } + }, + "finding": { + "target": "com.amazonaws.computeoptimizer#Finding", + "traits": { + "smithy.api#documentation": "

The finding classification of the instance.

\n

Findings for instances include:

  • Underprovisioned - An instance is considered under-provisioned when at least one specification of your instance, such as CPU, memory, or network, does not meet the performance requirements of your workload. Under-provisioned instances may lead to poor application performance.
  • Overprovisioned - An instance is considered over-provisioned when at least one specification of your instance, such as CPU, memory, or network, can be sized down while still meeting the performance requirements of your workload, and no specification is under-provisioned. Over-provisioned instances may lead to unnecessary infrastructure cost.
  • Optimized - An instance is considered optimized when all specifications of your instance, such as CPU, memory, and network, meet the performance requirements of your workload and it is not over-provisioned. For optimized resources, Compute Optimizer might recommend a new generation instance type.
\n \n

The valid values in your API responses appear as OVER_PROVISIONED, UNDER_PROVISIONED, or OPTIMIZED.

\n
" + } + }, + "findingReasonCodes": { + "target": "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCodes", + "traits": { + "smithy.api#documentation": "

The reason for the finding classification of the instance.

\n

Finding reason codes for instances include:

  • CPUOverprovisioned - The instance’s CPU configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the CPUUtilization metric of the current instance during the look-back period.
  • CPUUnderprovisioned - The instance’s CPU configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better CPU performance. This is identified by analyzing the CPUUtilization metric of the current instance during the look-back period.
  • MemoryOverprovisioned - The instance’s memory configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the memory utilization metric of the current instance during the look-back period.
  • MemoryUnderprovisioned - The instance’s memory configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better memory performance. This is identified by analyzing the memory utilization metric of the current instance during the look-back period. Memory utilization is analyzed only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling memory utilization with the Amazon CloudWatch Agent in the Compute Optimizer User Guide. On Linux instances, Compute Optimizer analyzes the mem_used_percent metric in the CWAgent namespace, or the legacy MemoryUtilization metric in the System/Linux namespace. On Windows instances, Compute Optimizer analyzes the Memory % Committed Bytes In Use metric in the CWAgent namespace.
  • EBSThroughputOverprovisioned - The instance’s EBS throughput configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the VolumeReadBytes and VolumeWriteBytes metrics of EBS volumes attached to the current instance during the look-back period.
  • EBSThroughputUnderprovisioned - The instance’s EBS throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS throughput performance. This is identified by analyzing the VolumeReadBytes and VolumeWriteBytes metrics of EBS volumes attached to the current instance during the look-back period.
  • EBSIOPSOverprovisioned - The instance’s EBS IOPS configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the VolumeReadOps and VolumeWriteOps metrics of EBS volumes attached to the current instance during the look-back period.
  • EBSIOPSUnderprovisioned - The instance’s EBS IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS IOPS performance. This is identified by analyzing the VolumeReadOps and VolumeWriteOps metrics of EBS volumes attached to the current instance during the look-back period.
  • NetworkBandwidthOverprovisioned - The instance’s network bandwidth configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the NetworkIn and NetworkOut metrics of the current instance during the look-back period.
  • NetworkBandwidthUnderprovisioned - The instance’s network bandwidth configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network bandwidth performance. This is identified by analyzing the NetworkIn and NetworkOut metrics of the current instance during the look-back period. This finding reason happens when the NetworkIn or NetworkOut performance of an instance is impacted.
  • NetworkPPSOverprovisioned - The instance’s network PPS (packets per second) configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the NetworkPacketsIn and NetworkPacketsOut metrics of the current instance during the look-back period.
  • NetworkPPSUnderprovisioned - The instance’s network PPS (packets per second) configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network PPS performance. This is identified by analyzing the NetworkPacketsIn and NetworkPacketsOut metrics of the current instance during the look-back period.
  • DiskIOPSOverprovisioned - The instance’s disk IOPS configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the DiskReadOps and DiskWriteOps metrics of the current instance during the look-back period.
  • DiskIOPSUnderprovisioned - The instance’s disk IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk IOPS performance. This is identified by analyzing the DiskReadOps and DiskWriteOps metrics of the current instance during the look-back period.
  • DiskThroughputOverprovisioned - The instance’s disk throughput configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the DiskReadBytes and DiskWriteBytes metrics of the current instance during the look-back period.
  • DiskThroughputUnderprovisioned - The instance’s disk throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk throughput performance. This is identified by analyzing the DiskReadBytes and DiskWriteBytes metrics of the current instance during the look-back period.
\n \n

For more information about instance metrics, see List the\n available CloudWatch metrics for your instances in the\n Amazon Elastic Compute Cloud User Guide. For more information\n about EBS volume metrics, see Amazon CloudWatch\n metrics for Amazon EBS in the Amazon Elastic Compute Cloud\n User Guide.

\n
" + } + }, + "utilizationMetrics": { + "target": "com.amazonaws.computeoptimizer#UtilizationMetrics", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the utilization metrics of the instance.

" + } + }, + "lookBackPeriodInDays": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of days for which utilization metrics were analyzed for the\n instance.

" + } + }, + "recommendationOptions": { + "target": "com.amazonaws.computeoptimizer#RecommendationOptions", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the recommendation options for the instance.

" + } + }, + "recommendationSources": { + "target": "com.amazonaws.computeoptimizer#RecommendationSources", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the source resource of the recommendation.

" + } + }, + "lastRefreshTimestamp": { + "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the instance recommendation was last generated.

" + } + }, + "currentPerformanceRisk": { + "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", + "traits": { + "smithy.api#documentation": "

The risk of the current instance not meeting the performance needs of its workloads.\n The higher the risk, the more likely the current instance cannot meet the performance\n requirements of its workload.

" + } + }, + "effectiveRecommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#EffectiveRecommendationPreferences", + "traits": { + "smithy.api#documentation": "

An object that describes the effective recommendation preferences for the\n instance.

" + } + }, + "inferredWorkloadTypes": { + "target": "com.amazonaws.computeoptimizer#InferredWorkloadTypes", + "traits": { + "smithy.api#documentation": "

The applications that might be running on the instance as inferred by Compute Optimizer.

\n

Compute Optimizer can infer if one of the following applications might be running on\n the instance:

  • AmazonEmr - Infers that Amazon EMR might be running on the instance.
  • ApacheCassandra - Infers that Apache Cassandra might be running on the instance.
  • ApacheHadoop - Infers that Apache Hadoop might be running on the instance.
  • Memcached - Infers that Memcached might be running on the instance.
  • NGINX - Infers that NGINX might be running on the instance.
  • PostgreSql - Infers that PostgreSQL might be running on the instance.
  • Redis - Infers that Redis might be running on the instance.
  • Kafka - Infers that Kafka might be running on the instance.
  • SQLServer - Infers that SQLServer might be running on the instance.
" + } + }, + "instanceState": { + "target": "com.amazonaws.computeoptimizer#InstanceState", + "traits": { + "smithy.api#documentation": "

\n The state of the instance when the recommendation was generated.\n

" + } + }, + "tags": { + "target": "com.amazonaws.computeoptimizer#Tags", + "traits": { + "smithy.api#documentation": "

\n A list of tags assigned to your Amazon EC2 instance recommendations.\n

" + } + }, + "externalMetricStatus": { + "target": "com.amazonaws.computeoptimizer#ExternalMetricStatus", + "traits": { + "smithy.api#documentation": "

\n An object that describes Compute Optimizer's integration status with your external metrics provider.\n

" + } + }, + "currentInstanceGpuInfo": { + "target": "com.amazonaws.computeoptimizer#GpuInfo", + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the current instance type.\n

" + } + }, + "idle": { + "target": "com.amazonaws.computeoptimizer#InstanceIdle", + "traits": { + "smithy.api#documentation": "

\n Describes if an Amazon EC2 instance is idle.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an Amazon EC2 instance recommendation.

" + } + }, + "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCode": { + "type": "enum", + "members": { + "CPU_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CPUOverprovisioned" + } + }, + "CPU_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CPUUnderprovisioned" + } + }, + "MEMORY_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MemoryOverprovisioned" + } + }, + "MEMORY_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MemoryUnderprovisioned" + } + }, + "EBS_THROUGHPUT_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EBSThroughputOverprovisioned" + } + }, + "EBS_THROUGHPUT_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EBSThroughputUnderprovisioned" + } + }, + "EBS_IOPS_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EBSIOPSOverprovisioned" + } + }, + "EBS_IOPS_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EBSIOPSUnderprovisioned" + } + }, + "NETWORK_BANDWIDTH_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NetworkBandwidthOverprovisioned" + } + }, + "NETWORK_BANDWIDTH_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NetworkBandwidthUnderprovisioned" + } + }, + "NETWORK_PPS_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NetworkPPSOverprovisioned" + } + }, + "NETWORK_PPS_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NetworkPPSUnderprovisioned" + } + }, + "DISK_IOPS_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DiskIOPSOverprovisioned" + } + }, + "DISK_IOPS_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DiskIOPSUnderprovisioned" + } + }, + "DISK_THROUGHPUT_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DiskThroughputOverprovisioned" + } + }, + "DISK_THROUGHPUT_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DiskThroughputUnderprovisioned" + } + }, + "GPU_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GPUUnderprovisioned" + } + }, + "GPU_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GPUOverprovisioned" + } + }, + "GPU_MEMORY_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GPUMemoryUnderprovisioned" + } + }, + "GPU_MEMORY_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GPUMemoryOverprovisioned" + } + } + } + }, + "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCodes": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCode" + } + }, + "com.amazonaws.computeoptimizer#InstanceRecommendationOption": { + "type": "structure", + "members": { + "instanceType": { + "target": "com.amazonaws.computeoptimizer#InstanceType", + "traits": { + "smithy.api#documentation": "

The instance type of the instance recommendation.

" + } + }, + "instanceGpuInfo": { + "target": "com.amazonaws.computeoptimizer#GpuInfo", + "traits": { + "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the recommended instance type.\n

" + } + }, + "projectedUtilizationMetrics": { + "target": "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the projected utilization metrics of the instance\n recommendation option.

\n \n

The Cpu and Memory metrics are the only projected\n utilization metrics returned. Additionally, the Memory metric is\n returned only for resources that have the unified CloudWatch agent installed\n on them. For more information, see Enabling Memory\n Utilization with the CloudWatch Agent.

\n
" } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.computeoptimizer#GetLicenseRecommendationsResponse": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + }, + "platformDifferences": { + "target": "com.amazonaws.computeoptimizer#PlatformDifferences", "traits": { - "smithy.api#documentation": "

\n The token to use to advance to the next page of license recommendations.\n

" + "smithy.api#documentation": "

Describes the configuration differences between the current instance and the\n recommended instance type. You should consider the configuration differences before\n migrating your workloads from the current instance to the recommended instance type. The\n Change the instance type guide for Linux and Change the instance type\n guide for Windows provide general guidance for getting started with an\n instance migration.

\n

Platform differences include:

  • Hypervisor - The hypervisor of the recommended instance type is different than that of the current instance. For example, the recommended instance type uses a Nitro hypervisor and the current instance uses a Xen hypervisor. The differences that you should consider between these hypervisors are covered in the Nitro Hypervisor section of the Amazon EC2 frequently asked questions. For more information, see Instances built on the Nitro System in the Amazon EC2 User Guide for Linux, or Instances built on the Nitro System in the Amazon EC2 User Guide for Windows.
  • NetworkInterface - The network interface of the recommended instance type is different than that of the current instance. For example, the recommended instance type supports enhanced networking and the current instance might not. To enable enhanced networking for the recommended instance type, you must install the Elastic Network Adapter (ENA) driver or the Intel 82599 Virtual Function driver. For more information, see Networking and storage features and Enhanced networking on Linux in the Amazon EC2 User Guide for Linux, or Networking and storage features and Enhanced networking on Windows in the Amazon EC2 User Guide for Windows.
  • StorageInterface - The storage interface of the recommended instance type is different than that of the current instance. For example, the recommended instance type uses an NVMe storage interface and the current instance does not. To access NVMe volumes for the recommended instance type, you will need to install or upgrade the NVMe driver. For more information, see Networking and storage features and Amazon EBS and NVMe on Linux instances in the Amazon EC2 User Guide for Linux, or Networking and storage features and Amazon EBS and NVMe on Windows instances in the Amazon EC2 User Guide for Windows.
  • InstanceStoreAvailability - The recommended instance type does not support instance store volumes and the current instance does. Before migrating, you might need to back up the data on your instance store volumes if you want to preserve them. For more information, see How do I back up an instance store volume on my Amazon EC2 instance to Amazon EBS? in the Amazon Web Services Premium Support Knowledge Base. For more information, see Networking and storage features and Amazon EC2 instance store in the Amazon EC2 User Guide for Linux, or see Networking and storage features and Amazon EC2 instance store in the Amazon EC2 User Guide for Windows.
  • VirtualizationType - The recommended instance type uses the hardware virtual machine (HVM) virtualization type and the current instance uses the paravirtual (PV) virtualization type. For more information about the differences between these virtualization types, see Linux AMI virtualization types in the Amazon EC2 User Guide for Linux, or Windows AMI virtualization types in the Amazon EC2 User Guide for Windows.
  • Architecture - The CPU architecture between the recommended instance type and the current instance is different. For example, the recommended instance type might use an Arm CPU architecture and the current instance type might use a different one, such as x86. Before migrating, you should consider recompiling the software on your instance for the new architecture. Alternatively, you might switch to an Amazon Machine Image (AMI) that supports the new architecture. For more information about the CPU architecture for each instance type, see Amazon EC2 Instance Types.
" } }, - "licenseRecommendations": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendations", + "performanceRisk": { + "target": "com.amazonaws.computeoptimizer#PerformanceRisk", "traits": { - "smithy.api#documentation": "

\n An array of objects that describe license recommendations.\n

" + "smithy.api#default": 0, + "smithy.api#documentation": "

The performance risk of the instance recommendation option.

\n

Performance risk indicates the likelihood of the recommended instance type not meeting\n the resource needs of your workload. Compute Optimizer calculates an individual\n performance risk score for each specification of the recommended instance, including\n CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput,\n and network PPS.\n The performance\n risk of the recommended instance is calculated as the maximum performance risk score\n across the analyzed resource specifications.

\n

The value ranges from 0 - 4, with 0 meaning\n that the recommended resource is predicted to always provide enough hardware capability.\n The higher the performance risk is, the more likely you should validate whether the\n recommendation will meet the performance requirements of your workload before migrating\n your resource.

" } }, - "errors": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", + "rank": { + "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

\n An array of objects that describe errors of the request.\n

" + "smithy.api#default": 0, + "smithy.api#documentation": "

The rank of the instance recommendation option.

\n

The top recommendation option is ranked as 1.

" } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.computeoptimizer#GetRecommendationError": { - "type": "structure", - "members": { - "identifier": { - "target": "com.amazonaws.computeoptimizer#Identifier", + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", "traits": { - "smithy.api#documentation": "

The ID of the error.

" + "smithy.api#documentation": "

An object that describes the savings opportunity for the instance recommendation\n option. Savings opportunity includes the estimated monthly savings amount and\n percentage.

" } }, - "code": { - "target": "com.amazonaws.computeoptimizer#Code", + "savingsOpportunityAfterDiscounts": { + "target": "com.amazonaws.computeoptimizer#InstanceSavingsOpportunityAfterDiscounts", "traits": { - "smithy.api#documentation": "

The error code.

" + "smithy.api#documentation": "

\n An object that describes the savings opportunity for the instance recommendation option that includes Savings Plans and Reserved Instances \n discounts. Savings opportunity includes the estimated monthly savings and percentage.\n

" } }, - "message": { - "target": "com.amazonaws.computeoptimizer#Message", + "migrationEffort": { + "target": "com.amazonaws.computeoptimizer#MigrationEffort", "traits": { - "smithy.api#documentation": "

The message, or reason, for the error.

" + "smithy.api#documentation": "

The level of effort required to migrate from the current instance type to the\n recommended instance type.

\n

For example, the migration effort is Low if Amazon EMR is the\n inferred workload type and an Amazon Web Services Graviton instance type is recommended.\n The migration effort is Medium if a workload type couldn't be inferred but\n an Amazon Web Services Graviton instance type is recommended. The migration effort is\n VeryLow if both the current and recommended instance types are of the\n same CPU architecture.

" } } }, "traits": { - "smithy.api#documentation": "

Describes an error experienced when getting recommendations.

\n

For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group, or if you request recommendations for an instance of an\n unsupported instance family.

" + "smithy.api#documentation": "

Describes a recommendation option for an Amazon EC2 instance.

" } }, - "com.amazonaws.computeoptimizer#GetRecommendationErrors": { + "com.amazonaws.computeoptimizer#InstanceRecommendations": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationError" + "target": "com.amazonaws.computeoptimizer#InstanceRecommendation" } }, - "com.amazonaws.computeoptimizer#GetRecommendationPreferences": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferencesRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferencesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" - }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" - }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" - }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" - }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" - }, - { - "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" + "com.amazonaws.computeoptimizer#InstanceSavingsEstimationMode": { + "type": "structure", + "members": { + "source": { + "target": "com.amazonaws.computeoptimizer#InstanceSavingsEstimationModeSource", + "traits": { + "smithy.api#documentation": "

\n Describes the source for calculating the savings opportunity for Amazon EC2 instances.\n

" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Returns existing recommendation preferences, such as enhanced infrastructure\n metrics.

\n

Use the scope parameter to specify which preferences to return. You can\n specify to return preferences for an organization, a specific account ID, or a specific\n EC2 instance or Auto Scaling group Amazon Resource Name (ARN).

\n

For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

", - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "recommendationPreferencesDetails", - "pageSize": "maxResults" - } + "smithy.api#documentation": "

\n Describes the savings estimation mode used for calculating savings opportunity for Amazon EC2 instances.\n

" } }, - "com.amazonaws.computeoptimizer#GetRecommendationPreferencesRequest": { - "type": "structure", + "com.amazonaws.computeoptimizer#InstanceSavingsEstimationModeSource": { + "type": "enum", "members": { - "resourceType": { - "target": "com.amazonaws.computeoptimizer#ResourceType", - "traits": { - "smithy.api#documentation": "

The target resource type of the recommendation preference for which to return\n preferences.

\n

The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.

\n \n

The valid values for this parameter are Ec2Instance and\n AutoScalingGroup.

\n
", - "smithy.api#required": {} - } - }, - "scope": { - "target": "com.amazonaws.computeoptimizer#Scope", + "PUBLIC_PRICING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object that describes the scope of the recommendation preference to return.

\n

You can return recommendation preferences that are created at the organization level\n (for management accounts of an organization only), account level, and resource level.\n For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

" + "smithy.api#enumValue": "PublicPricing" } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "COST_EXPLORER_RIGHTSIZING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of recommendation preferences.

" + "smithy.api#enumValue": "CostExplorerRightsizing" } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "COST_OPTIMIZATION_HUB": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The maximum number of recommendation preferences to return with a single\n request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.

" + "smithy.api#enumValue": "CostOptimizationHub" } } - }, - "traits": { - "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#GetRecommendationPreferencesResponse": { + "com.amazonaws.computeoptimizer#InstanceSavingsOpportunityAfterDiscounts": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "savingsOpportunityPercentage": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of recommendation preferences.

\n

This value is null when there are no more pages of recommendation preferences to\n return.

" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The estimated monthly savings possible as a percentage of monthly cost after applying the Savings Plans and Reserved Instances discounts. \n This saving can be achieved by adopting Compute Optimizer’s EC2 instance recommendations.\n

" } }, - "recommendationPreferencesDetails": { - "target": "com.amazonaws.computeoptimizer#RecommendationPreferencesDetails", + "estimatedMonthlySavings": { + "target": "com.amazonaws.computeoptimizer#InstanceEstimatedMonthlySavings", "traits": { - "smithy.api#documentation": "

An array of objects that describe recommendation preferences.

" + "smithy.api#documentation": "

\n An object that describes the estimated monthly savings possible by adopting Compute Optimizer’s Amazon EC2 instance recommendations. This is \n based on pricing after applying the Savings Plans and Reserved Instances discounts.\n

" } } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

\n Describes the savings opportunity for instance recommendations after applying the Savings Plans and Reserved Instances discounts.\n

\n

Savings opportunity after discounts represents the estimated monthly savings you can achieve by \n implementing Compute Optimizer recommendations.

" } }, - "com.amazonaws.computeoptimizer#GetRecommendationSummaries": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationSummariesRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#GetRecommendationSummariesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" - }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" - }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" - }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + "com.amazonaws.computeoptimizer#InstanceState": { + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pending" + } }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "running" + } }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + "SHUTTING_DOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "shutting-down" + } }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" - } - ], - "traits": { - "smithy.api#documentation": "

Returns the optimization findings for an account.

\n

It returns the number of:

\n
    \n
  • \n

    Amazon EC2 instances in an account that are\n Underprovisioned, Overprovisioned, or\n Optimized.

    \n
  • \n
  • \n

    Auto Scaling groups in an account that are NotOptimized, or\n Optimized.

    \n
  • \n
  • \n

    Amazon EBS volumes in an account that are NotOptimized,\n or Optimized.

    \n
  • \n
  • \n

    Lambda functions in an account that are NotOptimized,\n or Optimized.

    \n
  • \n
  • \n

    Amazon ECS services in an account that are Underprovisioned, \n Overprovisioned, or Optimized.

    \n
  • \n
", - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "recommendationSummaries", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.computeoptimizer#GetRecommendationSummariesRequest": { - "type": "structure", - "members": { - "accountIds": { - "target": "com.amazonaws.computeoptimizer#AccountIds", + "TERMINATED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services account for which to return recommendation\n summaries.

\n

If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return recommendation summaries.

\n

Only one account ID can be specified per request.

" + "smithy.api#enumValue": "terminated" } }, - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", + "STOPPING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of recommendation summaries.

" + "smithy.api#enumValue": "stopping" } }, - "maxResults": { - "target": "com.amazonaws.computeoptimizer#MaxResults", + "STOPPED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The maximum number of recommendation summaries to return with a single request.

\n

To retrieve the remaining results, make another request with the returned\n nextToken value.
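As an illustration of the maxResults/nextToken paging described above, here is a minimal Soto-based sketch. The client type, async method, and generated shape names follow Soto's usual code-generation conventions and are assumptions for illustration, not part of this model file.

```swift
import SotoComputeOptimizer

// Minimal paging sketch (assumed Soto-generated names): keep calling
// getRecommendationSummaries until the service stops returning a nextToken.
func printAllRecommendationSummaries(using client: AWSClient) async throws {
    let computeOptimizer = ComputeOptimizer(client: client, region: .useast1)
    var nextToken: String? = nil
    repeat {
        let response = try await computeOptimizer.getRecommendationSummaries(
            .init(maxResults: 100, nextToken: nextToken)
        )
        for summary in response.recommendationSummaries ?? [] {
            print(summary)
        }
        nextToken = response.nextToken  // nil once the last page has been returned
    } while nextToken != nil
}
```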

" + "smithy.api#enumValue": "stopped" } } + } + }, + "com.amazonaws.computeoptimizer#InstanceType": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.computeoptimizer#ErrorMessage" + } }, "traits": { - "smithy.api#input": {} + "smithy.api#documentation": "

An internal error has occurred. Try your call again.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 } }, - "com.amazonaws.computeoptimizer#GetRecommendationSummariesResponse": { + "com.amazonaws.computeoptimizer#InvalidParameterValueException": { "type": "structure", "members": { - "nextToken": { - "target": "com.amazonaws.computeoptimizer#NextToken", - "traits": { - "smithy.api#documentation": "

The token to use to advance to the next page of recommendation summaries.

\n

This value is null when there are no more pages of recommendation summaries to\n return.

" - } - }, - "recommendationSummaries": { - "target": "com.amazonaws.computeoptimizer#RecommendationSummaries", - "traits": { - "smithy.api#documentation": "

An array of objects that summarize a recommendation.

" - } + "message": { + "target": "com.amazonaws.computeoptimizer#ErrorMessage" } }, "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

The value supplied for the input parameter is out of range or not valid.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 } }, - "com.amazonaws.computeoptimizer#Gpu": { + "com.amazonaws.computeoptimizer#JobFilter": { "type": "structure", "members": { - "gpuCount": { - "target": "com.amazonaws.computeoptimizer#GpuCount", + "name": { + "target": "com.amazonaws.computeoptimizer#JobFilterName", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The number of GPUs for the instance type.\n

" + "smithy.api#documentation": "

The name of the filter.

\n

Specify ResourceType to return export jobs of a specific resource type\n (for example, Ec2Instance).

\n

Specify JobStatus to return export jobs with a specific status (for example,\n Complete).

" } }, - "gpuMemorySizeInMiB": { - "target": "com.amazonaws.computeoptimizer#GpuMemorySizeInMiB", + "values": { + "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The total size of the memory for the GPU accelerators for the instance type, in MiB.\n

" + "smithy.api#documentation": "

The value of the filter.

\n

The valid values for this parameter are as follows, depending on what you specify for\n the name parameter:

\n
    \n
  • \n

    Specify Ec2Instance or AutoScalingGroup if you\n specify the name parameter as ResourceType. There is\n no filter for EBS volumes because volume recommendations cannot be exported at\n this time.

    \n
  • \n
  • \n

    Specify Queued, InProgress, Complete,\n or Failed if you specify the name parameter as\n JobStatus.

    \n
  • \n
" } } }, "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerators for the instance type.\n

" - } - }, - "com.amazonaws.computeoptimizer#GpuCount": { - "type": "integer", - "traits": { - "smithy.api#default": 0 + "smithy.api#documentation": "

Describes a filter that returns a more specific list of recommendation export jobs.\n Use this filter with the DescribeRecommendationExportJobs\n action.

\n

You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and Filter with\n the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.
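A hypothetical Soto usage sketch for the JobFilter described above; the generated type, member, and enum case names (for example, `.jobStatus`) are assumptions based on Soto's conventions.

```swift
import SotoComputeOptimizer

// Sketch (assumed Soto-generated names): list export jobs whose JobStatus is Complete.
func listCompletedExportJobs(using client: AWSClient) async throws {
    let computeOptimizer = ComputeOptimizer(client: client, region: .useast1)
    let statusFilter = ComputeOptimizer.JobFilter(name: .jobStatus, values: ["Complete"])
    let response = try await computeOptimizer.describeRecommendationExportJobs(
        .init(filters: [statusFilter])
    )
    for job in response.recommendationExportJobs ?? [] {
        print(job)
    }
}
```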

" } }, - "com.amazonaws.computeoptimizer#GpuInfo": { - "type": "structure", + "com.amazonaws.computeoptimizer#JobFilterName": { + "type": "enum", "members": { - "gpus": { - "target": "com.amazonaws.computeoptimizer#Gpus", + "RESOURCE_TYPE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerators for the instance type.\n

" + "smithy.api#enumValue": "ResourceType" + } + }, + "JOB_STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JobStatus" } } - }, - "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the instance type.\n

" - } - }, - "com.amazonaws.computeoptimizer#GpuMemorySizeInMiB": { - "type": "integer", - "traits": { - "smithy.api#default": 0 } }, - "com.amazonaws.computeoptimizer#Gpus": { + "com.amazonaws.computeoptimizer#JobFilters": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#Gpu" - } - }, - "com.amazonaws.computeoptimizer#High": { - "type": "long", - "traits": { - "smithy.api#default": 0 + "target": "com.amazonaws.computeoptimizer#JobFilter" } }, - "com.amazonaws.computeoptimizer#Identifier": { + "com.amazonaws.computeoptimizer#JobId": { "type": "string" }, - "com.amazonaws.computeoptimizer#IncludeMemberAccounts": { - "type": "boolean", - "traits": { - "smithy.api#default": false + "com.amazonaws.computeoptimizer#JobIds": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#JobId" } }, - "com.amazonaws.computeoptimizer#InferredWorkloadSaving": { - "type": "structure", + "com.amazonaws.computeoptimizer#JobStatus": { + "type": "enum", "members": { - "inferredWorkloadTypes": { - "target": "com.amazonaws.computeoptimizer#InferredWorkloadTypes", + "QUEUED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The applications that might be running on the instance as inferred by Compute Optimizer.

\n

Compute Optimizer can infer if one of the following applications might be running on\n the instance:

\n
    \n
  • \n

    \n AmazonEmr - Infers that Amazon EMR might be running on\n the instance.

    \n
  • \n
  • \n

    \n ApacheCassandra - Infers that Apache Cassandra might be running\n on the instance.

    \n
  • \n
  • \n

    \n ApacheHadoop - Infers that Apache Hadoop might be running on the\n instance.

    \n
  • \n
  • \n

    \n Memcached - Infers that Memcached might be running on the\n instance.

    \n
  • \n
  • \n

    \n NGINX - Infers that NGINX might be running on the\n instance.

    \n
  • \n
  • \n

    \n PostgreSql - Infers that PostgreSQL might be running on the\n instance.

    \n
  • \n
  • \n

    \n Redis - Infers that Redis might be running on the\n instance.

    \n
  • \n
  • \n

    \n Kafka - Infers that Kafka might be running on the\n instance.

    \n
  • \n
  • \n

    \n SQLServer - Infers that SQLServer might be running on the\n instance.

    \n
  • \n
" + "smithy.api#enumValue": "Queued" } }, - "estimatedMonthlySavings": { - "target": "com.amazonaws.computeoptimizer#EstimatedMonthlySavings", + "IN_PROGRESS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object that describes the estimated monthly savings amount possible by adopting Compute Optimizer recommendations for a given\n resource. This is based on the On-Demand instance pricing.

" + "smithy.api#enumValue": "InProgress" + } + }, + "COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Complete" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" } } - }, - "traits": { - "smithy.api#documentation": "

\n The estimated monthly savings after you adjust the configurations of your instances running on the \n inferred workload types to the recommended configurations. If the inferredWorkloadTypes\n list contains multiple entries, then the savings are the sum of the monthly savings from instances \n that run the exact combination of the inferred workload types.\n

" } }, - "com.amazonaws.computeoptimizer#InferredWorkloadSavings": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#InferredWorkloadSaving" + "com.amazonaws.computeoptimizer#LambdaEffectiveRecommendationPreferences": { + "type": "structure", + "members": { + "savingsEstimationMode": { + "target": "com.amazonaws.computeoptimizer#LambdaSavingsEstimationMode", + "traits": { + "smithy.api#documentation": "

\n Describes the savings estimation mode applied for calculating savings opportunity for Lambda functions.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Lambda functions.\n

" } }, - "com.amazonaws.computeoptimizer#InferredWorkloadType": { - "type": "enum", + "com.amazonaws.computeoptimizer#LambdaEstimatedMonthlySavings": { + "type": "structure", "members": { - "AMAZON_EMR": { - "target": "smithy.api#Unit", + "currency": { + "target": "com.amazonaws.computeoptimizer#Currency", "traits": { - "smithy.api#enumValue": "AmazonEmr" + "smithy.api#documentation": "

\n The currency of the estimated monthly savings.\n

" } }, - "APACHE_CASSANDRA": { - "target": "smithy.api#Unit", + "value": { + "target": "com.amazonaws.computeoptimizer#Value", "traits": { - "smithy.api#enumValue": "ApacheCassandra" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The value of the estimated monthly savings.\n

" } - }, - "APACHE_HADOOP": { + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the estimated monthly savings possible for Lambda functions by adopting Compute Optimizer recommendations. This is based \n on Lambda functions pricing after applying Savings Plans discounts.\n

" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricName": { + "type": "enum", + "members": { + "DURATION": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "ApacheHadoop" + "smithy.api#enumValue": "Duration" } - }, - "MEMCACHED": { + } + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricStatistic": { + "type": "enum", + "members": { + "LOWER_BOUND": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Memcached" + "smithy.api#enumValue": "LowerBound" } }, - "NGINX": { + "UPPER_BOUND": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Nginx" + "smithy.api#enumValue": "UpperBound" } }, - "POSTGRE_SQL": { + "EXPECTED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "PostgreSql" + "smithy.api#enumValue": "Expected" } - }, - "REDIS": { - "target": "smithy.api#Unit", + } + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetric": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricName", "traits": { - "smithy.api#enumValue": "Redis" + "smithy.api#documentation": "

The name of the projected utilization metric.

" } }, - "KAFKA": { - "target": "smithy.api#Unit", + "statistic": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricStatistic", "traits": { - "smithy.api#enumValue": "Kafka" + "smithy.api#documentation": "

The statistic of the projected utilization metric.

" } }, - "SQLSERVER": { - "target": "smithy.api#Unit", + "value": { + "target": "com.amazonaws.computeoptimizer#MetricValue", "traits": { - "smithy.api#enumValue": "SQLServer" + "smithy.api#default": 0, + "smithy.api#documentation": "

The values of the projected utilization metrics.

" } } + }, + "traits": { + "smithy.api#documentation": "

Describes a projected utilization metric of a Lambda function\n recommendation option.

" } }, - "com.amazonaws.computeoptimizer#InferredWorkloadTypes": { + "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetrics": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#InferredWorkloadType" + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetric" } }, - "com.amazonaws.computeoptimizer#InferredWorkloadTypesPreference": { - "type": "enum", + "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOption": { + "type": "structure", "members": { - "ACTIVE": { - "target": "smithy.api#Unit", + "rank": { + "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#enumValue": "Active" + "smithy.api#default": 0, + "smithy.api#documentation": "

The rank of the function recommendation option.

\n

The top recommendation option is ranked as 1.

" } }, - "INACTIVE": { - "target": "smithy.api#Unit", + "memorySize": { + "target": "com.amazonaws.computeoptimizer#MemorySize", "traits": { - "smithy.api#enumValue": "Inactive" + "smithy.api#default": 0, + "smithy.api#documentation": "

The memory size, in MB, of the function recommendation option.

" + } + }, + "projectedUtilizationMetrics": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetrics", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the projected utilization metrics of the function\n recommendation option.

" + } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "traits": { + "smithy.api#documentation": "

An object that describes the savings opportunity for the Lambda function\n recommendation option. Savings opportunity includes the estimated monthly savings amount\n and percentage.

" + } + }, + "savingsOpportunityAfterDiscounts": { + "target": "com.amazonaws.computeoptimizer#LambdaSavingsOpportunityAfterDiscounts", + "traits": { + "smithy.api#documentation": "

\n An object that describes the savings opportunity for the Lambda recommendation option that includes Savings Plans \n discounts. Savings opportunity includes the estimated monthly savings and percentage.\n

" } } + }, + "traits": { + "smithy.api#documentation": "

Describes a recommendation option for a Lambda function.

" } }, - "com.amazonaws.computeoptimizer#InstanceArn": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#InstanceArns": { + "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOptions": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#InstanceArn" + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOption" } }, - "com.amazonaws.computeoptimizer#InstanceEstimatedMonthlySavings": { - "type": "structure", + "com.amazonaws.computeoptimizer#LambdaFunctionMetricName": { + "type": "enum", "members": { - "currency": { - "target": "com.amazonaws.computeoptimizer#Currency", + "DURATION": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n The currency of the estimated monthly savings.\n

" + "smithy.api#enumValue": "Duration" } }, - "value": { - "target": "com.amazonaws.computeoptimizer#Value", + "MEMORY": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The value of the estimated monthly savings.\n

" + "smithy.api#enumValue": "Memory" } } - }, - "traits": { - "smithy.api#documentation": "

\n An object that describes the estimated monthly savings possible by adopting Compute Optimizer’s Amazon EC2 instance \n recommendations. This is based on the Savings Plans and Reserved Instances pricing discounts.\n

" } }, - "com.amazonaws.computeoptimizer#InstanceIdle": { + "com.amazonaws.computeoptimizer#LambdaFunctionMetricStatistic": { "type": "enum", "members": { - "TRUE": { + "MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "True" + "smithy.api#enumValue": "Maximum" } }, - "FALSE": { + "AVERAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "False" + "smithy.api#enumValue": "Average" } } } }, - "com.amazonaws.computeoptimizer#InstanceName": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#InstanceRecommendation": { + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendation": { "type": "structure", "members": { - "instanceArn": { - "target": "com.amazonaws.computeoptimizer#InstanceArn", + "functionArn": { + "target": "com.amazonaws.computeoptimizer#FunctionArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the current instance.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the current function.

" + } + }, + "functionVersion": { + "target": "com.amazonaws.computeoptimizer#FunctionVersion", + "traits": { + "smithy.api#documentation": "

The version number of the current function.

" } }, "accountId": { "target": "com.amazonaws.computeoptimizer#AccountId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID of the instance.

" + "smithy.api#documentation": "

The Amazon Web Services account ID of the function.

" } }, - "instanceName": { - "target": "com.amazonaws.computeoptimizer#InstanceName", + "currentMemorySize": { + "target": "com.amazonaws.computeoptimizer#MemorySize", "traits": { - "smithy.api#documentation": "

The name of the current instance.

" + "smithy.api#default": 0, + "smithy.api#documentation": "

The amount of memory, in MB, that's allocated to the current function.

" } }, - "currentInstanceType": { - "target": "com.amazonaws.computeoptimizer#CurrentInstanceType", + "numberOfInvocations": { + "target": "com.amazonaws.computeoptimizer#NumberOfInvocations", "traits": { - "smithy.api#documentation": "

The instance type of the current instance.

" + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of times your function code was executed during the look-back period.

" + } + }, + "utilizationMetrics": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetrics", + "traits": { + "smithy.api#documentation": "

An array of objects that describe the utilization metrics of the function.

" + } + }, + "lookbackPeriodInDays": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of days for which utilization metrics were analyzed for the\n function.

" + } + }, + "lastRefreshTimestamp": { + "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the function recommendation was last generated.

" } }, "finding": { - "target": "com.amazonaws.computeoptimizer#Finding", + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFinding", "traits": { - "smithy.api#documentation": "

The finding classification of the instance.

\n

Findings for instances include:

\n
    \n
  • \n

    \n \n Underprovisioned\n —An instance is\n considered under-provisioned when at least one specification of your instance,\n such as CPU, memory, or network, does not meet the performance requirements of\n your workload. Under-provisioned instances may lead to poor application\n performance.

    \n
  • \n
  • \n

    \n \n Overprovisioned\n —An instance is\n considered over-provisioned when at least one specification of your instance,\n such as CPU, memory, or network, can be sized down while still meeting the\n performance requirements of your workload, and no specification is\n under-provisioned. Over-provisioned instances may lead to unnecessary\n infrastructure cost.

    \n
  • \n
  • \n

    \n \n Optimized\n —An instance is\n considered optimized when all specifications of your instance, such as CPU,\n memory, and network, meet the performance requirements of your workload and it is\n not over-provisioned. For optimized resources, Compute Optimizer might\n recommend a new generation instance type.

    \n
  • \n
" + "smithy.api#documentation": "

The finding classification of the function.

\n

Findings for functions include:

\n
    \n
  • \n

    \n \n Optimized\n — The function is\n correctly provisioned to run your workload based on its current configuration\n and its utilization history. This finding classification does not include\n finding reason codes.

    \n
  • \n
  • \n

    \n \n NotOptimized\n — The function is\n performing at a higher level (over-provisioned) or at a lower level\n (under-provisioned) than required for your workload because its current\n configuration is not optimal. Over-provisioned resources might lead to\n unnecessary infrastructure cost, and under-provisioned resources might lead to\n poor application performance. This finding classification can include the\n MemoryOverprovisioned and MemoryUnderprovisioned\n finding reason codes.

    \n
  • \n
  • \n

    \n \n Unavailable\n — Compute Optimizer\n was unable to generate a recommendation for the function. This could be because\n the function has not accumulated sufficient metric data, or the function does\n not qualify for a recommendation. This finding classification can include the\n InsufficientData and Inconclusive finding reason\n codes.

    \n \n

    Functions with a finding of unavailable are not returned unless you\n specify the filter parameter with a value of\n Unavailable in your\n GetLambdaFunctionRecommendations request.

    \n
    \n
  • \n
" } }, "findingReasonCodes": { - "target": "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCodes", + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCodes", "traits": { - "smithy.api#documentation": "

The reason for the finding classification of the instance.

\n

Finding reason codes for instances include:

\n
    \n
  • \n

    \n \n CPUOverprovisioned\n — The\n instance’s CPU configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n CPUUtilization metric of the current instance during the\n look-back period.

    \n
  • \n
  • \n

    \n \n CPUUnderprovisioned\n — The\n instance’s CPU configuration doesn't meet the performance requirements of your\n workload and there is an alternative instance type that provides better CPU\n performance. This is identified by analyzing the CPUUtilization\n metric of the current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n MemoryOverprovisioned\n — The\n instance’s memory configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n memory utilization metric of the current instance during the look-back\n period.

    \n
  • \n
  • \n

    \n \n MemoryUnderprovisioned\n — The\n instance’s memory configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better\n memory performance. This is identified by analyzing the memory utilization\n metric of the current instance during the look-back period.

    \n \n

    Memory utilization is analyzed only for resources that have the unified\n CloudWatch agent installed on them. For more information, see\n Enabling memory\n utilization with the Amazon CloudWatch Agent in the\n Compute Optimizer User Guide. On Linux\n instances, Compute Optimizer analyzes the mem_used_percent\n metric in the CWAgent namespace, or the legacy\n MemoryUtilization metric in the System/Linux\n namespace. On Windows instances, Compute Optimizer analyzes the Memory\n % Committed Bytes In Use metric in the CWAgent\n namespace.

    \n
    \n
  • \n
  • \n

    \n \n EBSThroughputOverprovisioned\n —\n The instance’s EBS throughput configuration can be sized down while still\n meeting the performance requirements of your workload. This is identified by\n analyzing the VolumeReadBytes and VolumeWriteBytes metrics\n of EBS volumes attached to the current instance during the look-back\n period.

    \n
  • \n
  • \n

    \n \n EBSThroughputUnderprovisioned\n —\n The instance’s EBS throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type that\n provides better EBS throughput performance. This is identified by analyzing the\n VolumeReadBytes and VolumeWriteBytes metrics of EBS\n volumes attached to the current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n EBSIOPSOverprovisioned\n — The\n instance’s EBS IOPS configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n VolumeReadOps and VolumeWriteOps metrics of EBS\n volumes attached to the current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n EBSIOPSUnderprovisioned\n — The\n instance’s EBS IOPS configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better EBS\n IOPS performance. This is identified by analyzing the\n VolumeReadOps and VolumeWriteOps metrics of EBS\n volumes attached to the current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n NetworkBandwidthOverprovisioned\n \n — The instance’s network bandwidth configuration can be sized down while still\n meeting the performance requirements of your workload. This is identified by\n analyzing the NetworkIn and NetworkOut metrics of the\n current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n NetworkBandwidthUnderprovisioned\n \n — The instance’s network bandwidth configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type that\n provides better network bandwidth performance. This is identified by analyzing\n the NetworkIn and NetworkOut metrics of the current\n instance during the look-back period. This finding reason happens when the\n NetworkIn or NetworkOut performance of an instance\n is impacted.

    \n
  • \n
  • \n

    \n \n NetworkPPSOverprovisioned\n — The\n instance’s network PPS (packets per second) configuration can be sized down\n while still meeting the performance requirements of your workload. This is\n identified by analyzing the NetworkPacketsIn and\n NetworkPacketsOut metrics of the current instance during the\n look-back period.

    \n
  • \n
  • \n

    \n \n NetworkPPSUnderprovisioned\n — The\n instance’s network PPS (packets per second) configuration doesn't meet the\n performance requirements of your workload and there is an alternative instance\n type that provides better network PPS performance. This is identified by\n analyzing the NetworkPacketsIn and NetworkPacketsOut\n metrics of the current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n DiskIOPSOverprovisioned\n — The\n instance’s disk IOPS configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n DiskReadOps and DiskWriteOps metrics of the\n current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n DiskIOPSUnderprovisioned\n — The\n instance’s disk IOPS configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better\n disk IOPS performance. This is identified by analyzing the\n DiskReadOps and DiskWriteOps metrics of the\n current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n DiskThroughputOverprovisioned\n —\n The instance’s disk throughput configuration can be sized down while still\n meeting the performance requirements of your workload. This is identified by\n analyzing the DiskReadBytes and DiskWriteBytes metrics\n of the current instance during the look-back period.

    \n
  • \n
  • \n

    \n \n DiskThroughputUnderprovisioned\n —\n The instance’s disk throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type that\n provides better disk throughput performance. This is identified by analyzing the\n DiskReadBytes and DiskWriteBytes metrics of the\n current instance during the look-back period.

    \n
  • \n
\n \n

For more information about instance metrics, see List the\n available CloudWatch metrics for your instances in the\n Amazon Elastic Compute Cloud User Guide. For more information\n about EBS volume metrics, see Amazon CloudWatch\n metrics for Amazon EBS in the Amazon Elastic Compute Cloud\n User Guide.

\n
" + "smithy.api#documentation": "

The reason for the finding classification of the function.

\n \n

Functions that have a finding classification of Optimized don't have\n a finding reason code.

\n
\n

Finding reason codes for functions include:

\n
    \n
  • \n

    \n \n MemoryOverprovisioned\n — The\n function is over-provisioned when its memory configuration can be sized down\n while still meeting the performance requirements of your workload. An\n over-provisioned function might lead to unnecessary infrastructure cost. This\n finding reason code is part of the NotOptimized finding\n classification.

    \n
  • \n
  • \n

    \n \n MemoryUnderprovisioned\n — The\n function is under-provisioned when its memory configuration doesn't meet the\n performance requirements of the workload. An under-provisioned function might\n lead to poor application performance. This finding reason code is part of the\n NotOptimized finding classification.

    \n
  • \n
  • \n

    \n \n InsufficientData\n — The function\n does not have sufficient metric data for Compute Optimizer to generate a\n recommendation. For more information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide. This finding reason code is part of the\n Unavailable finding classification.

    \n
  • \n
  • \n

    \n \n Inconclusive\n — The function does\n not qualify for a recommendation because Compute Optimizer cannot generate a\n recommendation with a high degree of confidence. This finding reason code is\n part of the Unavailable finding classification.

    \n
  • \n
" } }, - "utilizationMetrics": { - "target": "com.amazonaws.computeoptimizer#UtilizationMetrics", + "memorySizeRecommendationOptions": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOptions", "traits": { - "smithy.api#documentation": "

An array of objects that describe the utilization metrics of the instance.

" + "smithy.api#documentation": "

An array of objects that describe the memory configuration recommendation options for\n the function.

" } }, - "lookBackPeriodInDays": { - "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", + "currentPerformanceRisk": { + "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of days for which utilization metrics were analyzed for the\n instance.

" + "smithy.api#documentation": "

The risk of the current Lambda function not meeting the performance needs\n of its workloads. The higher the risk, the more likely the current Lambda\n function requires more memory.

" } }, - "recommendationOptions": { - "target": "com.amazonaws.computeoptimizer#RecommendationOptions", + "effectiveRecommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#LambdaEffectiveRecommendationPreferences", "traits": { - "smithy.api#documentation": "

An array of objects that describe the recommendation options for the instance.

" + "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Lambda functions.\n

" } }, - "recommendationSources": { - "target": "com.amazonaws.computeoptimizer#RecommendationSources", + "tags": { + "target": "com.amazonaws.computeoptimizer#Tags", "traits": { - "smithy.api#documentation": "

An array of objects that describe the source resource of the recommendation.

" + "smithy.api#documentation": "

\n A list of tags assigned to your Lambda function recommendations.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a Lambda function recommendation.

" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilter": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilterName", + "traits": { + "smithy.api#documentation": "

The name of the filter.

\n

Specify Finding to return recommendations with a specific finding\n classification (for example, NotOptimized).

\n

Specify FindingReasonCode to return recommendations with a specific\n finding reason code (for example, MemoryUnderprovisioned).

\n

You can filter your Lambda function recommendations by tag:key \n and tag-key tags.

\n

A tag:key is a key and value combination of a tag assigned to your \n Lambda function recommendations. Use the tag key in the filter name and the tag value \n as the filter value. For example, to find all Lambda function recommendations that have \n a tag with the key of Owner and the value of TeamA, \n specify tag:Owner for the filter name and TeamA for the filter value.

\n

A tag-key is the key of a tag assigned to your Lambda function recommendations. Use \n this filter to find all of your Lambda function recommendations that have a tag with a \n specific key. This doesn’t consider the tag value. For example, you can find \n your Lambda function recommendations with a tag key value of Owner or without any tag \n keys assigned.

" } }, - "lastRefreshTimestamp": { - "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", + "values": { + "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#documentation": "

The timestamp of when the instance recommendation was last generated.

" + "smithy.api#documentation": "

The value of the filter.

\n

The valid values for this parameter are as follows, depending on what you specify for\n the name parameter:

\n
    \n
  • \n

    Specify Optimized, NotOptimized, or\n Unavailable if you specify the name parameter as\n Finding.

    \n
  • \n
  • \n

    Specify MemoryOverprovisioned,\n MemoryUnderprovisioned, InsufficientData, or\n Inconclusive if you specify the name parameter as\n FindingReasonCode.

    \n
  • \n
" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a filter that returns a more specific list of Lambda\n function recommendations. Use this filter with the GetLambdaFunctionRecommendations action.

\n

You can use EBSFilter with the GetEBSVolumeRecommendations action, JobFilter with the\n DescribeRecommendationExportJobs action, and Filter\n with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.
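A hypothetical Soto usage sketch for the filter described above; the generated names (for example, `.finding` and the request/response member names) are assumptions based on Soto's conventions.

```swift
import SotoComputeOptimizer

// Sketch (assumed Soto-generated names): return only Lambda function
// recommendations whose finding classification is NotOptimized.
func listNotOptimizedFunctionRecommendations(using client: AWSClient) async throws {
    let computeOptimizer = ComputeOptimizer(client: client, region: .useast1)
    let findingFilter = ComputeOptimizer.LambdaFunctionRecommendationFilter(
        name: .finding,
        values: ["NotOptimized"]
    )
    let response = try await computeOptimizer.getLambdaFunctionRecommendations(
        .init(filters: [findingFilter])
    )
    for recommendation in response.lambdaFunctionRecommendations ?? [] {
        print(recommendation.functionArn ?? "unknown ARN")
    }
}
```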

" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilterName": { + "type": "enum", + "members": { + "FINDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Finding" } }, - "currentPerformanceRisk": { - "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", + "FINDING_REASON_CODE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The risk of the current instance not meeting the performance needs of its workloads.\n The higher the risk, the more likely the current instance cannot meet the performance\n requirements of its workload.

" + "smithy.api#enumValue": "FindingReasonCode" + } + } + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilter" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFinding": { + "type": "enum", + "members": { + "OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Optimized" } }, - "effectiveRecommendationPreferences": { - "target": "com.amazonaws.computeoptimizer#EffectiveRecommendationPreferences", + "NOT_OPTIMIZED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object that describes the effective recommendation preferences for the\n instance.

" + "smithy.api#enumValue": "NotOptimized" } }, - "inferredWorkloadTypes": { - "target": "com.amazonaws.computeoptimizer#InferredWorkloadTypes", + "UNAVAILABLE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The applications that might be running on the instance as inferred by Compute Optimizer.

\n

Compute Optimizer can infer if one of the following applications might be running on\n the instance:

\n
    \n
  • \n

    \n AmazonEmr - Infers that Amazon EMR might be running on\n the instance.

    \n
  • \n
  • \n

    \n ApacheCassandra - Infers that Apache Cassandra might be running\n on the instance.

    \n
  • \n
  • \n

    \n ApacheHadoop - Infers that Apache Hadoop might be running on the\n instance.

    \n
  • \n
  • \n

    \n Memcached - Infers that Memcached might be running on the\n instance.

    \n
  • \n
  • \n

    \n NGINX - Infers that NGINX might be running on the\n instance.

    \n
  • \n
  • \n

    \n PostgreSql - Infers that PostgreSQL might be running on the\n instance.

    \n
  • \n
  • \n

    \n Redis - Infers that Redis might be running on the\n instance.

    \n
  • \n
  • \n

    \n Kafka - Infers that Kafka might be running on the\n instance.

    \n
  • \n
  • \n

    \n SQLServer - Infers that SQLServer might be running on the\n instance.

    \n
  • \n
" + "smithy.api#enumValue": "Unavailable" + } + } + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCode": { + "type": "enum", + "members": { + "MEMORY_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MemoryOverprovisioned" } }, - "instanceState": { - "target": "com.amazonaws.computeoptimizer#InstanceState", + "MEMORY_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n The state of the instance when the recommendation was generated.\n

" + "smithy.api#enumValue": "MemoryUnderprovisioned" } }, - "tags": { - "target": "com.amazonaws.computeoptimizer#Tags", + "INSUFFICIENT_DATA": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n A list of tags assigned to your Amazon EC2 instance recommendations.\n

" + "smithy.api#enumValue": "InsufficientData" } }, - "externalMetricStatus": { - "target": "com.amazonaws.computeoptimizer#ExternalMetricStatus", + "INCONCLUSIVE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n An object that describes Compute Optimizer's integration status with your external metrics provider.\n

" + "smithy.api#enumValue": "Inconclusive" + } + } + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCodes": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCode" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionRecommendations": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendation" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetric": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMetricName", + "traits": { + "smithy.api#documentation": "

The name of the utilization metric.

\n

The following utilization metrics are available:

\n
    \n
  • \n

    \n Duration - The amount of time that your function code spends\n processing an event.

    \n
  • \n
  • \n

    \n Memory - The amount of memory used per invocation.

    \n
  • \n
" } }, - "currentInstanceGpuInfo": { - "target": "com.amazonaws.computeoptimizer#GpuInfo", + "statistic": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionMetricStatistic", "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the current instance type.\n

" + "smithy.api#documentation": "

The statistic of the utilization metric.

\n

The Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.

\n

The Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide.

" + } + }, + "value": { + "target": "com.amazonaws.computeoptimizer#MetricValue", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The value of the utilization metric.

" } - }, - "idle": { - "target": "com.amazonaws.computeoptimizer#InstanceIdle", + } + }, + "traits": { + "smithy.api#documentation": "

Describes a utilization metric of a Lambda function.

" + } + }, + "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetric" + } + }, + "com.amazonaws.computeoptimizer#LambdaSavingsEstimationMode": { + "type": "structure", + "members": { + "source": { + "target": "com.amazonaws.computeoptimizer#LambdaSavingsEstimationModeSource", "traits": { - "smithy.api#documentation": "

\n Describes if an Amazon EC2 instance is idle.\n

" + "smithy.api#documentation": "

\n Describes the source for calculation of savings opportunity for Lambda functions.\n

" } } }, "traits": { - "smithy.api#documentation": "

Describes an Amazon EC2 instance recommendation.

" + "smithy.api#documentation": "

\n Describes the savings estimation used for calculating savings opportunity for Lambda functions.\n

" } }, - "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCode": { + "com.amazonaws.computeoptimizer#LambdaSavingsEstimationModeSource": { "type": "enum", "members": { - "CPU_OVER_PROVISIONED": { + "PUBLIC_PRICING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CPUOverprovisioned" + "smithy.api#enumValue": "PublicPricing" } }, - "CPU_UNDER_PROVISIONED": { + "COST_EXPLORER_RIGHTSIZING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CPUUnderprovisioned" + "smithy.api#enumValue": "CostExplorerRightsizing" } }, - "MEMORY_OVER_PROVISIONED": { + "COST_OPTIMIZATION_HUB": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "MemoryOverprovisioned" + "smithy.api#enumValue": "CostOptimizationHub" } - }, - "MEMORY_UNDER_PROVISIONED": { - "target": "smithy.api#Unit", + } + } + }, + "com.amazonaws.computeoptimizer#LambdaSavingsOpportunityAfterDiscounts": { + "type": "structure", + "members": { + "savingsOpportunityPercentage": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", "traits": { - "smithy.api#enumValue": "MemoryUnderprovisioned" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Lambda \n function recommendations. This includes any applicable Savings Plans discounts.\n

" } }, - "EBS_THROUGHPUT_OVER_PROVISIONED": { - "target": "smithy.api#Unit", + "estimatedMonthlySavings": { + "target": "com.amazonaws.computeoptimizer#LambdaEstimatedMonthlySavings", "traits": { - "smithy.api#enumValue": "EBSThroughputOverprovisioned" + "smithy.api#documentation": "

\n The estimated monthly savings possible by adopting Compute Optimizer’s Lambda function recommendations. This \n includes any applicable Savings Plans discounts.\n

" } - }, - "EBS_THROUGHPUT_UNDER_PROVISIONED": { - "target": "smithy.api#Unit", + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the savings opportunity for Lambda function recommendations after applying Savings Plans discounts.\n

\n

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.

" + } + }, + "com.amazonaws.computeoptimizer#LastRefreshTimestamp": { + "type": "timestamp" + }, + "com.amazonaws.computeoptimizer#LastUpdatedTimestamp": { + "type": "timestamp" + }, + "com.amazonaws.computeoptimizer#LicenseConfiguration": { + "type": "structure", + "members": { + "numberOfCores": { + "target": "com.amazonaws.computeoptimizer#NumberOfCores", "traits": { - "smithy.api#enumValue": "EBSThroughputUnderprovisioned" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The current number of cores associated with the instance.\n

" } }, - "EBS_IOPS_OVER_PROVISIONED": { - "target": "smithy.api#Unit", + "instanceType": { + "target": "com.amazonaws.computeoptimizer#InstanceType", "traits": { - "smithy.api#enumValue": "EBSIOPSOverprovisioned" + "smithy.api#documentation": "

\n The instance type used in the license.\n

" } }, - "EBS_IOPS_UNDER_PROVISIONED": { - "target": "smithy.api#Unit", + "operatingSystem": { + "target": "com.amazonaws.computeoptimizer#OperatingSystem", "traits": { - "smithy.api#enumValue": "EBSIOPSUnderprovisioned" + "smithy.api#documentation": "

\n The operating system of the instance. \n

" } }, - "NETWORK_BANDWIDTH_OVER_PROVISIONED": { - "target": "smithy.api#Unit", + "licenseEdition": { + "target": "com.amazonaws.computeoptimizer#LicenseEdition", "traits": { - "smithy.api#enumValue": "NetworkBandwidthOverprovisioned" + "smithy.api#documentation": "

\n The edition of the license for the application that runs on the instance.\n

" } }, - "NETWORK_BANDWIDTH_UNDER_PROVISIONED": { - "target": "smithy.api#Unit", + "licenseName": { + "target": "com.amazonaws.computeoptimizer#LicenseName", "traits": { - "smithy.api#enumValue": "NetworkBandwidthUnderprovisioned" + "smithy.api#documentation": "

\n The name of the license for the application that runs on the instance.\n

" } }, - "NETWORK_PPS_OVER_PROVISIONED": { - "target": "smithy.api#Unit", + "licenseModel": { + "target": "com.amazonaws.computeoptimizer#LicenseModel", "traits": { - "smithy.api#enumValue": "NetworkPPSOverprovisioned" + "smithy.api#documentation": "

\n The license type associated with the instance.\n

" } }, - "NETWORK_PPS_UNDER_PROVISIONED": { - "target": "smithy.api#Unit", + "licenseVersion": { + "target": "com.amazonaws.computeoptimizer#LicenseVersion", "traits": { - "smithy.api#enumValue": "NetworkPPSUnderprovisioned" + "smithy.api#documentation": "

\n The version of the license for the application that runs on the instance.\n

" } }, - "DISK_IOPS_OVER_PROVISIONED": { - "target": "smithy.api#Unit", + "metricsSource": { + "target": "com.amazonaws.computeoptimizer#MetricsSource", "traits": { - "smithy.api#enumValue": "DiskIOPSOverprovisioned" + "smithy.api#documentation": "

\n The list of metric sources required to generate recommendations for commercial software licenses.\n

" } - }, - "DISK_IOPS_UNDER_PROVISIONED": { + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the configuration of a license for an Amazon EC2 instance.\n

" + } + }, + "com.amazonaws.computeoptimizer#LicenseEdition": { + "type": "enum", + "members": { + "ENTERPRISE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "DiskIOPSUnderprovisioned" + "smithy.api#enumValue": "Enterprise" } }, - "DISK_THROUGHPUT_OVER_PROVISIONED": { + "STANDARD": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "DiskThroughputOverprovisioned" + "smithy.api#enumValue": "Standard" } }, - "DISK_THROUGHPUT_UNDER_PROVISIONED": { + "FREE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "DiskThroughputUnderprovisioned" + "smithy.api#enumValue": "Free" } }, - "GPU_UNDER_PROVISIONED": { + "NO_LICENSE_EDITION_FOUND": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "GPUUnderprovisioned" + "smithy.api#enumValue": "NoLicenseEditionFound" } - }, - "GPU_OVER_PROVISIONED": { + } + } + }, + "com.amazonaws.computeoptimizer#LicenseFinding": { + "type": "enum", + "members": { + "INSUFFICIENT_METRICS": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "GPUOverprovisioned" + "smithy.api#enumValue": "InsufficientMetrics" } }, - "GPU_MEMORY_UNDER_PROVISIONED": { + "OPTIMIZED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "GPUMemoryUnderprovisioned" + "smithy.api#enumValue": "Optimized" } }, - "GPU_MEMORY_OVER_PROVISIONED": { + "NOT_OPTIMIZED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "GPUMemoryOverprovisioned" + "smithy.api#enumValue": "NotOptimized" } } } }, - "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCodes": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCode" - } - }, - "com.amazonaws.computeoptimizer#InstanceRecommendationOption": { - "type": "structure", + "com.amazonaws.computeoptimizer#LicenseFindingReasonCode": { + "type": "enum", "members": { - "instanceType": { - "target": "com.amazonaws.computeoptimizer#InstanceType", - "traits": { - "smithy.api#documentation": "

The instance type of the instance recommendation.

" - } - }, - "projectedUtilizationMetrics": { - "target": "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics", - "traits": { - "smithy.api#documentation": "

An array of objects that describe the projected utilization metrics of the instance\n recommendation option.

\n \n

The Cpu and Memory metrics are the only projected\n utilization metrics returned. Additionally, the Memory metric is\n returned only for resources that have the unified CloudWatch agent installed\n on them. For more information, see Enabling Memory\n Utilization with the CloudWatch Agent.

\n
" - } - }, - "platformDifferences": { - "target": "com.amazonaws.computeoptimizer#PlatformDifferences", - "traits": { - "smithy.api#documentation": "

Describes the configuration differences between the current instance and the\n recommended instance type. You should consider the configuration differences before\n migrating your workloads from the current instance to the recommended instance type. The\n Change the instance type guide for Linux and Change the instance type\n guide for Windows provide general guidance for getting started with an\n instance migration.

\n

Platform differences include:

\n
    \n
  • \n

    \n \n Hypervisor\n — The hypervisor of\n the recommended instance type is different than that of the current instance.\n For example, the recommended instance type uses a Nitro hypervisor and the\n current instance uses a Xen hypervisor. The differences that you should consider\n between these hypervisors are covered in the Nitro Hypervisor section of the\n Amazon EC2 frequently asked questions. For more information, see\n Instances built on the Nitro System in the Amazon EC2\n User Guide for Linux, or Instances built on the Nitro System in the Amazon EC2\n User Guide for Windows.

    \n
  • \n
  • \n

    \n \n NetworkInterface\n — The network\n interface of the recommended instance type is different than that of the current\n instance. For example, the recommended instance type supports enhanced\n networking and the current instance might not. To enable enhanced networking for\n the recommended instance type, you must install the Elastic Network Adapter\n (ENA) driver or the Intel 82599 Virtual Function driver. For more information,\n see Networking and storage features and Enhanced networking\n on Linux in the Amazon EC2 User Guide for\n Linux, or Networking and storage features and Enhanced\n networking on Windows in the Amazon EC2 User Guide for\n Windows.

    \n
  • \n
  • \n

    \n \n StorageInterface\n — The storage\n interface of the recommended instance type is different than that of the current\n instance. For example, the recommended instance type uses an NVMe storage\n interface and the current instance does not. To access NVMe volumes for the\n recommended instance type, you will need to install or upgrade the NVMe driver.\n For more information, see Networking and storage features and Amazon EBS and NVMe on\n Linux instances in the Amazon EC2 User Guide for\n Linux, or Networking and storage features and Amazon EBS and NVMe\n on Windows instances in the Amazon EC2 User Guide for\n Windows.

    \n
  • \n
  • \n

    \n \n InstanceStoreAvailability\n — The\n recommended instance type does not support instance store volumes and the\n current instance does. Before migrating, you might need to back up the data on\n your instance store volumes if you want to preserve them. For more information,\n see How do I back up an instance store volume on my Amazon EC2 instance\n to Amazon EBS? in the Amazon Web Services Premium\n Support Knowledge Base. For more information, see Networking and storage features and Amazon EC2\n instance store in the Amazon EC2 User Guide for\n Linux, or see Networking and storage features and Amazon EC2\n instance store in the Amazon EC2 User Guide for\n Windows.

    \n
  • \n
  • \n

    \n \n VirtualizationType\n — The\n recommended instance type uses the hardware virtual machine (HVM) virtualization\n type and the current instance uses the paravirtual (PV) virtualization type. For\n more information about the differences between these virtualization types, see\n Linux AMI\n virtualization types in the Amazon EC2 User Guide for\n Linux, or Windows AMI virtualization types in the Amazon EC2 User\n Guide for Windows.

    \n
  • \n
  • \n

    \n \n Architecture\n — The CPU\n architecture between the recommended instance type and the current instance is\n different. For example, the recommended instance type might use an Arm CPU\n architecture and the current instance type might use a different one, such as\n x86. Before migrating, you should consider recompiling the software on your\n instance for the new architecture. Alternatively, you might switch to an Amazon\n Machine Image (AMI) that supports the new architecture. For more information\n about the CPU architecture for each instance type, see Amazon EC2 Instance Types.

    \n
  • \n
" - } - }, - "performanceRisk": { - "target": "com.amazonaws.computeoptimizer#PerformanceRisk", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The performance risk of the instance recommendation option.

\n

Performance risk indicates the likelihood of the recommended instance type not meeting\n the resource needs of your workload. Compute Optimizer calculates an individual\n performance risk score for each specification of the recommended instance, including\n CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput,\n and network PPS.\n The performance\n risk of the recommended instance is calculated as the maximum performance risk score\n across the analyzed resource specifications.

\n

The value ranges from 0 - 4, with 0 meaning\n that the recommended resource is predicted to always provide enough hardware capability.\n The higher the performance risk is, the more likely you should validate whether the\n recommendation will meet the performance requirements of your workload before migrating\n your resource.

" - } - }, - "rank": { - "target": "com.amazonaws.computeoptimizer#Rank", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The rank of the instance recommendation option.

\n

The top recommendation option is ranked as 1.

" - } - }, - "savingsOpportunity": { - "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "CW_APP_INSIGHTS_DISABLED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object that describes the savings opportunity for the instance recommendation\n option. Savings opportunity includes the estimated monthly savings amount and\n percentage.

" + "smithy.api#enumValue": "InvalidCloudWatchApplicationInsightsSetup" } }, - "migrationEffort": { - "target": "com.amazonaws.computeoptimizer#MigrationEffort", + "CW_APP_INSIGHTS_ERROR": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The level of effort required to migrate from the current instance type to the\n recommended instance type.

\n

For example, the migration effort is Low if Amazon EMR is the\n inferred workload type and an Amazon Web Services Graviton instance type is recommended.\n The migration effort is Medium if a workload type couldn't be inferred but\n an Amazon Web Services Graviton instance type is recommended. The migration effort is\n VeryLow if both the current and recommended instance types are of the\n same CPU architecture.

" + "smithy.api#enumValue": "CloudWatchApplicationInsightsError" } }, - "instanceGpuInfo": { - "target": "com.amazonaws.computeoptimizer#GpuInfo", + "LICENSE_OVER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n Describes the GPU accelerator settings for the recommended instance type.\n

" + "smithy.api#enumValue": "LicenseOverprovisioned" } }, - "savingsOpportunityAfterDiscounts": { - "target": "com.amazonaws.computeoptimizer#InstanceSavingsOpportunityAfterDiscounts", + "OPTIMIZED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n An object that describes the savings opportunity for the instance recommendation option that includes Savings Plans and Reserved Instances \n discounts. Savings opportunity includes the estimated monthly savings and percentage.\n

" + "smithy.api#enumValue": "Optimized" } } - }, - "traits": { - "smithy.api#documentation": "

Describes a recommendation option for an Amazon EC2 instance.

" - } - }, - "com.amazonaws.computeoptimizer#InstanceRecommendations": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#InstanceRecommendation" } - }, - "com.amazonaws.computeoptimizer#InstanceSavingsEstimationMode": { - "type": "structure", - "members": { - "source": { - "target": "com.amazonaws.computeoptimizer#InstanceSavingsEstimationModeSource", - "traits": { - "smithy.api#documentation": "

\n Describes the source for calculating the savings opportunity for Amazon EC2 instances.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Describes the savings estimation mode used for calculating savings opportunity for Amazon EC2 instances.\n

" + }, + "com.amazonaws.computeoptimizer#LicenseFindingReasonCodes": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LicenseFindingReasonCode" } }, - "com.amazonaws.computeoptimizer#InstanceSavingsEstimationModeSource": { + "com.amazonaws.computeoptimizer#LicenseModel": { "type": "enum", "members": { - "PUBLIC_PRICING": { + "LICENSE_INCLUDED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "PublicPricing" + "smithy.api#enumValue": "LicenseIncluded" } }, - "COST_EXPLORER_RIGHTSIZING": { + "BRING_YOUR_OWN_LICENSE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CostExplorerRightsizing" + "smithy.api#enumValue": "BringYourOwnLicense" } - }, - "COST_OPTIMIZATION_HUB": { + } + } + }, + "com.amazonaws.computeoptimizer#LicenseName": { + "type": "enum", + "members": { + "SQLSERVER": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CostOptimizationHub" + "smithy.api#enumValue": "SQLServer" } } } }, - "com.amazonaws.computeoptimizer#InstanceSavingsOpportunityAfterDiscounts": { + "com.amazonaws.computeoptimizer#LicenseRecommendation": { "type": "structure", "members": { - "savingsOpportunityPercentage": { - "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", + "resourceArn": { + "target": "com.amazonaws.computeoptimizer#ResourceArn", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The estimated monthly savings possible as a percentage of monthly cost after applying the Savings Plans and Reserved Instances discounts. \n This saving can be achieved by adopting Compute Optimizer’s EC2 instance recommendations.\n

" + "smithy.api#documentation": "

\n The ARN that identifies the Amazon EC2 instance.\n

" } }, - "estimatedMonthlySavings": { - "target": "com.amazonaws.computeoptimizer#InstanceEstimatedMonthlySavings", + "accountId": { + "target": "com.amazonaws.computeoptimizer#AccountId", "traits": { - "smithy.api#documentation": "

\n An object that describes the estimated monthly savings possible by adopting Compute Optimizer’s Amazon EC2 instance recommendations. This is \n based on pricing after applying the Savings Plans and Reserved Instances discounts.\n

" + "smithy.api#documentation": "

\n The Amazon Web Services account ID of the license. \n

" } - } - }, - "traits": { - "smithy.api#documentation": "

\n Describes the savings opportunity for instance recommendations after applying the Savings Plans and Reserved Instances discounts.\n

\n

Savings opportunity after discounts represents the estimated monthly savings you can achieve by \n implementing Compute Optimizer recommendations.

" - } - }, - "com.amazonaws.computeoptimizer#InstanceState": { - "type": "enum", - "members": { - "PENDING": { - "target": "smithy.api#Unit", + }, + "currentLicenseConfiguration": { + "target": "com.amazonaws.computeoptimizer#LicenseConfiguration", "traits": { - "smithy.api#enumValue": "pending" + "smithy.api#documentation": "

\n An object that describes the current configuration of an instance that runs on a license.\n

" } }, - "RUNNING": { - "target": "smithy.api#Unit", + "lookbackPeriodInDays": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", "traits": { - "smithy.api#enumValue": "running" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The number of days for which utilization metrics were analyzed for an instance that runs on a license.\n

" } }, - "SHUTTING_DOWN": { - "target": "smithy.api#Unit", + "lastRefreshTimestamp": { + "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", "traits": { - "smithy.api#enumValue": "shutting-down" + "smithy.api#documentation": "

\n The timestamp of when the license recommendation was last generated.\n

" } }, - "TERMINATED": { - "target": "smithy.api#Unit", + "finding": { + "target": "com.amazonaws.computeoptimizer#LicenseFinding", "traits": { - "smithy.api#enumValue": "terminated" + "smithy.api#documentation": "

The finding classification for an instance that runs on a license.

Findings include:

  • InsufficientMetrics — When Compute Optimizer detects that your CloudWatch Application Insights isn't enabled or is enabled with insufficient permissions.

  • NotOptimized — When Compute Optimizer detects that your EC2 infrastructure isn't using any of the SQL Server license features you're paying for, a license is considered not optimized.

  • Optimized — When Compute Optimizer detects that all specifications of your license meet the performance requirements of your workload.
" } }, - "STOPPING": { - "target": "smithy.api#Unit", + "findingReasonCodes": { + "target": "com.amazonaws.computeoptimizer#LicenseFindingReasonCodes", "traits": { - "smithy.api#enumValue": "stopping" + "smithy.api#documentation": "

The reason for the finding classification for an instance that runs on a license.

Finding reason codes include:

  • Optimized — All specifications of your license meet the performance requirements of your workload.

  • LicenseOverprovisioned — A license is considered over-provisioned when your license can be downgraded while still meeting the performance requirements of your workload.

  • InvalidCloudWatchApplicationInsightsSetup — CloudWatch Application Insights isn't configured properly.

  • CloudWatchApplicationInsightsError — There is a CloudWatch Application Insights error.
" } }, - "STOPPED": { - "target": "smithy.api#Unit", + "licenseRecommendationOptions": { + "target": "com.amazonaws.computeoptimizer#LicenseRecommendationOptions", "traits": { - "smithy.api#enumValue": "stopped" + "smithy.api#documentation": "

\n An array of objects that describe the license recommendation options.\n

" + } + }, + "tags": { + "target": "com.amazonaws.computeoptimizer#Tags", + "traits": { + "smithy.api#documentation": "

\n A list of tags assigned to an EC2 instance.\n

" } - } - } - }, - "com.amazonaws.computeoptimizer#InstanceType": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#InternalServerException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.computeoptimizer#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

An internal error has occurred. Try your call again.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.computeoptimizer#InvalidParameterValueException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.computeoptimizer#ErrorMessage" } }, "traits": { - "smithy.api#documentation": "

The value supplied for the input parameter is out of range or not valid.

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 + "smithy.api#documentation": "

\n Describes a license recommendation for an EC2 instance.\n

" } }, - "com.amazonaws.computeoptimizer#JobFilter": { + "com.amazonaws.computeoptimizer#LicenseRecommendationFilter": { "type": "structure", "members": { "name": { - "target": "com.amazonaws.computeoptimizer#JobFilterName", + "target": "com.amazonaws.computeoptimizer#LicenseRecommendationFilterName", "traits": { - "smithy.api#documentation": "

The name of the filter.

\n

Specify ResourceType to return export jobs of a specific resource type\n (for example, Ec2Instance).

\n

    Specify JobStatus to return export jobs with a specific status (e.g., Complete).

" + "smithy.api#documentation": "

The name of the filter.

\n

Specify Finding to return recommendations with a specific finding\n classification.

\n

Specify FindingReasonCode to return recommendations with a specific\n finding reason code.

\n

You can filter your license recommendations by tag:key \n and tag-key tags.

\n

A tag:key is a key and value combination of a tag assigned to your \n license recommendations. Use the tag key in the filter name and the tag value \n as the filter value. For example, to find all license recommendations that have \n a tag with the key of Owner and the value of TeamA, \n specify tag:Owner for the filter name and TeamA for the filter value.

\n

A tag-key is the key of a tag assigned to your license recommendations. Use \n this filter to find all of your license recommendations that have a tag with a \n specific key. This doesn’t consider the tag value. For example, you can find \n your license recommendations with a tag key value of Owner or without any tag \n keys assigned.

" } }, "values": { "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#documentation": "

The value of the filter.

The valid values for this parameter are as follows, depending on what you specify for the name parameter:

  • Specify Ec2Instance or AutoScalingGroup if you specify the name parameter as ResourceType. There is no filter for EBS volumes because volume recommendations cannot be exported at this time.

  • Specify Queued, InProgress, Complete, or Failed if you specify the name parameter as JobStatus.
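As a purely illustrative sketch (not part of the model), this is how the filter described above is typically passed to the generated Soto for Swift client. The client setup, enum case names, and response member names follow Soto's usual code-generation conventions and are assumptions rather than text taken from this patch.

import SotoComputeOptimizer

// Sketch: list recommendation export jobs whose JobStatus is Complete.
// Assumes `awsClient` is an already-configured AWSClient.
func listCompletedExportJobs(awsClient: AWSClient) async throws {
    let computeOptimizer = ComputeOptimizer(client: awsClient, region: .useast1)
    let request = ComputeOptimizer.DescribeRecommendationExportJobsRequest(
        filters: [.init(name: .jobStatus, values: ["Complete"])]  // case name assumed from JobFilterName
    )
    let response = try await computeOptimizer.describeRecommendationExportJobs(request)
    for job in response.recommendationExportJobs ?? [] {
        print(job.jobId ?? "-", String(describing: job.status))
    }
}

The same pattern applies to the ResourceType filter; only the name and values change.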
" + "smithy.api#documentation": "

The value of the filter.

The valid values for this parameter are as follows, depending on what you specify for the name parameter:

  • If you specify the name parameter as Finding, then specify Optimized, NotOptimized, or InsufficientMetrics.

  • If you specify the name parameter as FindingReasonCode, then specify Optimized, LicenseOverprovisioned, InvalidCloudWatchApplicationInsightsSetup, or CloudWatchApplicationInsightsError.
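For illustration only, a minimal Soto for Swift sketch of applying this filter when requesting license recommendations; the enum case name and response member names are assumptions based on Soto's usual generated naming, not text from this patch.

import SotoComputeOptimizer

// Sketch: fetch license recommendations whose finding is NotOptimized.
func listNotOptimizedLicenses(awsClient: AWSClient) async throws {
    let computeOptimizer = ComputeOptimizer(client: awsClient, region: .useast1)
    let request = ComputeOptimizer.GetLicenseRecommendationsRequest(
        filters: [.init(name: .licenseFinding, values: ["NotOptimized"])]  // case name assumed
    )
    let response = try await computeOptimizer.getLicenseRecommendations(request)
    for recommendation in response.licenseRecommendations ?? [] {
        print(recommendation.resourceArn ?? "-", String(describing: recommendation.finding))
    }
}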
" } } }, "traits": { - "smithy.api#documentation": "

Describes a filter that returns a more specific list of recommendation export jobs.\n Use this filter with the DescribeRecommendationExportJobs\n action.

\n

You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and Filter with\n the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

" + "smithy.api#documentation": "

Describes a filter that returns a more specific list of license recommendations. Use this filter with the GetLicenseRecommendations action.

" } }, - "com.amazonaws.computeoptimizer#JobFilterName": { + "com.amazonaws.computeoptimizer#LicenseRecommendationFilterName": { "type": "enum", "members": { - "RESOURCE_TYPE": { + "LICENSE_FINDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "ResourceType" + "smithy.api#enumValue": "Finding" } }, - "JOB_STATUS": { + "LICENSE_FINDING_REASON_CODE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "JobStatus" + "smithy.api#enumValue": "FindingReasonCode" + } + }, + "LICENSE_NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LicenseName" } } } }, - "com.amazonaws.computeoptimizer#JobFilters": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#JobFilter" - } - }, - "com.amazonaws.computeoptimizer#JobId": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#JobIds": { + "com.amazonaws.computeoptimizer#LicenseRecommendationFilters": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#JobId" + "target": "com.amazonaws.computeoptimizer#LicenseRecommendationFilter" } }, - "com.amazonaws.computeoptimizer#JobStatus": { - "type": "enum", + "com.amazonaws.computeoptimizer#LicenseRecommendationOption": { + "type": "structure", "members": { - "QUEUED": { - "target": "smithy.api#Unit", + "rank": { + "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#enumValue": "Queued" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The rank of the license recommendation option.\n

\n

\n The top recommendation option is ranked as 1.\n

" } }, - "IN_PROGRESS": { - "target": "smithy.api#Unit", + "operatingSystem": { + "target": "com.amazonaws.computeoptimizer#OperatingSystem", "traits": { - "smithy.api#enumValue": "InProgress" + "smithy.api#documentation": "

\n The operating system of a license recommendation option.\n

" } }, - "COMPLETE": { - "target": "smithy.api#Unit", + "licenseEdition": { + "target": "com.amazonaws.computeoptimizer#LicenseEdition", "traits": { - "smithy.api#enumValue": "Complete" + "smithy.api#documentation": "

\n The recommended edition of the license for the application that runs on the instance.\n

" } }, - "FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Failed" - } - } - } - }, - "com.amazonaws.computeoptimizer#LambdaEffectiveRecommendationPreferences": { - "type": "structure", - "members": { - "savingsEstimationMode": { - "target": "com.amazonaws.computeoptimizer#LambdaSavingsEstimationMode", + "licenseModel": { + "target": "com.amazonaws.computeoptimizer#LicenseModel", "traits": { - "smithy.api#documentation": "

\n Describes the savings estimation mode applied for calculating savings opportunity for Lambda functions.\n

" + "smithy.api#documentation": "

\n The recommended license type associated with the instance.\n

" } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity" } }, "traits": { - "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Lambda functions.\n

" + "smithy.api#documentation": "

\n Describes the recommendation options for licenses.\n

" } }, - "com.amazonaws.computeoptimizer#LambdaEstimatedMonthlySavings": { + "com.amazonaws.computeoptimizer#LicenseRecommendationOptions": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LicenseRecommendationOption" + } + }, + "com.amazonaws.computeoptimizer#LicenseRecommendations": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#LicenseRecommendation" + } + }, + "com.amazonaws.computeoptimizer#LicenseVersion": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#LimitExceededException": { "type": "structure", "members": { - "currency": { - "target": "com.amazonaws.computeoptimizer#Currency", - "traits": { - "smithy.api#documentation": "

\n The currency of the estimated monthly savings.\n

" - } - }, - "value": { - "target": "com.amazonaws.computeoptimizer#Value", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The value of the estimated monthly savings.\n

" - } + "message": { + "target": "com.amazonaws.computeoptimizer#ErrorMessage" } }, "traits": { - "smithy.api#documentation": "

\n Describes the estimated monthly savings possible for Lambda functions by adopting Compute Optimizer recommendations. This is based \n on Lambda functions pricing after applying Savings Plans discounts.\n

" + "smithy.api#documentation": "

The request exceeds a limit of the service.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 } }, - "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricName": { - "type": "enum", - "members": { - "DURATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Duration" - } - } + "com.amazonaws.computeoptimizer#LookBackPeriodInDays": { + "type": "double", + "traits": { + "smithy.api#default": 0 } }, - "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricStatistic": { + "com.amazonaws.computeoptimizer#LookBackPeriodPreference": { "type": "enum", "members": { - "LOWER_BOUND": { + "DAYS_14": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "LowerBound" + "smithy.api#enumValue": "DAYS_14" } }, - "UPPER_BOUND": { + "DAYS_32": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "UpperBound" + "smithy.api#enumValue": "DAYS_32" } }, - "EXPECTED": { + "DAYS_93": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Expected" + "smithy.api#enumValue": "DAYS_93" } } } }, - "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetric": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricName", - "traits": { - "smithy.api#documentation": "

The name of the projected utilization metric.

" - } - }, - "statistic": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryMetricStatistic", - "traits": { - "smithy.api#documentation": "

The statistic of the projected utilization metric.

" - } - }, - "value": { - "target": "com.amazonaws.computeoptimizer#MetricValue", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The values of the projected utilization metrics.

" - } + "com.amazonaws.computeoptimizer#Low": { + "type": "long", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#LowerBoundValue": { + "type": "double", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1000 } - }, + } + }, + "com.amazonaws.computeoptimizer#MaxSize": { + "type": "integer", "traits": { - "smithy.api#documentation": "

Describes a projected utilization metric of a Lambda function recommendation option.

" + "smithy.api#default": 0 } }, - "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetrics": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetric" + "com.amazonaws.computeoptimizer#Medium": { + "type": "long", + "traits": { + "smithy.api#default": 0 } }, - "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOption": { + "com.amazonaws.computeoptimizer#MemberAccountsEnrolled": { + "type": "boolean", + "traits": { + "smithy.api#default": false + } + }, + "com.amazonaws.computeoptimizer#MemorySize": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#MemorySizeConfiguration": { "type": "structure", "members": { - "rank": { - "target": "com.amazonaws.computeoptimizer#Rank", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The rank of the function recommendation option.

\n

The top recommendation option is ranked as 1.

" - } - }, - "memorySize": { - "target": "com.amazonaws.computeoptimizer#MemorySize", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The memory size, in MB, of the function recommendation option.

" - } - }, - "projectedUtilizationMetrics": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryProjectedMetrics", - "traits": { - "smithy.api#documentation": "

An array of objects that describe the projected utilization metrics of the function\n recommendation option.

" - } - }, - "savingsOpportunity": { - "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "memory": { + "target": "com.amazonaws.computeoptimizer#NullableMemory", "traits": { - "smithy.api#documentation": "

An object that describes the savings opportunity for the Lambda function\n recommendation option. Savings opportunity includes the estimated monthly savings amount\n and percentage.

" + "smithy.api#documentation": "

\n The amount of memory in the container.\n

" } }, - "savingsOpportunityAfterDiscounts": { - "target": "com.amazonaws.computeoptimizer#LambdaSavingsOpportunityAfterDiscounts", + "memoryReservation": { + "target": "com.amazonaws.computeoptimizer#NullableMemoryReservation", "traits": { - "smithy.api#documentation": "

\n An object that describes the savings opportunity for the Lambda recommendation option which includes Saving Plans \n discounts. Savings opportunity includes the estimated monthly savings and percentage.\n

" + "smithy.api#documentation": "

\n The limit of memory reserve for the container.\n

" } } }, "traits": { - "smithy.api#documentation": "

Describes a recommendation option for a Lambda function.

" + "smithy.api#documentation": "

\n The memory size configurations of a container.\n

" } }, - "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOptions": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOption" - } + "com.amazonaws.computeoptimizer#Message": { + "type": "string" }, - "com.amazonaws.computeoptimizer#LambdaFunctionMetricName": { + "com.amazonaws.computeoptimizer#MetadataKey": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#MetricName": { "type": "enum", "members": { - "DURATION": { + "CPU": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Duration" + "smithy.api#enumValue": "Cpu" } }, "MEMORY": { @@ -7341,1293 +8877,1287 @@ "traits": { "smithy.api#enumValue": "Memory" } - } - } - }, - "com.amazonaws.computeoptimizer#LambdaFunctionMetricStatistic": { - "type": "enum", - "members": { - "MAXIMUM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Maximum" - } }, - "AVERAGE": { + "EBS_READ_OPS_PER_SECOND": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Average" - } - } - } - }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendation": { - "type": "structure", - "members": { - "functionArn": { - "target": "com.amazonaws.computeoptimizer#FunctionArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the current function.

" + "smithy.api#enumValue": "EBS_READ_OPS_PER_SECOND" } }, - "functionVersion": { - "target": "com.amazonaws.computeoptimizer#FunctionVersion", + "EBS_WRITE_OPS_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The version number of the current function.

" + "smithy.api#enumValue": "EBS_WRITE_OPS_PER_SECOND" } }, - "accountId": { - "target": "com.amazonaws.computeoptimizer#AccountId", + "EBS_READ_BYTES_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID of the function.

" + "smithy.api#enumValue": "EBS_READ_BYTES_PER_SECOND" } }, - "currentMemorySize": { - "target": "com.amazonaws.computeoptimizer#MemorySize", + "EBS_WRITE_BYTES_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The amount of memory, in MB, that's allocated to the current function.

" + "smithy.api#enumValue": "EBS_WRITE_BYTES_PER_SECOND" } }, - "numberOfInvocations": { - "target": "com.amazonaws.computeoptimizer#NumberOfInvocations", + "DISK_READ_OPS_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of times your function code was executed during the look-back period.

" + "smithy.api#enumValue": "DISK_READ_OPS_PER_SECOND" } }, - "utilizationMetrics": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetrics", + "DISK_WRITE_OPS_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe the utilization metrics of the function.

" + "smithy.api#enumValue": "DISK_WRITE_OPS_PER_SECOND" } }, - "lookbackPeriodInDays": { - "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", + "DISK_READ_BYTES_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The number of days for which utilization metrics were analyzed for the\n function.

" + "smithy.api#enumValue": "DISK_READ_BYTES_PER_SECOND" } }, - "lastRefreshTimestamp": { - "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", + "DISK_WRITE_BYTES_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The timestamp of when the function recommendation was last generated.

" + "smithy.api#enumValue": "DISK_WRITE_BYTES_PER_SECOND" } }, - "finding": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFinding", + "NETWORK_IN_BYTES_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The finding classification of the function.

Findings for functions include:

  • Optimized — The function is correctly provisioned to run your workload based on its current configuration and its utilization history. This finding classification does not include finding reason codes.

  • NotOptimized — The function is performing at a higher level (over-provisioned) or at a lower level (under-provisioned) than required for your workload because its current configuration is not optimal. Over-provisioned resources might lead to unnecessary infrastructure cost, and under-provisioned resources might lead to poor application performance. This finding classification can include the MemoryOverprovisioned and MemoryUnderprovisioned finding reason codes.

  • Unavailable — Compute Optimizer was unable to generate a recommendation for the function. This could be because the function has not accumulated sufficient metric data, or the function does not qualify for a recommendation. This finding classification can include the InsufficientData and Inconclusive finding reason codes.

    Functions with a finding of unavailable are not returned unless you specify the filter parameter with a value of Unavailable in your GetLambdaFunctionRecommendations request.
" + "smithy.api#enumValue": "NETWORK_IN_BYTES_PER_SECOND" } }, - "findingReasonCodes": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCodes", + "NETWORK_OUT_BYTES_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The reason for the finding classification of the function.

Functions that have a finding classification of Optimized don't have a finding reason code.

Finding reason codes for functions include:

  • MemoryOverprovisioned — The function is over-provisioned when its memory configuration can be sized down while still meeting the performance requirements of your workload. An over-provisioned function might lead to unnecessary infrastructure cost. This finding reason code is part of the NotOptimized finding classification.

  • MemoryUnderprovisioned — The function is under-provisioned when its memory configuration doesn't meet the performance requirements of the workload. An under-provisioned function might lead to poor application performance. This finding reason code is part of the NotOptimized finding classification.

  • InsufficientData — The function does not have sufficient metric data for Compute Optimizer to generate a recommendation. For more information, see the Supported resources and requirements in the Compute Optimizer User Guide. This finding reason code is part of the Unavailable finding classification.

  • Inconclusive — The function does not qualify for a recommendation because Compute Optimizer cannot generate a recommendation with a high degree of confidence. This finding reason code is part of the Unavailable finding classification.

    \n
  • \n
" + "smithy.api#enumValue": "NETWORK_OUT_BYTES_PER_SECOND" } }, - "memorySizeRecommendationOptions": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMemoryRecommendationOptions", + "NETWORK_PACKETS_IN_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An array of objects that describe the memory configuration recommendation options for\n the function.

" + "smithy.api#enumValue": "NETWORK_PACKETS_IN_PER_SECOND" } }, - "currentPerformanceRisk": { - "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", + "NETWORK_PACKETS_OUT_PER_SECOND": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The risk of the current Lambda function not meeting the performance needs\n of its workloads. The higher the risk, the more likely the current Lambda\n function requires more memory.

" + "smithy.api#enumValue": "NETWORK_PACKETS_OUT_PER_SECOND" } }, - "tags": { - "target": "com.amazonaws.computeoptimizer#Tags", + "GPU_PERCENTAGE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n A list of tags assigned to your Lambda function recommendations.\n

" + "smithy.api#enumValue": "GPU_PERCENTAGE" } }, - "effectiveRecommendationPreferences": { - "target": "com.amazonaws.computeoptimizer#LambdaEffectiveRecommendationPreferences", + "GPU_MEMORY_PERCENTAGE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Lambda functions.\n

" + "smithy.api#enumValue": "GPU_MEMORY_PERCENTAGE" } } - }, - "traits": { - "smithy.api#documentation": "

Describes a Lambda function recommendation.

" } }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilter": { + "com.amazonaws.computeoptimizer#MetricProviderArn": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#MetricSource": { "type": "structure", "members": { - "name": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilterName", + "provider": { + "target": "com.amazonaws.computeoptimizer#MetricSourceProvider", "traits": { - "smithy.api#documentation": "

The name of the filter.

\n

Specify Finding to return recommendations with a specific finding\n classification (for example, NotOptimized).

\n

Specify FindingReasonCode to return recommendations with a specific\n finding reason code (for example, MemoryUnderprovisioned).

\n

You can filter your Lambda function recommendations by tag:key \n and tag-key tags.

\n

A tag:key is a key and value combination of a tag assigned to your \n Lambda function recommendations. Use the tag key in the filter name and the tag value \n as the filter value. For example, to find all Lambda function recommendations that have \n a tag with the key of Owner and the value of TeamA, \n specify tag:Owner for the filter name and TeamA for the filter value.

\n

A tag-key is the key of a tag assigned to your Lambda function recommendations. Use \n this filter to find all of your Lambda function recommendations that have a tag with a \n specific key. This doesn’t consider the tag value. For example, you can find \n your Lambda function recommendations with a tag key value of Owner or without any tag \n keys assigned.

" + "smithy.api#documentation": "

\n The name of the metric source provider.\n

" } }, - "values": { - "target": "com.amazonaws.computeoptimizer#FilterValues", + "providerArn": { + "target": "com.amazonaws.computeoptimizer#MetricProviderArn", "traits": { - "smithy.api#documentation": "

The value of the filter.

The valid values for this parameter are as follows, depending on what you specify for the name parameter:

  • Specify Optimized, NotOptimized, or Unavailable if you specify the name parameter as Finding.

  • Specify MemoryOverprovisioned, MemoryUnderprovisioned, InsufficientData, or Inconclusive if you specify the name parameter as FindingReasonCode.
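A hedged Soto for Swift sketch of using these filter values; the case names and response members are assumptions based on Soto's typical generated code, not text from this patch.

import SotoComputeOptimizer

// Sketch: fetch Lambda function recommendations with a NotOptimized finding.
func listNotOptimizedFunctions(awsClient: AWSClient) async throws {
    let computeOptimizer = ComputeOptimizer(client: awsClient, region: .useast1)
    let request = ComputeOptimizer.GetLambdaFunctionRecommendationsRequest(
        filters: [.init(name: .finding, values: ["NotOptimized"])],  // case name assumed
        maxResults: 50
    )
    let response = try await computeOptimizer.getLambdaFunctionRecommendations(request)
    for recommendation in response.lambdaFunctionRecommendations ?? [] {
        print(recommendation.functionArn ?? "-", recommendation.findingReasonCodes ?? [])
    }
}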
" + "smithy.api#documentation": "

\n The ARN of the metric source provider.\n

" } } }, "traits": { - "smithy.api#documentation": "

Describes a filter that returns a more specific list of Lambda\n function recommendations. Use this filter with the GetLambdaFunctionRecommendations action.

\n

You can use EBSFilter with the GetEBSVolumeRecommendations action, JobFilter with the\n DescribeRecommendationExportJobs action, and Filter\n with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

" + "smithy.api#documentation": "

\n The list of metric sources required to generate recommendations for commercial software licenses.\n

" } }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilterName": { + "com.amazonaws.computeoptimizer#MetricSourceProvider": { "type": "enum", "members": { - "FINDING": { + "CloudWatchAppInsights": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Finding" + "smithy.api#enumValue": "CloudWatchApplicationInsights" + } + } + } + }, + "com.amazonaws.computeoptimizer#MetricStatistic": { + "type": "enum", + "members": { + "MAXIMUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Maximum" } }, - "FINDING_REASON_CODE": { + "AVERAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "FindingReasonCode" + "smithy.api#enumValue": "Average" } } } }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilters": { + "com.amazonaws.computeoptimizer#MetricValue": { + "type": "double", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#MetricValues": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilter" + "target": "com.amazonaws.computeoptimizer#MetricValue" } }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFinding": { + "com.amazonaws.computeoptimizer#MetricsSource": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#MetricSource" + } + }, + "com.amazonaws.computeoptimizer#MigrationEffort": { "type": "enum", "members": { - "OPTIMIZED": { + "VERY_LOW": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Optimized" + "smithy.api#enumValue": "VeryLow" } }, - "NOT_OPTIMIZED": { + "LOW": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NotOptimized" + "smithy.api#enumValue": "Low" } }, - "UNAVAILABLE": { + "MEDIUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Unavailable" + "smithy.api#enumValue": "Medium" + } + }, + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "High" } } } }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCode": { + "com.amazonaws.computeoptimizer#MinSize": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#MissingAuthenticationToken": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.computeoptimizer#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request must contain either a valid (registered) Amazon Web Services access key ID\n or X.509 certificate.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.computeoptimizer#NextToken": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#NullableCpu": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#NullableIOPS": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#NullableMaxAllocatedStorage": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#NullableMemory": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#NullableMemoryReservation": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#NullableStorageThroughput": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#NumberOfCores": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#NumberOfInvocations": { + "type": "long", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#NumberOfMemberAccountsOptedIn": { + "type": "integer" + }, + "com.amazonaws.computeoptimizer#OperatingSystem": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#OptInRequiredException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.computeoptimizer#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The account is not opted in to Compute Optimizer.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.computeoptimizer#PerformanceRisk": { + "type": "double", + "traits": { + "smithy.api#default": 0, + "smithy.api#range": { + "min": 0, + "max": 4 + } + } + }, + "com.amazonaws.computeoptimizer#Period": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.computeoptimizer#PlatformDifference": { "type": "enum", "members": { - "MEMORY_OVER_PROVISIONED": { + "HYPERVISOR": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "MemoryOverprovisioned" + "smithy.api#enumValue": "Hypervisor" } }, - "MEMORY_UNDER_PROVISIONED": { + "NETWORK_INTERFACE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "MemoryUnderprovisioned" + "smithy.api#enumValue": "NetworkInterface" } }, - "INSUFFICIENT_DATA": { + "STORAGE_INTERFACE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "InsufficientData" + "smithy.api#enumValue": "StorageInterface" } }, - "INCONCLUSIVE": { + "INSTANCE_STORE_AVAILABILITY": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Inconclusive" + "smithy.api#enumValue": "InstanceStoreAvailability" + } + }, + "VIRTUALIZATION_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VirtualizationType" + } + }, + "ARCHITECTURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Architecture" } } } }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCodes": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFindingReasonCode" - } - }, - "com.amazonaws.computeoptimizer#LambdaFunctionRecommendations": { + "com.amazonaws.computeoptimizer#PlatformDifferences": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendation" + "target": "com.amazonaws.computeoptimizer#PlatformDifference" } }, - "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetric": { + "com.amazonaws.computeoptimizer#PreferredResource": { "type": "structure", "members": { "name": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMetricName", + "target": "com.amazonaws.computeoptimizer#PreferredResourceName", "traits": { - "smithy.api#documentation": "

The name of the utilization metric.

The following utilization metrics are available:

  • Duration - The amount of time that your function code spends processing an event.

  • Memory - The amount of memory used per invocation.

    \n
  • \n
" + "smithy.api#documentation": "

\n The type of preferred resource to customize.\n

\n \n

Compute Optimizer only supports the customization of Ec2InstanceTypes.

\n
" } }, - "statistic": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionMetricStatistic", + "includeList": { + "target": "com.amazonaws.computeoptimizer#PreferredResourceValues", "traits": { - "smithy.api#documentation": "

The statistic of the utilization metric.

\n

The Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.

\n

The Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide.

" + "smithy.api#documentation": "

\n The preferred resource type values to include in the recommendation candidates. You can specify the exact resource type value, \n such as m5.large, or use wild card expressions, such as m5. If this isn’t specified, all supported resources are included by default. \n You can specify up to 1000 values in this list.\n

" } }, - "value": { - "target": "com.amazonaws.computeoptimizer#MetricValue", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The value of the utilization metric.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

Describes a utilization metric of a Lambda function.

" - } - }, - "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetrics": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#LambdaFunctionUtilizationMetric" - } - }, - "com.amazonaws.computeoptimizer#LambdaSavingsEstimationMode": { - "type": "structure", - "members": { - "source": { - "target": "com.amazonaws.computeoptimizer#LambdaSavingsEstimationModeSource", + "excludeList": { + "target": "com.amazonaws.computeoptimizer#PreferredResourceValues", "traits": { - "smithy.api#documentation": "

\n Describes the source for calculation of savings opportunity for Lambda functions.\n

" + "smithy.api#documentation": "

\n The preferred resource type values to exclude from the recommendation candidates. If this isn’t specified, all supported \n resources are included by default. You can specify up to 1000 values in this list.\n

" } } }, "traits": { - "smithy.api#documentation": "

\n Describes the savings estimation used for calculating savings opportunity for Lambda functions.\n

" + "smithy.api#documentation": "

\n The preference to control which resource type values are considered when generating rightsizing recommendations. \n You can specify this preference as a combination of include and exclude lists. You must specify either an \n includeList or excludeList. If the preference is an empty set of resource type values, \n an error occurs. For more information, see \n Rightsizing recommendation preferences in the Compute Optimizer User\n Guide.\n

\n \n
    \n
  • \n

    This preference is only available for the Amazon EC2 instance and Auto Scaling group resource types.

    \n
  • \n
  • \n

    Compute Optimizer only supports the customization of Ec2InstanceTypes.

    \n
  • \n
\n
" } }, - "com.amazonaws.computeoptimizer#LambdaSavingsEstimationModeSource": { + "com.amazonaws.computeoptimizer#PreferredResourceName": { "type": "enum", "members": { - "PUBLIC_PRICING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PublicPricing" - } - }, - "COST_EXPLORER_RIGHTSIZING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CostExplorerRightsizing" - } - }, - "COST_OPTIMIZATION_HUB": { + "EC2_INSTANCE_TYPES": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "CostOptimizationHub" + "smithy.api#enumValue": "Ec2InstanceTypes" } } } }, - "com.amazonaws.computeoptimizer#LambdaSavingsOpportunityAfterDiscounts": { + "com.amazonaws.computeoptimizer#PreferredResourceValue": { + "type": "string" + }, + "com.amazonaws.computeoptimizer#PreferredResourceValues": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#PreferredResourceValue" + } + }, + "com.amazonaws.computeoptimizer#PreferredResources": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#PreferredResource" + } + }, + "com.amazonaws.computeoptimizer#ProjectedMetric": { "type": "structure", "members": { - "savingsOpportunityPercentage": { - "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", + "name": { + "target": "com.amazonaws.computeoptimizer#MetricName", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s Lambda \n function recommendations. This includes any applicable Savings Plans discounts.\n

" + "smithy.api#documentation": "

The name of the projected utilization metric.

The following projected utilization metrics are returned:

  • Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.

    Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.

  • Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.

    Units: Percent

    The Memory metric is only returned for resources with the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.

  • GPU - The projected percentage of allocated GPUs if you adjust your configurations to Compute Optimizer's recommendation option.

  • GPU_MEMORY - The projected percentage of total GPU memory if you adjust your configurations to Compute Optimizer's recommendation option.

    The GPU and GPU_MEMORY metrics are only returned for resources with the unified CloudWatch Agent installed on them. For more information, see Enabling NVIDIA GPU utilization with the CloudWatch Agent.

    \n
    \n
  • \n
" } }, - "estimatedMonthlySavings": { - "target": "com.amazonaws.computeoptimizer#LambdaEstimatedMonthlySavings", + "timestamps": { + "target": "com.amazonaws.computeoptimizer#Timestamps", "traits": { - "smithy.api#documentation": "

\n The estimated monthly savings possible by adopting Compute Optimizer’s Lambda function recommendations. This \n includes any applicable Savings Plans discounts.\n

" + "smithy.api#documentation": "

The timestamps of the projected utilization metric.

" + } + }, + "values": { + "target": "com.amazonaws.computeoptimizer#MetricValues", + "traits": { + "smithy.api#documentation": "

The values of the projected utilization metrics.

" } } }, "traits": { - "smithy.api#documentation": "

\n Describes the savings opportunity for Lambda functions recommendations after applying Savings Plans discounts.\n

\n

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.

" + "smithy.api#documentation": "

Describes a projected utilization metric of a recommendation option, such as an\n Amazon EC2 instance. This represents the projected utilization of a\n recommendation option had you used that resource during the analyzed period.

\n

Compare the utilization metric data of your resource against its projected utilization\n metric data to determine the performance difference between your current resource and\n the recommended option.

\n \n

The Cpu, Memory, GPU, and GPU_MEMORY metrics \n are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, these\n metrics are only returned for resources with the unified\n CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent and\n Enabling NVIDIA GPU \n utilization with the CloudWatch Agent.
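As an illustrative sketch only, the Soto for Swift call below retrieves these projected metrics for one instance; the time range, period, and stat values are placeholders, and member and case names follow Soto's usual generated naming rather than text from this patch.

import Foundation
import SotoComputeOptimizer

// Sketch: compare projected utilization across an instance's recommendation options.
func showProjectedMetrics(awsClient: AWSClient, instanceArn: String) async throws {
    let computeOptimizer = ComputeOptimizer(client: awsClient, region: .useast1)
    let request = ComputeOptimizer.GetEC2RecommendationProjectedMetricsRequest(
        endTime: Date(),
        instanceArn: instanceArn,                              // placeholder ARN supplied by the caller
        period: 3600,                                          // one data point per hour
        startTime: Date(timeIntervalSinceNow: -14 * 24 * 3600),
        stat: .maximum
    )
    let response = try await computeOptimizer.getEC2RecommendationProjectedMetrics(request)
    for option in response.recommendedOptionProjectedMetrics ?? [] {
        print(option.recommendedInstanceType ?? "-", option.projectedMetrics?.first?.values ?? [])
    }
}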

\n
" } }, - "com.amazonaws.computeoptimizer#LastRefreshTimestamp": { - "type": "timestamp" + "com.amazonaws.computeoptimizer#ProjectedMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#ProjectedMetric" + } }, - "com.amazonaws.computeoptimizer#LastUpdatedTimestamp": { - "type": "timestamp" + "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#UtilizationMetric" + } }, - "com.amazonaws.computeoptimizer#LicenseConfiguration": { + "com.amazonaws.computeoptimizer#PutRecommendationPreferences": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferencesRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferencesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new recommendation preference or updates an existing recommendation\n preference, such as enhanced infrastructure metrics.

\n

For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.
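For illustration, a minimal Soto for Swift sketch of activating this preference at the account level; scope, resource type, and enum case names are assumptions based on Soto's typical generated code, not text from this patch.

import SotoComputeOptimizer

// Sketch: activate enhanced infrastructure metrics for EC2 recommendations in one account.
func activateEnhancedMetrics(awsClient: AWSClient, accountId: String) async throws {
    let computeOptimizer = ComputeOptimizer(client: awsClient, region: .useast1)
    let request = ComputeOptimizer.PutRecommendationPreferencesRequest(
        enhancedInfrastructureMetrics: .active,
        resourceType: .ec2Instance,
        scope: .init(name: .accountId, value: accountId)   // case names assumed
    )
    _ = try await computeOptimizer.putRecommendationPreferences(request)
}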

" + } + }, + "com.amazonaws.computeoptimizer#PutRecommendationPreferencesRequest": { "type": "structure", "members": { - "numberOfCores": { - "target": "com.amazonaws.computeoptimizer#NumberOfCores", + "resourceType": { + "target": "com.amazonaws.computeoptimizer#ResourceType", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The current number of cores associated with the instance.\n

" + "smithy.api#documentation": "

The target resource type of the recommendation preference to create.

\n

The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.

", + "smithy.api#required": {} } }, - "instanceType": { - "target": "com.amazonaws.computeoptimizer#InstanceType", + "scope": { + "target": "com.amazonaws.computeoptimizer#Scope", "traits": { - "smithy.api#documentation": "

\n The instance type used in the license.\n

" + "smithy.api#documentation": "

An object that describes the scope of the recommendation preference to create.

\n

You can create recommendation preferences at the organization level (for management\n accounts of an organization only), account level, and resource level. For more\n information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

\n \n

You cannot create recommendation preferences for Auto Scaling groups at the\n organization and account levels. You can create recommendation preferences for\n Auto Scaling groups only at the resource level by specifying a scope name\n of ResourceArn and a scope value of the Auto Scaling group Amazon\n Resource Name (ARN). This will configure the preference for all instances that are\n part of the specified Auto Scaling group. You also cannot create recommendation\n preferences at the resource level for instances that are part of an Auto Scaling group. You can create recommendation preferences at the resource level only for\n standalone instances.

\n
" } }, - "operatingSystem": { - "target": "com.amazonaws.computeoptimizer#OperatingSystem", + "enhancedInfrastructureMetrics": { + "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", "traits": { - "smithy.api#documentation": "

\n The operating system of the instance. \n

" + "smithy.api#documentation": "

The status of the enhanced infrastructure metrics recommendation preference to create\n or update.

\n

Specify the Active status to activate the preference, or specify\n Inactive to deactivate the preference.

\n

For more information, see Enhanced\n infrastructure metrics in the Compute Optimizer User\n Guide.

" } }, - "licenseEdition": { - "target": "com.amazonaws.computeoptimizer#LicenseEdition", + "inferredWorkloadTypes": { + "target": "com.amazonaws.computeoptimizer#InferredWorkloadTypesPreference", "traits": { - "smithy.api#documentation": "

\n The edition of the license for the application that runs on the instance.\n

" + "smithy.api#documentation": "

The status of the inferred workload types recommendation preference to create or\n update.

\n \n

The inferred workload type feature is active by default. To deactivate it, create\n a recommendation preference.

\n
\n

Specify the Inactive status to deactivate the feature, or specify\n Active to activate it.

\n

For more information, see Inferred workload\n types in the Compute Optimizer User Guide.

" } }, - "licenseName": { - "target": "com.amazonaws.computeoptimizer#LicenseName", + "externalMetricsPreference": { + "target": "com.amazonaws.computeoptimizer#ExternalMetricsPreference", "traits": { - "smithy.api#documentation": "

\n The name of the license for the application that runs on the instance.\n

" + "smithy.api#documentation": "

The provider of the external metrics recommendation preference to create or\n update.

\n

Specify a valid provider in the source field to activate the preference.\n To delete this preference, see the DeleteRecommendationPreferences\n action.

\n

This preference can only be set for the Ec2Instance resource type.

\n

For more information, see External metrics\n ingestion in the Compute Optimizer User\n Guide.

" } }, - "licenseModel": { - "target": "com.amazonaws.computeoptimizer#LicenseModel", + "lookBackPeriod": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodPreference", "traits": { - "smithy.api#documentation": "

\n The license type associated with the instance.\n

" + "smithy.api#documentation": "

The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we use the default value DAYS_14.

Note: You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.
" } }, - "licenseVersion": { - "target": "com.amazonaws.computeoptimizer#LicenseVersion", + "utilizationPreferences": { + "target": "com.amazonaws.computeoptimizer#UtilizationPreferences", "traits": { - "smithy.api#documentation": "

\n The version of the license for the application that runs on the instance.\n

" + "smithy.api#documentation": "

The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom. When this preference isn't specified, we use the following default values.

CPU utilization:
  • P99_5 for threshold
  • PERCENT_20 for headroom

Memory utilization:
  • PERCENT_20 for headroom

Note:
  • You can only set CPU and memory utilization preferences for the Amazon EC2 instance resource type.
  • The threshold setting isn’t available for memory utilization.
" } }, - "metricsSource": { - "target": "com.amazonaws.computeoptimizer#MetricsSource", + "preferredResources": { + "target": "com.amazonaws.computeoptimizer#PreferredResources", "traits": { - "smithy.api#documentation": "

\n The list of metric sources required to generate recommendations for commercial software licenses.\n

" + "smithy.api#documentation": "

The preference to control which resource type values are considered when generating rightsizing recommendations. You can specify this preference as a combination of include and exclude lists. You must specify either an includeList or excludeList. If the preference is an empty set of resource type values, an error occurs.

Note: You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.
" + } + }, + "savingsEstimationMode": { + "target": "com.amazonaws.computeoptimizer#SavingsEstimationMode", + "traits": { + "smithy.api#documentation": "

The status of the savings estimation mode preference to create or update.

Specify the AfterDiscounts status to activate the preference, or specify BeforeDiscounts to deactivate the preference.

Only the account manager or delegated administrator of your organization can activate this preference.

For more information, see Savings estimation mode in the Compute Optimizer User Guide.

" } } }, "traits": { - "smithy.api#documentation": "

\n Describes the configuration of a license for an Amazon EC2 instance.\n

" + "smithy.api#input": {} } }, - "com.amazonaws.computeoptimizer#LicenseEdition": { - "type": "enum", - "members": { - "ENTERPRISE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Enterprise" - } - }, - "STANDARD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Standard" - } - }, - "FREE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Free" - } - }, - "NO_LICENSE_EDITION_FOUND": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NoLicenseEditionFound" - } - } + "com.amazonaws.computeoptimizer#PutRecommendationPreferencesResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} } }, - "com.amazonaws.computeoptimizer#LicenseFinding": { - "type": "enum", + "com.amazonaws.computeoptimizer#RDSDBInstanceRecommendationOption": { + "type": "structure", "members": { - "INSUFFICIENT_METRICS": { - "target": "smithy.api#Unit", + "dbInstanceClass": { + "target": "com.amazonaws.computeoptimizer#DBInstanceClass", "traits": { - "smithy.api#enumValue": "InsufficientMetrics" + "smithy.api#documentation": "

\n Describes the DB instance class recommendation option for your Amazon RDS instance.\n

" } }, - "OPTIMIZED": { - "target": "smithy.api#Unit", + "projectedUtilizationMetrics": { + "target": "com.amazonaws.computeoptimizer#RDSDBProjectedUtilizationMetrics", "traits": { - "smithy.api#enumValue": "Optimized" + "smithy.api#documentation": "

\n An array of objects that describe the projected utilization metrics of the RDS instance recommendation option.\n

" } }, - "NOT_OPTIMIZED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NotOptimized" - } - } - } - }, - "com.amazonaws.computeoptimizer#LicenseFindingReasonCode": { - "type": "enum", - "members": { - "CW_APP_INSIGHTS_DISABLED": { - "target": "smithy.api#Unit", + "performanceRisk": { + "target": "com.amazonaws.computeoptimizer#PerformanceRisk", "traits": { - "smithy.api#enumValue": "InvalidCloudWatchApplicationInsightsSetup" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The performance risk of the RDS instance recommendation option.\n

" } }, - "CW_APP_INSIGHTS_ERROR": { - "target": "smithy.api#Unit", + "rank": { + "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#enumValue": "CloudWatchApplicationInsightsError" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The rank identifier of the RDS instance recommendation option.\n

" } }, - "LICENSE_OVER_PROVISIONED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LicenseOverprovisioned" - } + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity" }, - "OPTIMIZED": { - "target": "smithy.api#Unit", + "savingsOpportunityAfterDiscounts": { + "target": "com.amazonaws.computeoptimizer#RDSInstanceSavingsOpportunityAfterDiscounts", "traits": { - "smithy.api#enumValue": "Optimized" + "smithy.api#documentation": "

\n Describes the savings opportunity for Amazon RDS recommendations or for the recommendation option.\n

\n

Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.

" } } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the recommendation options for an Amazon RDS instance.\n

" } }, - "com.amazonaws.computeoptimizer#LicenseFindingReasonCodes": { + "com.amazonaws.computeoptimizer#RDSDBInstanceRecommendationOptions": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#LicenseFindingReasonCode" - } - }, - "com.amazonaws.computeoptimizer#LicenseModel": { - "type": "enum", - "members": { - "LICENSE_INCLUDED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LicenseIncluded" - } - }, - "BRING_YOUR_OWN_LICENSE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BringYourOwnLicense" - } - } + "target": "com.amazonaws.computeoptimizer#RDSDBInstanceRecommendationOption" } }, - "com.amazonaws.computeoptimizer#LicenseName": { + "com.amazonaws.computeoptimizer#RDSDBMetricName": { "type": "enum", "members": { - "SQLSERVER": { + "CPU": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "SQLServer" - } - } - } - }, - "com.amazonaws.computeoptimizer#LicenseRecommendation": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.computeoptimizer#ResourceArn", - "traits": { - "smithy.api#documentation": "

\n The ARN that identifies the Amazon EC2 instance.\n

" - } - }, - "accountId": { - "target": "com.amazonaws.computeoptimizer#AccountId", - "traits": { - "smithy.api#documentation": "

\n The Amazon Web Services account ID of the license. \n

" + "smithy.api#enumValue": "CPU" } }, - "currentLicenseConfiguration": { - "target": "com.amazonaws.computeoptimizer#LicenseConfiguration", + "MEMORY": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n An object that describes the current configuration of an instance that runs on a license.\n

" + "smithy.api#enumValue": "Memory" } }, - "lookbackPeriodInDays": { - "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", + "EBS_VOLUME_STORAGE_SPACE_UTILIZATION": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The number of days for which utilization metrics were analyzed for an instance that runs on a license.\n

" + "smithy.api#enumValue": "EBSVolumeStorageSpaceUtilization" } }, - "lastRefreshTimestamp": { - "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", + "NETWORK_RECEIVE_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n The timestamp of when the license recommendation was last generated.\n

" + "smithy.api#enumValue": "NetworkReceiveThroughput" } }, - "finding": { - "target": "com.amazonaws.computeoptimizer#LicenseFinding", + "NETWORK_TRANSMIT_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The finding classification for an instance that runs on a license.

Findings include:
  • InsufficientMetrics — When Compute Optimizer detects that your CloudWatch Application Insights isn't enabled or is enabled with insufficient permissions.
  • NotOptimized — When Compute Optimizer detects that your EC2 infrastructure isn't using any of the SQL Server license features you're paying for, a license is considered not optimized.
  • Optimized — When Compute Optimizer detects that all specifications of your license meet the performance requirements of your workload.
" + "smithy.api#enumValue": "NetworkTransmitThroughput" } }, - "findingReasonCodes": { - "target": "com.amazonaws.computeoptimizer#LicenseFindingReasonCodes", + "EBS_VOLUME_READ_IOPS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The reason for the finding classification for an instance that runs on a license.

Finding reason codes include:
  • Optimized — All specifications of your license meet the performance requirements of your workload.
  • LicenseOverprovisioned — A license is considered over-provisioned when your license can be downgraded while still meeting the performance requirements of your workload.
  • InvalidCloudwatchApplicationInsights — CloudWatch Application Insights isn't configured properly.
  • CloudwatchApplicationInsightsError — There is a CloudWatch Application Insights error.
" + "smithy.api#enumValue": "EBSVolumeReadIOPS" } }, - "licenseRecommendationOptions": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendationOptions", + "EBS_VOLUME_WRITE_IOPS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n An array of objects that describe the license recommendation options.\n

" + "smithy.api#enumValue": "EBSVolumeWriteIOPS" } }, - "tags": { - "target": "com.amazonaws.computeoptimizer#Tags", + "EBS_VOLUME_READ_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n A list of tags assigned to an EC2 instance.\n

" + "smithy.api#enumValue": "EBSVolumeReadThroughput" } - } - }, - "traits": { - "smithy.api#documentation": "

\n Describes a license recommendation for an EC2 instance.\n

" - } - }, - "com.amazonaws.computeoptimizer#LicenseRecommendationFilter": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendationFilterName", + }, + "EBS_VOLUME_WRITE_THROUGHPUT": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the filter.

Specify Finding to return recommendations with a specific finding classification.

Specify FindingReasonCode to return recommendations with a specific finding reason code.

You can filter your license recommendations by tag:key and tag-key tags.

A tag:key is a key and value combination of a tag assigned to your license recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all license recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

A tag-key is the key of a tag assigned to your license recommendations. Use this filter to find all of your license recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your license recommendations with a tag key value of Owner or without any tag keys assigned.

" + "smithy.api#enumValue": "EBSVolumeWriteThroughput" } }, - "values": { - "target": "com.amazonaws.computeoptimizer#FilterValues", + "DATABASE_CONNECTIONS": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The value of the filter.

The valid values for this parameter are as follows, depending on what you specify for the name parameter:
  • If you specify the name parameter as Finding, then specify Optimized, NotOptimized, or InsufficientMetrics.
  • If you specify the name parameter as FindingReasonCode, then specify Optimized, LicenseOverprovisioned, InvalidCloudwatchApplicationInsights, or CloudwatchApplicationInsightsError.
" + "smithy.api#enumValue": "DatabaseConnections" } } - }, - "traits": { - "smithy.api#documentation": "

\n Describes a filter that returns a more specific list of license recommendations. Use this filter \n with the GetLicenseRecommendation action.\n

" } }, - "com.amazonaws.computeoptimizer#LicenseRecommendationFilterName": { + "com.amazonaws.computeoptimizer#RDSDBMetricStatistic": { "type": "enum", "members": { - "LICENSE_FINDING": { + "MAXIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Finding" + "smithy.api#enumValue": "Maximum" } }, - "LICENSE_FINDING_REASON_CODE": { + "MINIMUM": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "FindingReasonCode" + "smithy.api#enumValue": "Minimum" } }, - "LICENSE_NAME": { + "AVERAGE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "LicenseName" + "smithy.api#enumValue": "Average" } } } }, - "com.amazonaws.computeoptimizer#LicenseRecommendationFilters": { + "com.amazonaws.computeoptimizer#RDSDBProjectedUtilizationMetrics": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendationFilter" + "target": "com.amazonaws.computeoptimizer#RDSDBUtilizationMetric" } }, - "com.amazonaws.computeoptimizer#LicenseRecommendationOption": { + "com.amazonaws.computeoptimizer#RDSDBRecommendation": { "type": "structure", "members": { - "rank": { - "target": "com.amazonaws.computeoptimizer#Rank", + "resourceArn": { + "target": "com.amazonaws.computeoptimizer#ResourceArn", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n The rank of the license recommendation option.\n

\n

\n The top recommendation option is ranked as 1.\n

" + "smithy.api#documentation": "

The ARN of the current Amazon RDS instance.

The following is the format of the ARN:

arn:aws:rds:{region}:{accountId}:db:{resourceName}

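For illustration, a hypothetical ARN matching this format (Region, account ID, and DB instance name are placeholders) would look like:

    arn:aws:rds:us-east-1:111122223333:db:my-database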
" } }, - "operatingSystem": { - "target": "com.amazonaws.computeoptimizer#OperatingSystem", + "accountId": { + "target": "com.amazonaws.computeoptimizer#AccountId", "traits": { - "smithy.api#documentation": "

\n The operating system of a license recommendation option.\n

" + "smithy.api#documentation": "

\n The Amazon Web Services account ID of the Amazon RDS.\n

" } }, - "licenseEdition": { - "target": "com.amazonaws.computeoptimizer#LicenseEdition", + "engine": { + "target": "com.amazonaws.computeoptimizer#Engine", "traits": { - "smithy.api#documentation": "

\n The recommended edition of the license for the application that runs on the instance.\n

" + "smithy.api#documentation": "

\n The engine of the RDS instance.\n

" } }, - "licenseModel": { - "target": "com.amazonaws.computeoptimizer#LicenseModel", + "engineVersion": { + "target": "com.amazonaws.computeoptimizer#EngineVersion", "traits": { - "smithy.api#documentation": "

\n The recommended license type associated with the instance.\n

" + "smithy.api#documentation": "

\n The database engine version.\n

" } }, - "savingsOpportunity": { - "target": "com.amazonaws.computeoptimizer#SavingsOpportunity" - } - }, - "traits": { - "smithy.api#documentation": "

\n Describes the recommendation options for licenses.\n

" - } - }, - "com.amazonaws.computeoptimizer#LicenseRecommendationOptions": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendationOption" - } - }, - "com.amazonaws.computeoptimizer#LicenseRecommendations": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#LicenseRecommendation" - } - }, - "com.amazonaws.computeoptimizer#LicenseVersion": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#LimitExceededException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.computeoptimizer#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The request exceeds a limit of the service.

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.computeoptimizer#LookBackPeriodInDays": { - "type": "double", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#LookBackPeriodPreference": { - "type": "enum", - "members": { - "DAYS_14": { - "target": "smithy.api#Unit", + "currentDBInstanceClass": { + "target": "com.amazonaws.computeoptimizer#CurrentDBInstanceClass", "traits": { - "smithy.api#enumValue": "DAYS_14" + "smithy.api#documentation": "

\n The DB instance class of the current RDS instance.\n

" } }, - "DAYS_32": { - "target": "smithy.api#Unit", + "currentStorageConfiguration": { + "target": "com.amazonaws.computeoptimizer#DBStorageConfiguration", "traits": { - "smithy.api#enumValue": "DAYS_32" + "smithy.api#documentation": "

\n The configuration of the current RDS storage.\n

" } }, - "DAYS_93": { - "target": "smithy.api#Unit", + "idle": { + "target": "com.amazonaws.computeoptimizer#Idle", "traits": { - "smithy.api#enumValue": "DAYS_93" + "smithy.api#documentation": "

\n This indicates if the RDS instance is idle or not.\n

" } - } - } - }, - "com.amazonaws.computeoptimizer#Low": { - "type": "long", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#LowerBoundValue": { - "type": "double", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 0, - "max": 1000 - } - } - }, - "com.amazonaws.computeoptimizer#MaxSize": { - "type": "integer", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#Medium": { - "type": "long", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#MemberAccountsEnrolled": { - "type": "boolean", - "traits": { - "smithy.api#default": false - } - }, - "com.amazonaws.computeoptimizer#MemorySize": { - "type": "integer", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#MemorySizeConfiguration": { - "type": "structure", - "members": { - "memory": { - "target": "com.amazonaws.computeoptimizer#NullableMemory", + }, + "instanceFinding": { + "target": "com.amazonaws.computeoptimizer#RDSInstanceFinding", "traits": { - "smithy.api#documentation": "

\n The amount of memory in the container.\n

" + "smithy.api#documentation": "

The finding classification of an Amazon RDS instance.

Findings for Amazon RDS instances include:
  • Underprovisioned — When Compute Optimizer detects that there aren't enough resource specifications, an Amazon RDS instance is considered under-provisioned.
  • Overprovisioned — When Compute Optimizer detects that there are excessive resource specifications, an Amazon RDS instance is considered over-provisioned.
  • Optimized — When the specifications of your Amazon RDS instance meet the performance requirements of your workload, the service is considered optimized.
" } }, - "memoryReservation": { - "target": "com.amazonaws.computeoptimizer#NullableMemoryReservation", + "storageFinding": { + "target": "com.amazonaws.computeoptimizer#RDSStorageFinding", "traits": { - "smithy.api#documentation": "

\n The limit of memory reserve for the container.\n

" + "smithy.api#documentation": "

The finding classification of Amazon RDS storage.

Findings for Amazon RDS storage include:
  • Underprovisioned — When Compute Optimizer detects that there isn't enough storage, the Amazon RDS storage is considered under-provisioned.
  • Overprovisioned — When Compute Optimizer detects that there's excessive storage, the Amazon RDS storage is considered over-provisioned.
  • Optimized — When the storage of your Amazon RDS instance meets the performance requirements of your workload, the service is considered optimized.
" } - } - }, - "traits": { - "smithy.api#documentation": "

\n The memory size configurations of a container.\n

" - } - }, - "com.amazonaws.computeoptimizer#Message": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#MetadataKey": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#MetricName": { - "type": "enum", - "members": { - "CPU": { - "target": "smithy.api#Unit", + }, + "instanceFindingReasonCodes": { + "target": "com.amazonaws.computeoptimizer#RDSInstanceFindingReasonCodes", "traits": { - "smithy.api#enumValue": "Cpu" + "smithy.api#documentation": "

\n The reason for the finding classification of an Amazon RDS instance.\n

" } }, - "MEMORY": { - "target": "smithy.api#Unit", + "storageFindingReasonCodes": { + "target": "com.amazonaws.computeoptimizer#RDSStorageFindingReasonCodes", "traits": { - "smithy.api#enumValue": "Memory" + "smithy.api#documentation": "

\n The reason for the finding classification of Amazon RDS storage.\n

" } }, - "EBS_READ_OPS_PER_SECOND": { - "target": "smithy.api#Unit", + "instanceRecommendationOptions": { + "target": "com.amazonaws.computeoptimizer#RDSDBInstanceRecommendationOptions", "traits": { - "smithy.api#enumValue": "EBS_READ_OPS_PER_SECOND" + "smithy.api#documentation": "

\n An array of objects that describe the recommendation options for the Amazon RDS instance.\n

" } }, - "EBS_WRITE_OPS_PER_SECOND": { - "target": "smithy.api#Unit", + "storageRecommendationOptions": { + "target": "com.amazonaws.computeoptimizer#RDSDBStorageRecommendationOptions", "traits": { - "smithy.api#enumValue": "EBS_WRITE_OPS_PER_SECOND" + "smithy.api#documentation": "

\n An array of objects that describe the recommendation options for Amazon RDS storage.\n

" } }, - "EBS_READ_BYTES_PER_SECOND": { - "target": "smithy.api#Unit", + "utilizationMetrics": { + "target": "com.amazonaws.computeoptimizer#RDSDBUtilizationMetrics", "traits": { - "smithy.api#enumValue": "EBS_READ_BYTES_PER_SECOND" + "smithy.api#documentation": "

\n An array of objects that describe the utilization metrics of the Amazon RDS.\n

" } }, - "EBS_WRITE_BYTES_PER_SECOND": { - "target": "smithy.api#Unit", + "effectiveRecommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#RDSEffectiveRecommendationPreferences", "traits": { - "smithy.api#enumValue": "EBS_WRITE_BYTES_PER_SECOND" + "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Amazon RDS.\n

" } }, - "DISK_READ_OPS_PER_SECOND": { - "target": "smithy.api#Unit", + "lookbackPeriodInDays": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodInDays", "traits": { - "smithy.api#enumValue": "DISK_READ_OPS_PER_SECOND" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The number of days the Amazon RDS utilization metrics were analyzed.\n

" } }, - "DISK_WRITE_OPS_PER_SECOND": { - "target": "smithy.api#Unit", + "lastRefreshTimestamp": { + "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", "traits": { - "smithy.api#enumValue": "DISK_WRITE_OPS_PER_SECOND" + "smithy.api#documentation": "

\n The timestamp of when the Amazon RDS recommendation was last generated.\n

" } }, - "DISK_READ_BYTES_PER_SECOND": { - "target": "smithy.api#Unit", + "tags": { + "target": "com.amazonaws.computeoptimizer#Tags", "traits": { - "smithy.api#enumValue": "DISK_READ_BYTES_PER_SECOND" + "smithy.api#documentation": "

\n A list of tags assigned to your Amazon RDS recommendations.\n

" } - }, - "DISK_WRITE_BYTES_PER_SECOND": { - "target": "smithy.api#Unit", + } + }, + "traits": { + "smithy.api#documentation": "

\n Describes an Amazon RDS recommendation.\n

" + } + }, + "com.amazonaws.computeoptimizer#RDSDBRecommendationFilter": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.computeoptimizer#RDSDBRecommendationFilterName", "traits": { - "smithy.api#enumValue": "DISK_WRITE_BYTES_PER_SECOND" + "smithy.api#documentation": "

The name of the filter.

Specify Finding to return recommendations with a specific finding classification.

You can filter your Amazon RDS recommendations by tag:key and tag-key tags.

A tag:key is a key and value combination of a tag assigned to your Amazon RDS recommendations. Use the tag key in the filter name and the tag value as the filter value. For example, to find all Amazon RDS service recommendations that have a tag with the key of Owner and the value of TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

A tag-key is the key of a tag assigned to your Amazon RDS recommendations. Use this filter to find all of your Amazon RDS recommendations that have a tag with a specific key. This doesn’t consider the tag value. For example, you can find your Amazon RDS service recommendations with a tag key value of Owner or without any tag keys assigned.

" } }, - "NETWORK_IN_BYTES_PER_SECOND": { - "target": "smithy.api#Unit", + "values": { + "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#enumValue": "NETWORK_IN_BYTES_PER_SECOND" + "smithy.api#documentation": "

\n The value of the filter.\n

" } - }, - "NETWORK_OUT_BYTES_PER_SECOND": { + } + }, + "traits": { + "smithy.api#documentation": "

Describes a filter that returns a more specific list of Amazon RDS recommendations. Use this filter with the GetRDSDatabaseRecommendations action.

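A minimal Soto sketch of building such a filter, assuming the generated RDSDBRecommendationFilter and GetRDSDatabaseRecommendationsRequest shapes and the .instanceFinding case name (verify against ComputeOptimizer_shapes.swift before relying on it):

    import SotoComputeOptimizer

    // Return only RDS instance recommendations classified as Underprovisioned.
    let findingFilter = ComputeOptimizer.RDSDBRecommendationFilter(
        name: .instanceFinding,
        values: ["Underprovisioned"]
    )

    // Attach the filter to a GetRDSDatabaseRecommendations request.
    let request = ComputeOptimizer.GetRDSDatabaseRecommendationsRequest(
        filters: [findingFilter],
        maxResults: 50
    )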
" + } + }, + "com.amazonaws.computeoptimizer#RDSDBRecommendationFilterName": { + "type": "enum", + "members": { + "INSTANCE_FINDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NETWORK_OUT_BYTES_PER_SECOND" + "smithy.api#enumValue": "InstanceFinding" } }, - "NETWORK_PACKETS_IN_PER_SECOND": { + "INSTANCE_FINDING_REASON_CODE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NETWORK_PACKETS_IN_PER_SECOND" + "smithy.api#enumValue": "InstanceFindingReasonCode" } }, - "NETWORK_PACKETS_OUT_PER_SECOND": { + "STORAGE_FINDING": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NETWORK_PACKETS_OUT_PER_SECOND" + "smithy.api#enumValue": "StorageFinding" } }, - "GPU_PERCENTAGE": { + "STORAGE_FINDING_REASON_CODE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "GPU_PERCENTAGE" + "smithy.api#enumValue": "StorageFindingReasonCode" } }, - "GPU_MEMORY_PERCENTAGE": { + "IDLE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "GPU_MEMORY_PERCENTAGE" + "smithy.api#enumValue": "Idle" } } } }, - "com.amazonaws.computeoptimizer#MetricProviderArn": { - "type": "string" + "com.amazonaws.computeoptimizer#RDSDBRecommendationFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RDSDBRecommendationFilter" + } }, - "com.amazonaws.computeoptimizer#MetricSource": { + "com.amazonaws.computeoptimizer#RDSDBRecommendations": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RDSDBRecommendation" + } + }, + "com.amazonaws.computeoptimizer#RDSDBStorageRecommendationOption": { "type": "structure", "members": { - "provider": { - "target": "com.amazonaws.computeoptimizer#MetricSourceProvider", + "storageConfiguration": { + "target": "com.amazonaws.computeoptimizer#DBStorageConfiguration", "traits": { - "smithy.api#documentation": "

\n The name of the metric source provider.\n

" + "smithy.api#documentation": "

\n The recommended storage configuration.\n

" } }, - "providerArn": { - "target": "com.amazonaws.computeoptimizer#MetricProviderArn", + "rank": { + "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

\n The ARN of the metric source provider.\n

" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The rank identifier of the RDS storage recommendation option.\n

" + } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity" + }, + "savingsOpportunityAfterDiscounts": { + "target": "com.amazonaws.computeoptimizer#RDSStorageSavingsOpportunityAfterDiscounts", + "traits": { + "smithy.api#documentation": "

\n Describes the savings opportunity for Amazon RDS storage recommendations or for the recommendation option.\n

\n

\n Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. You can achieve\n this by implementing a given Compute Optimizer recommendation.\n

" } } }, "traits": { - "smithy.api#documentation": "

\n The list of metric sources required to generate recommendations for commercial software licenses.\n

" + "smithy.api#documentation": "

\n Describes the recommendation options for Amazon RDS storage.\n

" } }, - "com.amazonaws.computeoptimizer#MetricSourceProvider": { - "type": "enum", - "members": { - "CloudWatchAppInsights": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CloudWatchApplicationInsights" - } - } + "com.amazonaws.computeoptimizer#RDSDBStorageRecommendationOptions": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RDSDBStorageRecommendationOption" } }, - "com.amazonaws.computeoptimizer#MetricStatistic": { - "type": "enum", + "com.amazonaws.computeoptimizer#RDSDBUtilizationMetric": { + "type": "structure", "members": { - "MAXIMUM": { - "target": "smithy.api#Unit", + "name": { + "target": "com.amazonaws.computeoptimizer#RDSDBMetricName", "traits": { - "smithy.api#enumValue": "Maximum" + "smithy.api#documentation": "

\n The name of the utilization metric.\n

" } }, - "AVERAGE": { - "target": "smithy.api#Unit", + "statistic": { + "target": "com.amazonaws.computeoptimizer#RDSDBMetricStatistic", "traits": { - "smithy.api#enumValue": "Average" + "smithy.api#documentation": "

\n The statistic of the utilization metric.\n

\n

The Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.

\n

The Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide.

" + } + }, + "value": { + "target": "com.amazonaws.computeoptimizer#MetricValue", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The value of the utilization metric.\n

" } } - } - }, - "com.amazonaws.computeoptimizer#MetricValue": { - "type": "double", + }, "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#MetricValues": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#MetricValue" + "smithy.api#documentation": "

\n Describes the utilization metric of an Amazon RDS.\n

\n

\n To determine the performance difference between your current Amazon RDS and the recommended option, \n compare the utilization metric data of your service against its projected utilization metric data.\n

" } }, - "com.amazonaws.computeoptimizer#MetricsSource": { + "com.amazonaws.computeoptimizer#RDSDBUtilizationMetrics": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#MetricSource" + "target": "com.amazonaws.computeoptimizer#RDSDBUtilizationMetric" } }, - "com.amazonaws.computeoptimizer#MigrationEffort": { - "type": "enum", + "com.amazonaws.computeoptimizer#RDSDatabaseProjectedMetric": { + "type": "structure", "members": { - "VERY_LOW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VeryLow" - } - }, - "LOW": { - "target": "smithy.api#Unit", + "name": { + "target": "com.amazonaws.computeoptimizer#RDSDBMetricName", "traits": { - "smithy.api#enumValue": "Low" + "smithy.api#documentation": "

\n The name of the projected metric.\n

" } }, - "MEDIUM": { - "target": "smithy.api#Unit", + "timestamps": { + "target": "com.amazonaws.computeoptimizer#Timestamps", "traits": { - "smithy.api#enumValue": "Medium" + "smithy.api#documentation": "

\n The timestamps of the projected metric.\n

" } }, - "HIGH": { - "target": "smithy.api#Unit", + "values": { + "target": "com.amazonaws.computeoptimizer#MetricValues", "traits": { - "smithy.api#enumValue": "High" + "smithy.api#documentation": "

\n The values for the projected metric.\n

" } } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the projected metrics of an Amazon RDS recommendation option.\n

\n

\n To determine the performance difference between your current Amazon RDS \n and the recommended option, compare the metric data of your service against \n its projected metric data.\n

" } }, - "com.amazonaws.computeoptimizer#MinSize": { - "type": "integer", - "traits": { - "smithy.api#default": 0 + "com.amazonaws.computeoptimizer#RDSDatabaseProjectedMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RDSDatabaseProjectedMetric" } }, - "com.amazonaws.computeoptimizer#MissingAuthenticationToken": { + "com.amazonaws.computeoptimizer#RDSDatabaseRecommendedOptionProjectedMetric": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.computeoptimizer#ErrorMessage" + "recommendedDBInstanceClass": { + "target": "com.amazonaws.computeoptimizer#RecommendedDBInstanceClass", + "traits": { + "smithy.api#documentation": "

\n The recommended DB instance class for the Amazon RDS.\n

" + } + }, + "rank": { + "target": "com.amazonaws.computeoptimizer#Rank", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The rank identifier of the RDS instance recommendation option.\n

" + } + }, + "projectedMetrics": { + "target": "com.amazonaws.computeoptimizer#RDSDatabaseProjectedMetrics", + "traits": { + "smithy.api#documentation": "

\n An array of objects that describe the projected metric.\n

" + } } }, "traits": { - "smithy.api#documentation": "

The request must contain either a valid (registered) Amazon Web Services access key ID\n or X.509 certificate.

", - "smithy.api#error": "client", - "smithy.api#httpError": 403 + "smithy.api#documentation": "

\n Describes the projected metrics of an Amazon RDS recommendation option.\n

\n

\n To determine the performance difference between your current Amazon RDS and the recommended option, compare \n the metric data of your service against its projected metric data.\n

" } }, - "com.amazonaws.computeoptimizer#NextToken": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#NullableCpu": { - "type": "integer" - }, - "com.amazonaws.computeoptimizer#NullableMemory": { - "type": "integer" - }, - "com.amazonaws.computeoptimizer#NullableMemoryReservation": { - "type": "integer" - }, - "com.amazonaws.computeoptimizer#NumberOfCores": { - "type": "integer", - "traits": { - "smithy.api#default": 0 + "com.amazonaws.computeoptimizer#RDSDatabaseRecommendedOptionProjectedMetrics": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RDSDatabaseRecommendedOptionProjectedMetric" } }, - "com.amazonaws.computeoptimizer#NumberOfInvocations": { - "type": "long", + "com.amazonaws.computeoptimizer#RDSEffectiveRecommendationPreferences": { + "type": "structure", + "members": { + "cpuVendorArchitectures": { + "target": "com.amazonaws.computeoptimizer#CpuVendorArchitectures", + "traits": { + "smithy.api#documentation": "

\n Describes the CPU vendor and architecture for Amazon RDS recommendations.\n

" + } + }, + "enhancedInfrastructureMetrics": { + "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", + "traits": { + "smithy.api#documentation": "

Describes the activation status of the enhanced infrastructure metrics\n preference.\n

\n

A status of Active confirms that the preference is applied in the latest\n recommendation refresh, and a status of Inactive confirms that it's not yet\n applied to recommendations.\n

\n

For more information, see Enhanced\n infrastructure metrics in the Compute Optimizer User\n Guide.\n

" + } + }, + "lookBackPeriod": { + "target": "com.amazonaws.computeoptimizer#LookBackPeriodPreference", + "traits": { + "smithy.api#documentation": "

\n The number of days the utilization metrics of the Amazon RDS are analyzed.\n

" + } + }, + "savingsEstimationMode": { + "target": "com.amazonaws.computeoptimizer#RDSSavingsEstimationMode", + "traits": { + "smithy.api#documentation": "

\n Describes the savings estimation mode preference applied for calculating savings opportunity for Amazon RDS.\n

" + } + } + }, "traits": { - "smithy.api#default": 0 + "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Amazon RDS.\n

" } }, - "com.amazonaws.computeoptimizer#NumberOfMemberAccountsOptedIn": { - "type": "integer" - }, - "com.amazonaws.computeoptimizer#OperatingSystem": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#OptInRequiredException": { + "com.amazonaws.computeoptimizer#RDSInstanceEstimatedMonthlySavings": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.computeoptimizer#ErrorMessage" + "currency": { + "target": "com.amazonaws.computeoptimizer#Currency", + "traits": { + "smithy.api#documentation": "

\n The currency of the estimated monthly savings.\n

" + } + }, + "value": { + "target": "com.amazonaws.computeoptimizer#Value", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The value of the estimated monthly savings for Amazon RDS instances.\n

" + } } }, "traits": { - "smithy.api#documentation": "

The account is not opted in to Compute Optimizer.

", - "smithy.api#error": "client", - "smithy.api#httpError": 403 + "smithy.api#documentation": "

\n Describes the estimated monthly savings possible for Amazon RDS instances by adopting \n Compute Optimizer recommendations. This is based on Amazon RDS pricing after applying \n Savings Plans discounts.\n

" } }, - "com.amazonaws.computeoptimizer#PerformanceRisk": { - "type": "double", - "traits": { - "smithy.api#default": 0, - "smithy.api#range": { - "min": 0, - "max": 4 + "com.amazonaws.computeoptimizer#RDSInstanceFinding": { + "type": "enum", + "members": { + "OPTIMIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Optimized" + } + }, + "UNDER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Underprovisioned" + } + }, + "OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Overprovisioned" + } } } }, - "com.amazonaws.computeoptimizer#Period": { - "type": "integer", - "traits": { - "smithy.api#default": 0 - } - }, - "com.amazonaws.computeoptimizer#PlatformDifference": { + "com.amazonaws.computeoptimizer#RDSInstanceFindingReasonCode": { "type": "enum", "members": { - "HYPERVISOR": { + "CPU_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CPUOverprovisioned" + } + }, + "NETWORK_BANDWIDTH_OVER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NetworkBandwidthOverprovisioned" + } + }, + "EBS_IOPS_OVER_PROVISIONED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Hypervisor" + "smithy.api#enumValue": "EBSIOPSOverprovisioned" } }, - "NETWORK_INTERFACE": { + "EBS_THROUGHPUT_OVER_PROVISIONED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "NetworkInterface" + "smithy.api#enumValue": "EBSThroughputOverprovisioned" } }, - "STORAGE_INTERFACE": { + "CPU_UNDER_PROVISIONED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "StorageInterface" + "smithy.api#enumValue": "CPUUnderprovisioned" } }, - "INSTANCE_STORE_AVAILABILITY": { + "NETWORK_BANDWIDTH_UNDER_PROVISIONED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "InstanceStoreAvailability" + "smithy.api#enumValue": "NetworkBandwidthUnderprovisioned" } }, - "VIRTUALIZATION_TYPE": { + "EBS_THROUGHPUT_UNDER_PROVISIONED": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "VirtualizationType" + "smithy.api#enumValue": "EBSThroughputUnderprovisioned" } }, - "ARCHITECTURE": { + "NEW_GENERATION_DB_INSTANCE_CLASS_AVAILABLE": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "Architecture" + "smithy.api#enumValue": "NewGenerationDBInstanceClassAvailable" + } + }, + "NEW_ENGINE_VERSION_AVAILABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NewEngineVersionAvailable" } } } }, - "com.amazonaws.computeoptimizer#PlatformDifferences": { + "com.amazonaws.computeoptimizer#RDSInstanceFindingReasonCodes": { "type": "list", "member": { - "target": "com.amazonaws.computeoptimizer#PlatformDifference" + "target": "com.amazonaws.computeoptimizer#RDSInstanceFindingReasonCode" } }, - "com.amazonaws.computeoptimizer#PreferredResource": { + "com.amazonaws.computeoptimizer#RDSInstanceSavingsOpportunityAfterDiscounts": { "type": "structure", "members": { - "name": { - "target": "com.amazonaws.computeoptimizer#PreferredResourceName", - "traits": { - "smithy.api#documentation": "

The type of preferred resource to customize.

Note: Compute Optimizer only supports the customization of Ec2InstanceTypes.
" - } - }, - "includeList": { - "target": "com.amazonaws.computeoptimizer#PreferredResourceValues", + "savingsOpportunityPercentage": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", "traits": { - "smithy.api#documentation": "

\n The preferred resource type values to include in the recommendation candidates. You can specify the exact resource type value, \n such as m5.large, or use wild card expressions, such as m5. If this isn’t specified, all supported resources are included by default. \n You can specify up to 1000 values in this list.\n

" + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s \n Amazon RDS instance recommendations. This includes any applicable Savings Plans discounts.\n

" } }, - "excludeList": { - "target": "com.amazonaws.computeoptimizer#PreferredResourceValues", + "estimatedMonthlySavings": { + "target": "com.amazonaws.computeoptimizer#RDSInstanceEstimatedMonthlySavings", "traits": { - "smithy.api#documentation": "

\n The preferred resource type values to exclude from the recommendation candidates. If this isn’t specified, all supported \n resources are included by default. You can specify up to 1000 values in this list.\n

" + "smithy.api#documentation": "

\n The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS instance recommendations. \n This includes any applicable Savings Plans discounts.\n

" } } }, "traits": { - "smithy.api#documentation": "

The preference to control which resource type values are considered when generating rightsizing recommendations. You can specify this preference as a combination of include and exclude lists. You must specify either an includeList or excludeList. If the preference is an empty set of resource type values, an error occurs. For more information, see Rightsizing recommendation preferences in the Compute Optimizer User Guide.

Note:
  • This preference is only available for the Amazon EC2 instance and Auto Scaling group resource types.
  • Compute Optimizer only supports the customization of Ec2InstanceTypes.
" + "smithy.api#documentation": "

\n Describes the savings opportunity for Amazon RDS instance recommendations after applying Savings Plans discounts.\n

\n

\n Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.\n

" } }, - "com.amazonaws.computeoptimizer#PreferredResourceName": { - "type": "enum", + "com.amazonaws.computeoptimizer#RDSSavingsEstimationMode": { + "type": "structure", "members": { - "EC2_INSTANCE_TYPES": { - "target": "smithy.api#Unit", + "source": { + "target": "com.amazonaws.computeoptimizer#RDSSavingsEstimationModeSource", "traits": { - "smithy.api#enumValue": "Ec2InstanceTypes" + "smithy.api#documentation": "

\n Describes the source for calculating the savings opportunity for Amazon RDS.\n

" } } + }, + "traits": { + "smithy.api#documentation": "

\n Describes the savings estimation mode used for calculating savings opportunity for Amazon RDS.\n

" } }, - "com.amazonaws.computeoptimizer#PreferredResourceValue": { - "type": "string" - }, - "com.amazonaws.computeoptimizer#PreferredResourceValues": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#PreferredResourceValue" - } - }, - "com.amazonaws.computeoptimizer#PreferredResources": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#PreferredResource" - } - }, - "com.amazonaws.computeoptimizer#ProjectedMetric": { - "type": "structure", + "com.amazonaws.computeoptimizer#RDSSavingsEstimationModeSource": { + "type": "enum", "members": { - "name": { - "target": "com.amazonaws.computeoptimizer#MetricName", + "PUBLIC_PRICING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The name of the projected utilization metric.

The following projected utilization metrics are returned:
  • Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.
    Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.
  • Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.
    Units: Percent
    Note: The Memory metric is only returned for resources with the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
  • GPU - The projected percentage of allocated GPUs if you adjust your configurations to Compute Optimizer's recommendation option.
  • GPU_MEMORY - The projected percentage of total GPU memory if you adjust your configurations to Compute Optimizer's recommendation option.
    Note: The GPU and GPU_MEMORY metrics are only returned for resources with the unified CloudWatch Agent installed on them. For more information, see Enabling NVIDIA GPU utilization with the CloudWatch Agent.
" + "smithy.api#enumValue": "PublicPricing" } }, - "timestamps": { - "target": "com.amazonaws.computeoptimizer#Timestamps", + "COST_EXPLORER_RIGHTSIZING": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The timestamps of the projected utilization metric.

" + "smithy.api#enumValue": "CostExplorerRightsizing" } }, - "values": { - "target": "com.amazonaws.computeoptimizer#MetricValues", + "COST_OPTIMIZATION_HUB": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The values of the projected utilization metrics.

" + "smithy.api#enumValue": "CostOptimizationHub" } } - }, - "traits": { - "smithy.api#documentation": "

Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

Note: The Cpu, Memory, GPU, and GPU_MEMORY metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, these metrics are only returned for resources with the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent and Enabling NVIDIA GPU utilization with the CloudWatch Agent.
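A hedged usage sketch of the GetEC2RecommendationProjectedMetrics call via Soto; the method name, request members, and response property are assumptions based on this model and may differ slightly in the generated ComputeOptimizer_api.swift:

    import Foundation
    import SotoComputeOptimizer

    // Fetch projected metrics for one instance over the last 24 hours,
    // sampled every 300 seconds, using the Maximum statistic.
    func printProjectedMetrics(_ computeOptimizer: ComputeOptimizer) async throws {
        let request = ComputeOptimizer.GetEC2RecommendationProjectedMetricsRequest(
            endTime: Date(),
            instanceArn: "arn:aws:ec2:us-east-1:111122223333:instance/i-0abcd1234example",
            period: 300,
            startTime: Date().addingTimeInterval(-86_400),
            stat: .maximum
        )
        let response = try await computeOptimizer.getEC2RecommendationProjectedMetrics(request)
        print("recommendation options with projected metrics:",
              response.recommendedOptionProjectedMetrics?.count ?? 0)
    }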
" - } - }, - "com.amazonaws.computeoptimizer#ProjectedMetrics": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#ProjectedMetric" - } - }, - "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics": { - "type": "list", - "member": { - "target": "com.amazonaws.computeoptimizer#UtilizationMetric" } }, - "com.amazonaws.computeoptimizer#PutRecommendationPreferences": { - "type": "operation", - "input": { - "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferencesRequest" - }, - "output": { - "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferencesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.computeoptimizer#AccessDeniedException" - }, - { - "target": "com.amazonaws.computeoptimizer#InternalServerException" - }, - { - "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" - }, - { - "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" - }, - { - "target": "com.amazonaws.computeoptimizer#OptInRequiredException" - }, - { - "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + "com.amazonaws.computeoptimizer#RDSStorageEstimatedMonthlySavings": { + "type": "structure", + "members": { + "currency": { + "target": "com.amazonaws.computeoptimizer#Currency", + "traits": { + "smithy.api#documentation": "

\n The currency of the estimated monthly savings.\n

" + } }, - { - "target": "com.amazonaws.computeoptimizer#ThrottlingException" + "value": { + "target": "com.amazonaws.computeoptimizer#Value", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The value of the estimated monthly savings for Amazon RDS storage.\n

" + } } - ], + }, "traits": { - "smithy.api#documentation": "

Creates a new recommendation preference or updates an existing recommendation preference, such as enhanced infrastructure metrics.

For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide.

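A minimal Soto sketch of calling this operation, assuming an already-configured ComputeOptimizer service object (client setup differs between Soto versions) and the enum case names implied by the model values above:

    import SotoComputeOptimizer

    // Activate enhanced infrastructure metrics for EC2 instances at the account level.
    // The account ID is a placeholder.
    func activateEnhancedMetrics(_ computeOptimizer: ComputeOptimizer) async throws {
        let request = ComputeOptimizer.PutRecommendationPreferencesRequest(
            enhancedInfrastructureMetrics: .active,
            resourceType: .ec2Instance,
            scope: ComputeOptimizer.Scope(name: .accountId, value: "111122223333")
        )
        _ = try await computeOptimizer.putRecommendationPreferences(request)
    }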
" + "smithy.api#documentation": "

\n Describes the estimated monthly savings possible for Amazon RDS storage by adopting Compute Optimizer \n recommendations. This is based on Amazon RDS pricing after applying Savings Plans discounts.\n

" } }, - "com.amazonaws.computeoptimizer#PutRecommendationPreferencesRequest": { - "type": "structure", + "com.amazonaws.computeoptimizer#RDSStorageFinding": { + "type": "enum", "members": { - "resourceType": { - "target": "com.amazonaws.computeoptimizer#ResourceType", + "OPTIMIZED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The target resource type of the recommendation preference to create.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.

Note: The valid values for this parameter are Ec2Instance and AutoScalingGroup.
", - "smithy.api#required": {} + "smithy.api#enumValue": "Optimized" } }, - "scope": { - "target": "com.amazonaws.computeoptimizer#Scope", + "UNDER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

An object that describes the scope of the recommendation preference to create.

You can create recommendation preferences at the organization level (for management accounts of an organization only), at the account level, and at the resource level. For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide.

Note: You cannot create recommendation preferences for Auto Scaling groups at the organization and account levels. You can create recommendation preferences for Auto Scaling groups only at the resource level, by specifying a scope name of ResourceArn and a scope value of the Auto Scaling group Amazon Resource Name (ARN). This configures the preference for all instances that are part of the specified Auto Scaling group. You also cannot create recommendation preferences at the resource level for instances that are part of an Auto Scaling group; resource-level preferences are available only for standalone instances.
" + "smithy.api#enumValue": "Underprovisioned" } }, - "enhancedInfrastructureMetrics": { - "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", + "OVER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The status of the enhanced infrastructure metrics recommendation preference to create or update.

Specify the Active status to activate the preference, or specify Inactive to deactivate the preference.

For more information, see Enhanced infrastructure metrics in the Compute Optimizer User Guide.

" + "smithy.api#enumValue": "Overprovisioned" } - }, - "inferredWorkloadTypes": { - "target": "com.amazonaws.computeoptimizer#InferredWorkloadTypesPreference", + } + } + }, + "com.amazonaws.computeoptimizer#RDSStorageFindingReasonCode": { + "type": "enum", + "members": { + "EBS_VOLUME_ALLOCATED_STORAGE_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The status of the inferred workload types recommendation preference to create or update.

Note: The inferred workload type feature is active by default. To deactivate it, create a recommendation preference.

Specify the Inactive status to deactivate the feature, or specify Active to activate it.

For more information, see Inferred workload types in the Compute Optimizer User Guide.

" + "smithy.api#enumValue": "EBSVolumeAllocatedStorageUnderprovisioned" } }, - "externalMetricsPreference": { - "target": "com.amazonaws.computeoptimizer#ExternalMetricsPreference", + "EBS_VOLUME_THROUGHPUT_UNDER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The provider of the external metrics recommendation preference to create or update.

Specify a valid provider in the source field to activate the preference. To delete this preference, see the DeleteRecommendationPreferences action.

This preference can only be set for the Ec2Instance resource type.

For more information, see External metrics ingestion in the Compute Optimizer User Guide.

" + "smithy.api#enumValue": "EBSVolumeThroughputUnderprovisioned" } }, - "lookBackPeriod": { - "target": "com.amazonaws.computeoptimizer#LookBackPeriodPreference", + "EBS_VOLUME_IOPS_OVER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we use the default value DAYS_14.

Note: You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.
" + "smithy.api#enumValue": "EBSVolumeIOPSOverprovisioned" } }, - "utilizationPreferences": { - "target": "com.amazonaws.computeoptimizer#UtilizationPreferences", + "EBS_VOLUME_THROUGHPUT_OVER_PROVISIONED": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom. When this preference isn't specified, we use the following default values.

CPU utilization:
  • P99_5 for threshold
  • PERCENT_20 for headroom

Memory utilization:
  • PERCENT_20 for headroom

  • You can only set CPU and memory utilization preferences for the Amazon EC2 instance resource type.
  • The threshold setting isn’t available for memory utilization.
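A hedged sketch of setting these preferences through PutRecommendationPreferences, again with boto3 for illustration; the member names metricName and metricParameters, and the DAYS_93 look-back value, reflect my reading of the UtilizationPreferences and LookBackPeriodPreference shapes and should be checked against the generated SDK:

import boto3

# Sketch only: set the CPU and memory utilization preferences explicitly and
# extend the look-back window. Names outside the documentation above are assumptions.
boto3.client("compute-optimizer").put_recommendation_preferences(
    resourceType="Ec2Instance",
    scope={"name": "AccountId", "value": "111122223333"},
    lookBackPeriod="DAYS_93",          # default is DAYS_14 when unspecified
    utilizationPreferences=[
        {"metricName": "CpuUtilization",
         "metricParameters": {"threshold": "P99_5", "headroom": "PERCENT_20"}},
        {"metricName": "MemoryUtilization",
         "metricParameters": {"headroom": "PERCENT_20"}},   # no threshold for memory
    ],
)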
" + "smithy.api#enumValue": "EBSVolumeThroughputOverprovisioned" } }, - "preferredResources": { - "target": "com.amazonaws.computeoptimizer#PreferredResources", + "NEW_GENERATION_STORAGE_TYPE_AVAILABLE": { + "target": "smithy.api#Unit", "traits": { - "smithy.api#documentation": "

\n The preference to control which resource type values are considered when generating rightsizing recommendations. \n You can specify this preference as a combination of include and exclude lists. You must specify either an \n includeList or excludeList. If the preference is an empty set of resource type values, \n an error occurs.\n

\n \n

You can only set this preference for the Amazon EC2 instance and Auto Scaling group resource types.

\n
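For the include/exclude behaviour described above, a minimal boto3 sketch; the name value Ec2InstanceTypes and the includeList entries are assumptions based on the PreferredResources shape:

import boto3

# Sketch only: limit EC2 rightsizing candidates to an explicit include list.
# Specify either an includeList or an excludeList, per the description above.
boto3.client("compute-optimizer").put_recommendation_preferences(
    resourceType="Ec2Instance",
    scope={"name": "AccountId", "value": "111122223333"},
    preferredResources=[
        {"name": "Ec2InstanceTypes", "includeList": ["m5.xlarge", "m6i.xlarge"]},
    ],
)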
" + "smithy.api#enumValue": "NewGenerationStorageTypeAvailable" + } + } + } + }, + "com.amazonaws.computeoptimizer#RDSStorageFindingReasonCodes": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RDSStorageFindingReasonCode" + } + }, + "com.amazonaws.computeoptimizer#RDSStorageSavingsOpportunityAfterDiscounts": { + "type": "structure", + "members": { + "savingsOpportunityPercentage": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

\n The estimated monthly savings possible as a percentage of monthly cost by adopting Compute Optimizer’s \n Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.\n

" } }, - "savingsEstimationMode": { - "target": "com.amazonaws.computeoptimizer#SavingsEstimationMode", + "estimatedMonthlySavings": { + "target": "com.amazonaws.computeoptimizer#RDSStorageEstimatedMonthlySavings", "traits": { - "smithy.api#documentation": "

\n The status of the savings estimation mode preference to create or update.\n

\n

Specify the AfterDiscounts status to activate the preference, or specify BeforeDiscounts to deactivate the preference.

\n

Only the account manager or delegated administrator of your organization can activate this preference.

\n

For more information, see \n Savings estimation mode in the Compute Optimizer User Guide.

" + "smithy.api#documentation": "

\n The estimated monthly savings possible by adopting Compute Optimizer’s Amazon RDS storage recommendations. This includes any applicable Savings Plans discounts.\n

" } } }, "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.computeoptimizer#PutRecommendationPreferencesResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} + "smithy.api#documentation": "

\n Describes the savings opportunity for Amazon RDS storage recommendations after applying Savings Plans discounts.\n

\n

\n Savings opportunity represents the estimated monthly savings after applying Savings Plans discounts. \n You can achieve this by implementing a given Compute Optimizer recommendation.\n

" } }, "com.amazonaws.computeoptimizer#Rank": { @@ -8778,12 +10308,12 @@ "cpuVendorArchitectures": { "target": "com.amazonaws.computeoptimizer#CpuVendorArchitectures", "traits": { - "smithy.api#documentation": "

Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

\n

For example, when you specify AWS_ARM64 with:

\n " + "smithy.api#documentation": "

Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

\n

For example, when you specify AWS_ARM64 with:

\n " } } }, "traits": { - "smithy.api#documentation": "

Describes the recommendation preferences to return in the response of a GetAutoScalingGroupRecommendations, GetEC2InstanceRecommendations, and GetEC2RecommendationProjectedMetrics request.

" + "smithy.api#documentation": "

Describes the recommendation preferences to return in the response of a GetAutoScalingGroupRecommendations, GetEC2InstanceRecommendations, GetEC2RecommendationProjectedMetrics, GetRDSDatabaseRecommendations, and GetRDSDatabaseRecommendationProjectedMetrics request.

" } }, "com.amazonaws.computeoptimizer#RecommendationPreferencesDetail": { @@ -8915,6 +10445,18 @@ "traits": { "smithy.api#enumValue": "License" } + }, + "RDS_DB_INSTANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RdsDBInstance" + } + }, + "RDS_DB_INSTANCE_STORAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RdsDBInstanceStorage" + } } } }, @@ -8974,6 +10516,9 @@ "smithy.api#documentation": "

A summary of a recommendation.

" } }, + "com.amazonaws.computeoptimizer#RecommendedDBInstanceClass": { + "type": "string" + }, "com.amazonaws.computeoptimizer#RecommendedInstanceType": { "type": "string" }, @@ -9076,6 +10621,12 @@ "traits": { "smithy.api#enumValue": "License" } + }, + "RDS_DB_INSTANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RdsDBInstance" + } } } }, @@ -9310,6 +10861,9 @@ "com.amazonaws.computeoptimizer#StatusReason": { "type": "string" }, + "com.amazonaws.computeoptimizer#StorageType": { + "type": "string" + }, "com.amazonaws.computeoptimizer#Summaries": { "type": "list", "member": { @@ -9539,7 +11093,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n The preference to control the resource’s CPU utilization thresholds - threshold and headroom.\n

\n \n

This preference is only available for the Amazon EC2 instance resource type.

\n
" + "smithy.api#documentation": "

\n The preference to control the resource’s CPU utilization threshold, CPU utilization headroom, and memory utilization headroom.\n

\n \n

This preference is only available for the Amazon EC2 instance resource type.

\n
" } }, "com.amazonaws.computeoptimizer#UtilizationPreferences": { @@ -9706,17 +11260,17 @@ "smithy.api#documentation": "

The risk of the current EBS volume not meeting the performance needs of its workloads.\n The higher the risk, the more likely the current EBS volume doesn't have sufficient\n capacity.

" } }, - "tags": { - "target": "com.amazonaws.computeoptimizer#Tags", - "traits": { - "smithy.api#documentation": "

\n A list of tags assigned to your Amazon EBS volume recommendations.\n

" - } - }, "effectiveRecommendationPreferences": { "target": "com.amazonaws.computeoptimizer#EBSEffectiveRecommendationPreferences", "traits": { "smithy.api#documentation": "

\n Describes the effective recommendation preferences for Amazon EBS volume.\n

" } + }, + "tags": { + "target": "com.amazonaws.computeoptimizer#Tags", + "traits": { + "smithy.api#documentation": "

\n A list of tags assigned to your Amazon EBS volume recommendations.\n

" + } } }, "traits": { diff --git a/models/config-service.json b/models/config-service.json index 6441b07c46..9e4f4565fd 100644 --- a/models/config-service.json +++ b/models/config-service.json @@ -4135,7 +4135,20 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the details for the specified configuration recorders.\n\t\t\tIf the configuration recorder is not specified, this action returns\n\t\t\tthe details for all configuration recorders associated with the\n\t\t\taccount.

\n \n

You can specify only one configuration recorder for each Amazon Web Services Region for each account.

\n
" + "smithy.api#documentation": "

Returns the details for the specified configuration recorders.\n\t\t\tIf the configuration recorder is not specified, this action returns\n\t\t\tthe details for all configuration recorders associated with the\n\t\t\taccount.

\n \n

You can specify only one configuration recorder for each Amazon Web Services Region for each account.

\n
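The smoke test added below simply calls this operation with no parameters in us-west-2 and expects success; the equivalent hand-written check, sketched with boto3 for illustration:

import boto3

# List every configuration recorder in the account/Region (no names supplied).
config = boto3.client("config", region_name="us-west-2")
for recorder in config.describe_configuration_recorders()["ConfigurationRecorders"]:
    print(recorder["name"], recorder.get("roleARN"))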
", + "smithy.test#smokeTests": [ + { + "id": "DescribeConfigurationRecordersSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.configservice#DescribeConfigurationRecordersRequest": { diff --git a/models/connect.json b/models/connect.json index d9638bf966..415c13eeaf 100644 --- a/models/connect.json +++ b/models/connect.json @@ -326,6 +326,20 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.connect#AgentHierarchyGroup": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.connect#ARN", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the group.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about an agent hierarchy group.

" + } + }, "com.amazonaws.connect#AgentHierarchyGroups": { "type": "structure", "members": { @@ -384,6 +398,21 @@ "traits": { "smithy.api#documentation": "

Agent pause duration for a contact in seconds.

" } + }, + "HierarchyGroups": { + "target": "com.amazonaws.connect#HierarchyGroups", + "traits": { + "smithy.api#documentation": "

The agent hierarchy groups for the agent.

" + } + }, + "DeviceInfo": { + "target": "com.amazonaws.connect#DeviceInfo", + "traits": { + "smithy.api#documentation": "

Information regarding Agent’s device.

" + } + }, + "Capabilities": { + "target": "com.amazonaws.connect#ParticipantCapabilities" } }, "traits": { @@ -408,6 +437,20 @@ } } }, + "com.amazonaws.connect#AgentQualityMetrics": { + "type": "structure", + "members": { + "Audio": { + "target": "com.amazonaws.connect#AudioQualityMetricsInfo", + "traits": { + "smithy.api#documentation": "

Information about the audio quality of the Agent.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the quality of the Agent's media connection.

" + } + }, "com.amazonaws.connect#AgentResourceId": { "type": "string", "traits": { @@ -1252,6 +1295,12 @@ { "target": "com.amazonaws.connect#SearchAvailablePhoneNumbers" }, + { + "target": "com.amazonaws.connect#SearchContactFlowModules" + }, + { + "target": "com.amazonaws.connect#SearchContactFlows" + }, { "target": "com.amazonaws.connect#SearchContacts" }, @@ -2353,6 +2402,95 @@ "smithy.api#documentation": "

Configuration of the answering machine detection.

" } }, + "com.amazonaws.connect#AnsweringMachineDetectionStatus": { + "type": "enum", + "members": { + "ANSWERED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ANSWERED" + } + }, + "UNDETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNDETECTED" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR" + } + }, + "HUMAN_ANSWERED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HUMAN_ANSWERED" + } + }, + "SIT_TONE_DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SIT_TONE_DETECTED" + } + }, + "SIT_TONE_BUSY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SIT_TONE_BUSY" + } + }, + "SIT_TONE_INVALID_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SIT_TONE_INVALID_NUMBER" + } + }, + "FAX_MACHINE_DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAX_MACHINE_DETECTED" + } + }, + "VOICEMAIL_BEEP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VOICEMAIL_BEEP" + } + }, + "VOICEMAIL_NO_BEEP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VOICEMAIL_NO_BEEP" + } + }, + "AMD_UNRESOLVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMD_UNRESOLVED" + } + }, + "AMD_UNANSWERED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMD_UNANSWERED" + } + }, + "AMD_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMD_ERROR" + } + }, + "AMD_NOT_APPLICABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AMD_NOT_APPLICABLE" + } + } + } + }, "com.amazonaws.connect#Application": { "type": "structure", "members": { @@ -3435,6 +3573,12 @@ "smithy.api#input": {} } }, + "com.amazonaws.connect#AssociatedQueueIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#QueueId" + } + }, "com.amazonaws.connect#AssociationId": { "type": "string", "traits": { @@ -3541,7 +3685,7 @@ } }, "traits": { - "smithy.api#documentation": "

Error describing a failure to retrieve attached file metadata through BatchGetAttachedFileMetadata action.

" + "smithy.api#documentation": "

Error describing a failure to retrieve attached file metadata through\n BatchGetAttachedFileMetadata action.

" } }, "com.amazonaws.connect#AttachedFileErrorsList": { @@ -3674,6 +3818,38 @@ "smithy.api#documentation": "

A list of conditions which would be applied together with an AND\n condition.

" } }, + "com.amazonaws.connect#AttributeCondition": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.connect#PredefinedAttributeName", + "traits": { + "smithy.api#documentation": "

The name of predefined attribute.

" + } + }, + "Value": { + "target": "com.amazonaws.connect#ProficiencyValue", + "traits": { + "smithy.api#documentation": "

The value of predefined attribute.

" + } + }, + "ProficiencyLevel": { + "target": "com.amazonaws.connect#ProficiencyLevel", + "traits": { + "smithy.api#documentation": "

The proficiency level of the condition.

" + } + }, + "ComparisonOperator": { + "target": "com.amazonaws.connect#ComparisonOperator", + "traits": { + "smithy.api#documentation": "

The operator of the condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object to specify the predefined attribute condition.

" + } + }, "com.amazonaws.connect#AttributeName": { "type": "string", "traits": { @@ -3727,6 +3903,33 @@ "smithy.api#documentation": "

Has audio-specific configurations as the operating parameter for Echo Reduction.

" } }, + "com.amazonaws.connect#AudioQualityMetricsInfo": { + "type": "structure", + "members": { + "QualityScore": { + "target": "com.amazonaws.connect#AudioQualityScore", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

Number measuring the estimated quality of the media connection.

" + } + }, + "PotentialQualityIssues": { + "target": "com.amazonaws.connect#PotentialAudioQualityIssues", + "traits": { + "smithy.api#documentation": "

List of potential issues causing degradation of quality on a media connection. If the service did not detect any potential quality issues, the list is empty.

\n

Valid values: HighPacketLoss | HighRoundTripTime | HighJitterBuffer\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information for score and potential quality issues for Audio

" + } + }, + "com.amazonaws.connect#AudioQualityScore": { + "type": "float", + "traits": { + "smithy.api#default": 0 + } + }, "com.amazonaws.connect#AutoAccept": { "type": "boolean", "traits": { @@ -4481,7 +4684,7 @@ } ], "traits": { - "smithy.api#documentation": "

Claims an available phone number to your Amazon Connect instance or traffic distribution\n group. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance or traffic distribution group was created.

\n

For more information about how to use this operation, see Claim a phone number in your\n country and Claim phone\n numbers to traffic distribution groups in the Amazon Connect Administrator\n Guide.

\n \n

You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call\n the DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber\n operation.

\n
\n

If you plan to claim and release numbers frequently during a 30 day period,\n contact us for a service quota exception. Otherwise, it is possible you will be blocked from\n claiming and releasing any more numbers until 30 days past the oldest number\n released has expired.

\n

By default you can claim and release up to 200% of your maximum number of active\n phone numbers during any 30 day period. If you claim and release phone numbers using\n the UI or API during a rolling 30 day cycle that exceeds 200% of your phone number\n service level quota, you will be blocked from claiming any more numbers until 30\n days past the oldest number released has expired.

\n

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 30\n day period you release 99, claim 99, and then release 99, you will have exceeded the\n 200% limit. At that point you are blocked from claiming any more numbers until you\n open an Amazon Web Services support ticket.

", + "smithy.api#documentation": "

Claims an available phone number to your Amazon Connect instance or traffic distribution\n group. You can call this API only in the same Amazon Web Services Region where the Amazon Connect instance or traffic distribution group was created.

\n

For more information about how to use this operation, see Claim a phone number in your\n country and Claim phone\n numbers to traffic distribution groups in the Amazon Connect Administrator\n Guide.

\n \n

You can call the SearchAvailablePhoneNumbers API for available phone numbers that you can claim. Call\n the DescribePhoneNumber API to verify the status of a previous ClaimPhoneNumber\n operation.

\n
\n

If you plan to claim and release numbers frequently,\n contact us for a service quota exception. Otherwise, it is possible you will be blocked from\n claiming and releasing any more numbers until up to 180 days past the oldest number\n released has expired.

\n

By default you can claim and release up to 200% of your maximum number of active\n phone numbers. If you claim and release phone numbers using\n the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number\n service level quota, you will be blocked from claiming any more numbers until 180\n days past the oldest number released has expired.

\n

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 180\n day period you release 99, claim 99, and then release 99, you will have exceeded the\n 200% limit. At that point you are blocked from claiming any more numbers until you\n open an Amazon Web Services support ticket.
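A minimal sketch of the claim-then-verify flow described above, using boto3 for illustration; the target ARN and phone number are placeholders:

import boto3

connect = boto3.client("connect")

# Claim a number into the instance (or traffic distribution group) behind TargetArn.
claimed = connect.claim_phone_number(
    TargetArn="arn:aws:connect:us-west-2:111122223333:instance/EXAMPLE-INSTANCE-ID",
    PhoneNumber="+18005550100",
    PhoneNumberDescription="Main inbound line",
)

# DescribePhoneNumber reports the status of the ClaimPhoneNumber operation.
status = connect.describe_phone_number(PhoneNumberId=claimed["PhoneNumberId"])
print(status["ClaimedPhoneNumberSummary"]["PhoneNumberStatus"])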

", "smithy.api#http": { "method": "POST", "uri": "/phone-number/claim", @@ -4658,6 +4861,15 @@ } } }, + "com.amazonaws.connect#ComparisonOperator": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + } + } + }, "com.amazonaws.connect#CompleteAttachedFileUpload": { "type": "operation", "input": { @@ -4915,6 +5127,57 @@ "traits": { "smithy.api#documentation": "

Tags associated with the contact. This contains both Amazon Web Services generated and\n user-defined tags.

" } + }, + "ConnectedToSystemTimestamp": { + "target": "com.amazonaws.connect#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when customer endpoint connected to Amazon Connect.

" + } + }, + "RoutingCriteria": { + "target": "com.amazonaws.connect#RoutingCriteria", + "traits": { + "smithy.api#documentation": "

Latest routing criteria on the contact.

" + } + }, + "Customer": { + "target": "com.amazonaws.connect#Customer", + "traits": { + "smithy.api#documentation": "

Information about the Customer on the contact.

" + } + }, + "Campaign": { + "target": "com.amazonaws.connect#Campaign" + }, + "AnsweringMachineDetectionStatus": { + "target": "com.amazonaws.connect#AnsweringMachineDetectionStatus", + "traits": { + "smithy.api#documentation": "

Indicates how an outbound campaign call is actually disposed if the contact is connected to Amazon Connect.

" + } + }, + "CustomerVoiceActivity": { + "target": "com.amazonaws.connect#CustomerVoiceActivity", + "traits": { + "smithy.api#documentation": "

Information about customer’s voice activity.

" + } + }, + "QualityMetrics": { + "target": "com.amazonaws.connect#QualityMetrics", + "traits": { + "smithy.api#documentation": "

Information about the quality of the participant's media connection.

" + } + }, + "DisconnectDetails": { + "target": "com.amazonaws.connect#DisconnectDetails", + "traits": { + "smithy.api#documentation": "

Information about the call disconnect experience.

" + } + }, + "SegmentAttributes": { + "target": "com.amazonaws.connect#SegmentAttributes", + "traits": { + "smithy.api#documentation": "

A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes and can be accessed in flows. Attribute keys can include only alphanumeric, -, and _ characters. This field can be used to show channel subtype. For example, connect:Guide or connect:SMS.
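These new Contact members surface through DescribeContact. A small boto3 sketch for illustration; the IDs are placeholders and the fields shown are only present when they apply (for example, on outbound campaign calls):

import boto3

contact = boto3.client("connect").describe_contact(
    InstanceId="EXAMPLE-INSTANCE-ID",
    ContactId="EXAMPLE-CONTACT-ID",
)["Contact"]

print(contact.get("AnsweringMachineDetectionStatus"))        # e.g. HUMAN_ANSWERED
print(contact.get("QualityMetrics", {}).get("Agent"))        # AgentQualityMetrics
print(contact.get("DisconnectDetails", {}).get("PotentialDisconnectIssue"))
print(contact.get("SegmentAttributes", {}))                  # e.g. connect:Subtype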

" + } } }, "traits": { @@ -5038,6 +5301,12 @@ "smithy.api#documentation": "

The type of flow.

" } }, + "Status": { + "target": "com.amazonaws.connect#ContactFlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the contact flow.

" + } + }, "Description": { "target": "com.amazonaws.connect#ContactFlowDescription", "traits": { @@ -5170,6 +5439,52 @@ "smithy.api#pattern": "\\S" } }, + "com.amazonaws.connect#ContactFlowModuleSearchConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#ContactFlowModuleSearchCriteria" + } + }, + "com.amazonaws.connect#ContactFlowModuleSearchCriteria": { + "type": "structure", + "members": { + "OrConditions": { + "target": "com.amazonaws.connect#ContactFlowModuleSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR\n condition.

" + } + }, + "AndConditions": { + "target": "com.amazonaws.connect#ContactFlowModuleSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND\n condition.

" + } + }, + "StringCondition": { + "target": "com.amazonaws.connect#StringCondition" + } + }, + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return flow modules.

" + } + }, + "com.amazonaws.connect#ContactFlowModuleSearchFilter": { + "type": "structure", + "members": { + "TagFilter": { + "target": "com.amazonaws.connect#ControlPlaneTagFilter" + } + }, + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return flow modules.

" + } + }, + "com.amazonaws.connect#ContactFlowModuleSearchSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#ContactFlowModule" + } + }, "com.amazonaws.connect#ContactFlowModuleState": { "type": "enum", "members": { @@ -5263,6 +5578,70 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.connect#ContactFlowSearchConditionList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#ContactFlowSearchCriteria" + } + }, + "com.amazonaws.connect#ContactFlowSearchCriteria": { + "type": "structure", + "members": { + "OrConditions": { + "target": "com.amazonaws.connect#ContactFlowSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR\n condition.

" + } + }, + "AndConditions": { + "target": "com.amazonaws.connect#ContactFlowSearchConditionList", + "traits": { + "smithy.api#documentation": "

A list of conditions which would be applied together with an AND\n condition.

" + } + }, + "StringCondition": { + "target": "com.amazonaws.connect#StringCondition" + }, + "TypeCondition": { + "target": "com.amazonaws.connect#ContactFlowType", + "traits": { + "smithy.api#documentation": "

The type of flow.

" + } + }, + "StateCondition": { + "target": "com.amazonaws.connect#ContactFlowState", + "traits": { + "smithy.api#documentation": "

The state of the flow.

" + } + }, + "StatusCondition": { + "target": "com.amazonaws.connect#ContactFlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the flow.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return contact flows.
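A hedged sketch of the new SearchContactFlows operation using this criteria shape, with boto3 for illustration; the request member name SearchCriteria and the response key ContactFlows are assumptions modelled on the other Search* operations:

import boto3

# Find active, published flows whose name contains "sales".
resp = boto3.client("connect").search_contact_flows(
    InstanceId="EXAMPLE-INSTANCE-ID",
    SearchCriteria={
        "AndConditions": [
            {"StringCondition": {"FieldName": "name", "Value": "sales", "ComparisonType": "CONTAINS"}},
            {"StateCondition": "ACTIVE"},
            {"StatusCondition": "PUBLISHED"},
        ]
    },
)
for flow in resp.get("ContactFlows", []):
    print(flow.get("Name"), flow.get("Arn"))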

" + } + }, + "com.amazonaws.connect#ContactFlowSearchFilter": { + "type": "structure", + "members": { + "TagFilter": { + "target": "com.amazonaws.connect#ControlPlaneTagFilter" + } + }, + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, + "com.amazonaws.connect#ContactFlowSearchSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#ContactFlow" + } + }, "com.amazonaws.connect#ContactFlowState": { "type": "enum", "members": { @@ -5280,6 +5659,23 @@ } } }, + "com.amazonaws.connect#ContactFlowStatus": { + "type": "enum", + "members": { + "PUBLISHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PUBLISHED" + } + }, + "SAVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SAVED" + } + } + } + }, "com.amazonaws.connect#ContactFlowSummary": { "type": "structure", "members": { @@ -5312,6 +5708,12 @@ "traits": { "smithy.api#documentation": "

The type of flow.

" } + }, + "ContactFlowStatus": { + "target": "com.amazonaws.connect#ContactFlowStatus", + "traits": { + "smithy.api#documentation": "

The status of the contact flow.

" + } } }, "traits": { @@ -6128,6 +6530,12 @@ "smithy.api#required": {} } }, + "Status": { + "target": "com.amazonaws.connect#ContactFlowStatus", + "traits": { + "smithy.api#documentation": "

Indicates the flow status as either SAVED or PUBLISHED. The PUBLISHED status will initiate validation on the content; the SAVED status does not initiate validation of the content. SAVED | PUBLISHED.
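For example, a flow can be created in the SAVED state so that content validation is deferred until it is published; a boto3 sketch for illustration, with placeholder content:

import boto3

boto3.client("connect").create_contact_flow(
    InstanceId="EXAMPLE-INSTANCE-ID",
    Name="Inbound sales",
    Type="CONTACT_FLOW",
    Status="SAVED",   # defer validation; PUBLISHED would validate Content now
    Content='{"Version": "2019-10-30", "StartAction": "placeholder", "Actions": []}',
)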

" + } + }, "Tags": { "target": "com.amazonaws.connect#TagMap", "traits": { @@ -6876,7 +7284,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a prompt. For more information about prompts, such as supported file types and\n maximum length, see Create prompts in the Amazon Connect Administrator's\n Guide.

", + "smithy.api#documentation": "

Creates a prompt. For more information about prompts, such as supported file types and\n maximum length, see Create prompts in the Amazon Connect Administrator\n Guide.

", "smithy.api#http": { "method": "PUT", "uri": "/prompts/{InstanceId}", @@ -8692,6 +9100,57 @@ "target": "com.amazonaws.connect#CurrentMetric" } }, + "com.amazonaws.connect#Customer": { + "type": "structure", + "members": { + "DeviceInfo": { + "target": "com.amazonaws.connect#DeviceInfo", + "traits": { + "smithy.api#documentation": "

Information regarding Customer’s device.

" + } + }, + "Capabilities": { + "target": "com.amazonaws.connect#ParticipantCapabilities" + } + }, + "traits": { + "smithy.api#documentation": "

Information about the Customer on the contact.

" + } + }, + "com.amazonaws.connect#CustomerQualityMetrics": { + "type": "structure", + "members": { + "Audio": { + "target": "com.amazonaws.connect#AudioQualityMetricsInfo", + "traits": { + "smithy.api#documentation": "

Information about the audio quality of the Customer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the quality of the Customer's media connection.

" + } + }, + "com.amazonaws.connect#CustomerVoiceActivity": { + "type": "structure", + "members": { + "GreetingStartTimestamp": { + "target": "com.amazonaws.connect#Timestamp", + "traits": { + "smithy.api#documentation": "

Timestamp that measures the beginning of the customer greeting from an outbound voice call.

" + } + }, + "GreetingEndTimestamp": { + "target": "com.amazonaws.connect#Timestamp", + "traits": { + "smithy.api#documentation": "

Timestamp that measures the end of the customer greeting from an outbound voice call.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about customer’s voice activity.

" + } + }, "com.amazonaws.connect#DataSetId": { "type": "string", "traits": { @@ -10548,7 +11007,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the specified flow.

\n

You can also create and update flows using the Amazon Connect\n Flow language.

", + "smithy.api#documentation": "

Describes the specified flow.

\n

You can also create and update flows using the Amazon Connect\n Flow language.

\n

Use the $SAVED alias in the request to describe the SAVED content\n of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is\n published, $SAVED needs to be supplied to view saved content that has not been\n published.

\n

In the response, Status indicates the flow status as either\n SAVED or PUBLISHED. The PUBLISHED status will initiate\n validation on the content. SAVED does not initiate validation of the content.\n SAVED | PUBLISHED\n
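A small boto3 sketch of reading draft content with the $SAVED alias described above; appending the alias to the ContactFlowId parameter is my reading of that note, and the IDs are placeholders:

import boto3

flow = boto3.client("connect").describe_contact_flow(
    InstanceId="EXAMPLE-INSTANCE-ID",
    ContactFlowId="0123abcd-0000-0000-0000-000000000000:$SAVED",
)["ContactFlow"]
print(flow.get("Status"), flow.get("State"))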

", "smithy.api#http": { "method": "GET", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}", @@ -10585,7 +11044,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the specified flow module.

", + "smithy.api#documentation": "

Describes the specified flow module.

\n

Use the $SAVED alias in the request to describe the SAVED content\n of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is\n published, $SAVED needs to be supplied to view saved content that has not been\n published.

", "smithy.api#http": { "method": "GET", "uri": "/contact-flow-modules/{InstanceId}/{ContactFlowModuleId}", @@ -12105,6 +12564,32 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.connect#DeviceInfo": { + "type": "structure", + "members": { + "PlatformName": { + "target": "com.amazonaws.connect#PlatformName", + "traits": { + "smithy.api#documentation": "

Name of the platform that the participant used for the call.

" + } + }, + "PlatformVersion": { + "target": "com.amazonaws.connect#PlatformVersion", + "traits": { + "smithy.api#documentation": "

Version of the platform that the participant used for the call.

" + } + }, + "OperatingSystem": { + "target": "com.amazonaws.connect#OperatingSystem", + "traits": { + "smithy.api#documentation": "

Operating system that the participant used for the call.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information regarding the device.

" + } + }, "com.amazonaws.connect#Dimensions": { "type": "structure", "members": { @@ -13027,6 +13512,20 @@ "smithy.api#input": {} } }, + "com.amazonaws.connect#DisconnectDetails": { + "type": "structure", + "members": { + "PotentialDisconnectIssue": { + "target": "com.amazonaws.connect#PotentialDisconnectIssue", + "traits": { + "smithy.api#documentation": "

Indicates the potential disconnection issues for a call. This field is not populated if the service does not detect potential issues.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the call disconnect experience.

" + } + }, "com.amazonaws.connect#DisconnectReason": { "type": "structure", "members": { @@ -13193,6 +13692,9 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.connect#DurationInSeconds": { + "type": "integer" + }, "com.amazonaws.connect#Email": { "type": "string", "traits": { @@ -14748,6 +15250,58 @@ } } }, + "com.amazonaws.connect#Expiry": { + "type": "structure", + "members": { + "DurationInSeconds": { + "target": "com.amazonaws.connect#DurationInSeconds", + "traits": { + "smithy.api#documentation": "

The number of seconds to wait before expiring the routing step.

" + } + }, + "ExpiryTimestamp": { + "target": "com.amazonaws.connect#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp indicating when the routing step expires.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object to specify the expiration of a routing step.

" + } + }, + "com.amazonaws.connect#Expression": { + "type": "structure", + "members": { + "AttributeCondition": { + "target": "com.amazonaws.connect#AttributeCondition", + "traits": { + "smithy.api#documentation": "

An object to specify the predefined attribute condition.

" + } + }, + "AndExpression": { + "target": "com.amazonaws.connect#Expressions", + "traits": { + "smithy.api#documentation": "

List of routing expressions which will be AND-ed together.

" + } + }, + "OrExpression": { + "target": "com.amazonaws.connect#Expressions", + "traits": { + "smithy.api#documentation": "

List of routing expressions which will be OR-ed together.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A tagged union to specify expression for a routing step.
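A sketch of what one of these tagged unions looks like on the wire, written as a plain Python dict; the attribute name, the values, and the NumberGreaterOrEqualTo operator are illustrative assumptions (the model only constrains ComparisonOperator to a 1-127 character string):

# Route to agents proficient in English at level 3+, or in Spanish at level 4+.
routing_step_expression = {
    "OrExpression": [
        {"AttributeCondition": {"Name": "Language", "Value": "English",
                                "ProficiencyLevel": 3.0,
                                "ComparisonOperator": "NumberGreaterOrEqualTo"}},
        {"AttributeCondition": {"Name": "Language", "Value": "Spanish",
                                "ProficiencyLevel": 4.0,
                                "ComparisonOperator": "NumberGreaterOrEqualTo"}},
    ]
}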

" + } + }, + "com.amazonaws.connect#Expressions": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#Expression" + } + }, "com.amazonaws.connect#FailedRequest": { "type": "structure", "members": { @@ -15192,7 +15746,7 @@ "UrlExpiryInSeconds": { "target": "com.amazonaws.connect#URLExpiryInSeconds", "traits": { - "smithy.api#documentation": "

Optional override for the expiry of the pre-signed S3 URL in seconds.

", + "smithy.api#documentation": "

Optional override for the expiry of the pre-signed S3 URL in seconds. The default value is\n 300.

", "smithy.api#httpQuery": "urlExpiryInSeconds" } }, @@ -15253,7 +15807,7 @@ "AssociatedResourceArn": { "target": "com.amazonaws.connect#ARN", "traits": { - "smithy.api#documentation": "

The resource to which the attached file is (being) uploaded. Cases are the only currently supported resource.

\n \n

This value must be a valid ARN.

\n
" + "smithy.api#documentation": "

The resource to which the attached file is (being) uploaded. Cases are the only currently supported resource.
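A boto3 sketch of fetching attached-file metadata with a shorter pre-signed URL expiry than the 300-second default noted above; the IDs, the Cases ARN, and the DownloadUrlMetadata response field are placeholders/assumptions:

import boto3

resp = boto3.client("connect").get_attached_file(
    InstanceId="EXAMPLE-INSTANCE-ID",
    FileId="EXAMPLE-FILE-ID",
    UrlExpiryInSeconds=60,   # override the 300-second default
    AssociatedResourceArn="arn:aws:cases:us-west-2:111122223333:case/EXAMPLE-CASE-ID",
)
print(resp.get("FileStatus"), resp.get("DownloadUrlMetadata", {}).get("Url"))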

" } }, "FileUseCaseType": { @@ -15904,7 +16458,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets metric data from the specified Amazon Connect instance.

\n

\n GetMetricDataV2 offers more features than GetMetricData, the previous\n version of this API. It has new metrics, offers filtering at a metric level, and offers the\n ability to filter and group data by channels, queues, routing profiles, agents, and agent\n hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals.

\n

For a description of the historical metrics that are supported by\n GetMetricDataV2 and GetMetricData, see Historical metrics\n definitions in the Amazon Connect Administrator's Guide.

", + "smithy.api#documentation": "

Gets metric data from the specified Amazon Connect instance.

\n

\n GetMetricDataV2 offers more features than GetMetricData, the previous\n version of this API. It has new metrics, offers filtering at a metric level, and offers the\n ability to filter and group data by channels, queues, routing profiles, agents, and agent\n hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals.

\n

For a description of the historical metrics that are supported by\n GetMetricDataV2 and GetMetricData, see Historical metrics\n definitions in the Amazon Connect Administrator Guide.

", "smithy.api#http": { "method": "POST", "uri": "/metrics/data", @@ -15950,20 +16504,20 @@ "Filters": { "target": "com.amazonaws.connect#FiltersV2List", "traits": { - "smithy.api#documentation": "

The filters to apply to returned metrics. You can filter on the following resources:
  • Queues
  • Routing profiles
  • Agents
  • Channels
  • User hierarchy groups
  • Feature
  • Routing step expression

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.

Note the following limits:
  • Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | FEATURE | CASE_TEMPLATE_ARN | CASE_STATUS | contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION
  • Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
    contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
    connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.
    ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.
", + "smithy.api#documentation": "

The filters to apply to returned metrics. You can filter on the following resources:
  • Agents
  • Channels
  • Feature
  • Queues
  • Routing profiles
  • Routing step expression
  • User hierarchy groups

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator Guide.

Note the following limits:
  • Filter keys: A maximum of 5 filter keys are supported in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED
  • Filter values: A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are valid filterValues for the CHANNEL filter key; they do not count toward the limit of 100 filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 filter values, along with 3 channel filters.
    contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by Contact Lens conversational analytics.
    connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key.
    ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 characters long. This filter is case and order sensitive. JSON string fields must be sorted in ascending order and JSON array order should be kept as is.
  • Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the Q_CONNECT_ENABLED filter key.
      • TRUE includes all contacts that had Amazon Q in Connect enabled as part of the flow.
      • FALSE includes all contacts that did not have Amazon Q in Connect enabled as part of the flow.
    This filter is available only for contact record-driven metrics.
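Pulling the rules above together, a boto3 sketch of a GetMetricDataV2 call that combines a queue filter with the new Q_CONNECT_ENABLED filter and groups results by queue; the instance ARN and queue ID are placeholders:

import boto3
from datetime import datetime, timedelta, timezone

connect = boto3.client("connect")
end = datetime.now(timezone.utc)
resp = connect.get_metric_data_v2(
    ResourceArn="arn:aws:connect:us-west-2:111122223333:instance/EXAMPLE-INSTANCE-ID",
    StartTime=end - timedelta(days=1),
    EndTime=end,
    Filters=[
        {"FilterKey": "QUEUE", "FilterValues": ["EXAMPLE-QUEUE-ID"]},
        {"FilterKey": "Q_CONNECT_ENABLED", "FilterValues": ["TRUE"]},
    ],
    Groupings=["QUEUE"],
    Metrics=[{"Name": "CONTACTS_HANDLED"}, {"Name": "ABANDONMENT_RATE"}],
)
for result in resp["MetricResults"]:
    values = {c["Metric"]["Name"]: c.get("Value") for c in result["Collections"]}
    print(result.get("Dimensions"), values)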
", "smithy.api#required": {} } }, "Groupings": { "target": "com.amazonaws.connect#GroupingsV2", "traits": { - "smithy.api#documentation": "

The grouping applied to the metrics that are returned. For example, when results are grouped\n by queue, the metrics returned are grouped by queue. The values that are returned apply to the\n metrics for each queue. They are not aggregated for all queues.

\n

If no grouping is specified, a summary of all metrics is returned.

\n

Valid grouping keys: QUEUE | ROUTING_PROFILE | AGENT\n | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE |\n AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE |\n AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE |\n CASE_TEMPLATE_ARN | CASE_STATUS |\n contact/segmentAttributes/connect:Subtype |\n ROUTING_STEP_EXPRESSION\n

" + "smithy.api#documentation": "

The grouping applied to the metrics that are returned. For example, when results are grouped\n by queue, the metrics returned are grouped by queue. The values that are returned apply to the\n metrics for each queue. They are not aggregated for all queues.

\n

If no grouping is specified, a summary of all metrics is returned.

\n

Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE |\n AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE |\n AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE |\n CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL |\n contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID |\n FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE\n | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE |\n RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE |\n ROUTING_STEP_EXPRESSION\n

" } }, "Metrics": { "target": "com.amazonaws.connect#MetricsV2", "traits": { - "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average conversation duration\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Average customer talk time\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Contact abandoned\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile,\n contact/segmentAttributes/connect:Subtype

\n

Threshold: For ThresholdValue enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Maximum queued time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: Not available

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: Not available

\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: Not available

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric only supports the\n following filter keys as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile,\n contact/segmentAttributes/connect:Subtype

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
SUM_CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile,\n contact/segmentAttributes/connect:Subtype

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Callback attempts\n

\n
\n
", + "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Metric filter:

\n
    \n
  • \n

    Valid values: API | Incoming | Outbound |\n Transfer | Callback | Queue_Transfer |\n Disconnect\n

    \n
  • \n
\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: Contact abandoned\n

\n
\n
CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
FLOWS_OUTCOME
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome\n

\n
\n
FLOWS_STARTED
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows started\n

\n
\n
MAX_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Maximum flow time\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Maximum queued time\n

\n
\n
MIN_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Minimum flow time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: Not available

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: Not available

\n
\n
PERCENT_FLOWS_OUTCOME
\n
\n

Unit: Percent

\n

Valid metric filter key: FLOWS_OUTCOME_TYPE\n

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome percentage.

\n \n

The FLOWS_OUTCOME_TYPE is not a valid grouping.

\n
\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: Not available

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric only supports the\n following filter keys as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Callback attempts\n

\n
\n
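The entries above map directly onto the Metrics, Filters, and Threshold fields of a GetMetricDataV2 request. As a hedged illustration only, the Swift sketch below shows one way this could look through the Soto client generated from this model; the lower-camel-case member names (getMetricDataV2, resourceArn, thresholdValue, and so on) are assumed from Soto's usual code-generation conventions, and the instance ARN and queue ID are caller-supplied placeholders.

```swift
import Foundation
import SotoConnect

/// Fetch SERVICE_LEVEL (60-second threshold) and inbound CONTACTS_HANDLED
/// for one queue over the last hour, grouped by queue.
func lastHourQueueMetrics(
    connect: Connect,
    instanceArn: String,   // placeholder resource ARN
    queueId: String        // placeholder queue ID
) async throws -> Connect.GetMetricDataV2Response {
    let request = Connect.GetMetricDataV2Request(
        endTime: Date(),
        filters: [.init(filterKey: "QUEUE", filterValues: [queueId])],
        groupings: ["QUEUE"],
        metrics: [
            // Threshold: a whole number of seconds; Comparison must be "LT".
            .init(name: "SERVICE_LEVEL", threshold: [.init(comparison: "LT", thresholdValue: 60)]),
            // Metric-level filter on INITIATION_METHOD, as documented for CONTACTS_HANDLED.
            .init(metricFilters: [.init(metricFilterKey: "INITIATION_METHOD", metricFilterValues: ["INBOUND"])], name: "CONTACTS_HANDLED")
        ],
        resourceArn: instanceArn,
        startTime: Date().addingTimeInterval(-3600)
    )
    return try await connect.getMetricDataV2(request)
}
```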
", "smithy.api#required": {} } }, @@ -16561,6 +17115,44 @@ "smithy.api#documentation": "

Information about the hierarchy group.

" } }, + "com.amazonaws.connect#HierarchyGroups": { + "type": "structure", + "members": { + "Level1": { + "target": "com.amazonaws.connect#AgentHierarchyGroup", + "traits": { + "smithy.api#documentation": "

The group at level one of the agent hierarchy.

" + } + }, + "Level2": { + "target": "com.amazonaws.connect#AgentHierarchyGroup", + "traits": { + "smithy.api#documentation": "

The group at level two of the agent hierarchy.

" + } + }, + "Level3": { + "target": "com.amazonaws.connect#AgentHierarchyGroup", + "traits": { + "smithy.api#documentation": "

The group at level three of the agent hierarchy.

" + } + }, + "Level4": { + "target": "com.amazonaws.connect#AgentHierarchyGroup", + "traits": { + "smithy.api#documentation": "

The group at level four of the agent hierarchy.

" + } + }, + "Level5": { + "target": "com.amazonaws.connect#AgentHierarchyGroup", + "traits": { + "smithy.api#documentation": "

The group at level five of the agent hierarchy.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the agent hierarchy. Hierarchies can be configured with up to five levels.

" + } + }, "com.amazonaws.connect#HierarchyLevel": { "type": "structure", "members": { @@ -17454,6 +18046,9 @@ "com.amazonaws.connect#InboundCallsEnabled": { "type": "boolean" }, + "com.amazonaws.connect#Index": { + "type": "integer" + }, "com.amazonaws.connect#InitiationMethodList": { "type": "list", "member": { @@ -22987,7 +23582,7 @@ "MetricFilterKey": { "target": "com.amazonaws.connect#String", "traits": { - "smithy.api#documentation": "

The key to use for filtering data.

\n

Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON.\n These are the same values as the InitiationMethod and DisconnectReason\n in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator's\n Guide.

" + "smithy.api#documentation": "

The key to use for filtering data.

\n

Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON.\n These are the same values as the InitiationMethod and DisconnectReason\n in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator Guide.\n

" } }, "MetricFilterValues": { @@ -23454,6 +24049,15 @@ "smithy.api#documentation": "

Information about the property value used in automation of a numeric questions. Label values\n are associated with minimum and maximum values for the numeric question.

\n
    \n
  • \n

    Sentiment scores have a minimum value of -5 and maximum value of 5.

    \n
  • \n
  • \n

    Duration labels, such as NON_TALK_TIME, CONTACT_DURATION,\n AGENT_INTERACTION_DURATION, CUSTOMER_HOLD_TIME have a minimum value\n of 0 and maximum value of 28800.

    \n
  • \n
  • \n

    Percentages have a minimum value of 0 and maximum value of 100.

    \n
  • \n
  • \n

    \n NUMBER_OF_INTERRUPTIONS has a minimum value of 0 and maximum value of\n 1000.

    \n
  • \n
" } }, + "com.amazonaws.connect#OperatingSystem": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, "com.amazonaws.connect#Origin": { "type": "string", "traits": { @@ -25557,6 +26161,54 @@ } } }, + "com.amazonaws.connect#PlatformName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, + "com.amazonaws.connect#PlatformVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, + "com.amazonaws.connect#PotentialAudioQualityIssue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, + "com.amazonaws.connect#PotentialAudioQualityIssues": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#PotentialAudioQualityIssue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 3 + } + } + }, + "com.amazonaws.connect#PotentialDisconnectIssue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + } + } + }, "com.amazonaws.connect#PredefinedAttribute": { "type": "structure", "members": { @@ -25762,6 +26414,15 @@ } } }, + "com.amazonaws.connect#ProficiencyValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, "com.amazonaws.connect#Prompt": { "type": "structure", "members": { @@ -26111,6 +26772,26 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#QualityMetrics": { + "type": "structure", + "members": { + "Agent": { + "target": "com.amazonaws.connect#AgentQualityMetrics", + "traits": { + "smithy.api#documentation": "

Information about the quality of the agent's media connection.

" + } + }, + "Customer": { + "target": "com.amazonaws.connect#CustomerQualityMetrics", + "traits": { + "smithy.api#documentation": "

Information about the quality of the customer's media connection.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the quality of the participant's media connection.

" + } + }, "com.amazonaws.connect#Queue": { "type": "structure", "members": { @@ -27621,7 +28302,7 @@ } ], "traits": { - "smithy.api#documentation": "

Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. You\n can call this API only in the Amazon Web Services Region where the number was claimed.

\n \n

To release phone numbers from a traffic distribution group, use the ReleasePhoneNumber API, not the\n Amazon Connect admin website.

\n

After releasing a phone number, the phone number enters into a cooldown period of 30 days.\n It cannot be searched for or claimed again until the period has ended. If you accidentally\n release a phone number, contact Amazon Web Services Support.

\n
\n

If you plan to claim and release numbers frequently during a 30 day period,\n contact us for a service quota exception. Otherwise, it is possible you will be blocked from\n claiming and releasing any more numbers until 30 days past the oldest number\n released has expired.

\n

By default you can claim and release up to 200% of your maximum number of active\n phone numbers during any 30 day period. If you claim and release phone numbers using\n the UI or API during a rolling 30 day cycle that exceeds 200% of your phone number\n service level quota, you will be blocked from claiming any more numbers until 30\n days past the oldest number released has expired.

\n

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 30\n day period you release 99, claim 99, and then release 99, you will have exceeded the\n 200% limit. At that point you are blocked from claiming any more numbers until you\n open an Amazon Web Services support ticket.

", + "smithy.api#documentation": "

Releases a phone number previously claimed to an Amazon Connect instance or traffic distribution group. You\n can call this API only in the Amazon Web Services Region where the number was claimed.

\n \n

To release phone numbers from a traffic distribution group, use the ReleasePhoneNumber API, not the\n Amazon Connect admin website.

\n

After releasing a phone number, the phone number enters into a cooldown period for up to\n 180 days. It cannot be searched for or claimed again until the period has ended. If you\n accidentally release a phone number, contact Amazon Web Services Support.

\n
\n

If you plan to claim and release numbers frequently,\n contact us for a service quota exception. Otherwise, it is possible you will be blocked from\n claiming and releasing any more numbers until up to 180 days past the oldest number\n released has expired.

\n

By default you can claim and release up to 200% of your maximum number of active\n phone numbers. If you claim and release phone numbers using\n the UI or API during a rolling 180 day cycle that exceeds 200% of your phone number\n service level quota, you will be blocked from claiming any more numbers until 180\n days past the oldest number released has expired.

\n

For example, if you already have 99 claimed numbers and a service level quota of 99 phone numbers, and in any 180\n day period you release 99, claim 99, and then release 99, you will have exceeded the\n 200% limit. At that point you are blocked from claiming any more numbers until you\n open an Amazon Web Services support ticket.
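For illustration, a minimal Soto-style call is sketched below; the releasePhoneNumber method and member names are assumed from Soto's usual code-generation conventions, and the phone number ID is a placeholder supplied by the caller.

```swift
import Foundation
import SotoConnect

/// Release a claimed number. Remember it then enters a cooldown of up to
/// 180 days during which it cannot be searched for or claimed again.
func releaseNumber(connect: Connect, phoneNumberId: String) async throws {
    _ = try await connect.releasePhoneNumber(
        // clientToken keeps a retried call idempotent.
        .init(clientToken: UUID().uuidString, phoneNumberId: phoneNumberId)
    )
}
```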

", "smithy.api#http": { "method": "DELETE", "uri": "/phone-number/{PhoneNumberId}", @@ -28079,6 +28760,61 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#RoutingCriteria": { + "type": "structure", + "members": { + "Steps": { + "target": "com.amazonaws.connect#Steps", + "traits": { + "smithy.api#documentation": "

List of routing steps. When Amazon Connect does not find an available agent meeting the requirements in a step for a given step duration, the routing criteria will move on to the next step sequentially until a join is completed with an agent. When all steps are exhausted, the contact will be offered to any agent in the queue.

" + } + }, + "ActivationTimestamp": { + "target": "com.amazonaws.connect#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp indicating when the routing criteria is set to active. Routing criteria are activated when a contact is transferred to a queue. ActivationTimestamp is also set on the routing criteria for contacts in an agent queue, even though routing criteria are never activated for contacts in an agent queue.

" + } + }, + "Index": { + "target": "com.amazonaws.connect#Index", + "traits": { + "smithy.api#documentation": "

Information about the index of the routing criteria.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Latest routing criteria on the contact.

" + } + }, + "com.amazonaws.connect#RoutingCriteriaStepStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INACTIVE" + } + }, + "JOINED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JOINED" + } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } + } + } + }, "com.amazonaws.connect#RoutingExpression": { "type": "string", "traits": { @@ -28187,6 +28923,12 @@ "smithy.api#default": false, "smithy.api#documentation": "

Whether this is a default routing profile.

" } + }, + "AssociatedQueueIds": { + "target": "com.amazonaws.connect#AssociatedQueueIdList", + "traits": { + "smithy.api#documentation": "

The IDs of the associated queues.

" + } } }, "traits": { @@ -28391,7 +29133,7 @@ "StringCondition": { "target": "com.amazonaws.connect#StringCondition", "traits": { - "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

\n \n

The currently supported values for FieldName are name,\n description, and resourceID.

\n
" + "smithy.api#documentation": "

A leaf node condition which can be used to specify a string condition.

\n \n

The currently supported values for FieldName are\n associatedQueueIds, name, description, and resourceID.

\n
" } } }, @@ -28896,6 +29638,216 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#SearchContactFlowModules": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#SearchContactFlowModulesRequest" + }, + "output": { + "target": "com.amazonaws.connect#SearchContactFlowModulesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches the flow modules in an Amazon Connect instance, with optional filtering.

", + "smithy.api#http": { + "method": "POST", + "uri": "/search-contact-flow-modules", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "ContactFlowModules", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#SearchContactFlowModulesRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the\n Amazon Resource Name (ARN) of the instance.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult100", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

" + } + }, + "SearchFilter": { + "target": "com.amazonaws.connect#ContactFlowModuleSearchFilter", + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, + "SearchCriteria": { + "target": "com.amazonaws.connect#ContactFlowModuleSearchCriteria", + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return contact flow modules.

\n \n

The name and description fields support \"contains\" queries with a\n minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths\n outside of this range will result in invalid results.

\n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#SearchContactFlowModulesResponse": { + "type": "structure", + "members": { + "ContactFlowModules": { + "target": "com.amazonaws.connect#ContactFlowModuleSearchSummaryList", + "traits": { + "smithy.api#documentation": "

Information about the contact flow modules.

" + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

If there are additional results, this is the token for the next set of results.

" + } + }, + "ApproximateTotalCount": { + "target": "com.amazonaws.connect#ApproximateTotalCount", + "traits": { + "smithy.api#documentation": "

The total number of contact flows which matched your search query.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.connect#SearchContactFlows": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#SearchContactFlowsRequest" + }, + "output": { + "target": "com.amazonaws.connect#SearchContactFlowsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidParameterException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches the contact flows in an Amazon Connect instance, with optional\n filtering.
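As a rough sketch of how this operation might be paged through with the Soto client generated from this model (member names such as contactFlows and nextToken, and the ContactFlowSearchSummary element type, are assumed from Soto's conventions; the instance ID is a placeholder):

```swift
import SotoConnect

/// Collect every flow summary in an instance by following nextToken pages.
func allFlowSummaries(
    connect: Connect,
    instanceId: String
) async throws -> [Connect.ContactFlowSearchSummary] {
    var summaries: [Connect.ContactFlowSearchSummary] = []
    var nextToken: String?
    repeat {
        let page = try await connect.searchContactFlows(
            .init(instanceId: instanceId, maxResults: 100, nextToken: nextToken)
        )
        summaries += page.contactFlows ?? []
        nextToken = page.nextToken
    } while nextToken != nil
    return summaries
}
```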

", + "smithy.api#http": { + "method": "POST", + "uri": "/search-contact-flows", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "ContactFlows", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.connect#SearchContactFlowsRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the\n Amazon Resource Name (ARN) of the instance.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.connect#MaxResult100", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

" + } + }, + "SearchFilter": { + "target": "com.amazonaws.connect#ContactFlowSearchFilter", + "traits": { + "smithy.api#documentation": "

Filters to be applied to search results.

" + } + }, + "SearchCriteria": { + "target": "com.amazonaws.connect#ContactFlowSearchCriteria", + "traits": { + "smithy.api#documentation": "

The search criteria to be used to return flows.

\n \n

The name and description fields support \"contains\" queries with a\n minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths\n outside of this range will result in invalid results.

\n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#SearchContactFlowsResponse": { + "type": "structure", + "members": { + "ContactFlows": { + "target": "com.amazonaws.connect#ContactFlowSearchSummaryList", + "traits": { + "smithy.api#documentation": "

Information about the contact flows.

" + } + }, + "NextToken": { + "target": "com.amazonaws.connect#NextToken2500", + "traits": { + "smithy.api#documentation": "

If there are additional results, this is the token for the next set of results.

" + } + }, + "ApproximateTotalCount": { + "target": "com.amazonaws.connect#ApproximateTotalCount", + "traits": { + "smithy.api#documentation": "

The total number of contact flows which matched your search query.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#SearchContacts": { "type": "operation", "input": { @@ -31089,7 +32041,7 @@ "UrlExpiryInSeconds": { "target": "com.amazonaws.connect#URLExpiryInSeconds", "traits": { - "smithy.api#documentation": "

Optional override for the expiry of the pre-signed S3 URL in seconds.

" + "smithy.api#documentation": "

Optional override for the expiry of the pre-signed S3 URL in seconds. The default value is\n 300.

" } }, "FileUseCaseType": { @@ -32008,6 +32960,38 @@ } } }, + "com.amazonaws.connect#Step": { + "type": "structure", + "members": { + "Expiry": { + "target": "com.amazonaws.connect#Expiry", + "traits": { + "smithy.api#documentation": "

An object to specify the expiration of a routing step.

" + } + }, + "Expression": { + "target": "com.amazonaws.connect#Expression", + "traits": { + "smithy.api#documentation": "

A tagged union to specify expression for a routing step.

" + } + }, + "Status": { + "target": "com.amazonaws.connect#RoutingCriteriaStepStatus", + "traits": { + "smithy.api#documentation": "

Represents status of the Routing step.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A step signifies the criteria to be used for routing to an agent.
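To show how RoutingCriteria, Step, and RoutingCriteriaStepStatus fit together, here is a hedged Swift sketch that inspects a contact's routing progress; it assumes the Soto-generated member names (steps, index, status) and lower-camel-case enum cases follow the usual conventions and is not an official usage pattern.

```swift
import SotoConnect

/// Summarize a contact's routing criteria: how many steps exist, how many
/// have expired, and whether any step has joined an agent.
func routingProgress(_ criteria: Connect.RoutingCriteria) -> String {
    let steps = criteria.steps ?? []
    let expired = steps.filter { $0.status == .expired }.count
    let joined = steps.contains { $0.status == .joined }
    let index = criteria.index.map { "\($0)" } ?? "n/a"
    return "steps: \(steps.count), expired: \(expired), joined: \(joined), active index: \(index)"
}
```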

" + } + }, + "com.amazonaws.connect#Steps": { + "type": "list", + "member": { + "target": "com.amazonaws.connect#Step" + } + }, "com.amazonaws.connect#StopContact": { "type": "operation", "input": { @@ -32034,7 +33018,7 @@ } ], "traits": { - "smithy.api#documentation": "

Ends the specified contact. Use this API to stop queued callbacks. It does not work for\n voice contacts that use the following initiation methods:

\n
    \n
  • \n

    DISCONNECT

    \n
  • \n
  • \n

    TRANSFER

    \n
  • \n
  • \n

    QUEUE_TRANSFER

    \n
  • \n
\n

Chat and task contacts can be terminated in any state, regardless of initiation\n method.

", + "smithy.api#documentation": "

Ends the specified contact. Use this API to stop queued callbacks. It does not work for\n voice contacts that use the following initiation methods:

\n
    \n
  • \n

    DISCONNECT

    \n
  • \n
  • \n

    TRANSFER

    \n
  • \n
  • \n

    QUEUE_TRANSFER

    \n
  • \n
  • \n

    EXTERNAL_OUTBOUND

    \n
  • \n
  • \n

    MONITOR

    \n
  • \n
\n

Chat and task contacts can be terminated in any state, regardless of initiation\n method.
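A minimal sketch of stopping a queued callback with the Soto client, assuming the generated stopContact method and request member names follow Soto's usual conventions; the contact and instance IDs are placeholders.

```swift
import SotoConnect

/// Stop a queued callback. This is rejected for voice contacts initiated by
/// DISCONNECT, TRANSFER, QUEUE_TRANSFER, EXTERNAL_OUTBOUND, or MONITOR.
func cancelQueuedCallback(connect: Connect, contactId: String, instanceId: String) async throws {
    _ = try await connect.stopContact(
        .init(contactId: contactId, instanceId: instanceId)
    )
}
```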

", "smithy.api#http": { "method": "POST", "uri": "/contact/stop", @@ -34296,7 +35280,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the specified flow.

\n

You can also create and update flows using the Amazon Connect\n Flow language.

", + "smithy.api#documentation": "

Updates the specified flow.

\n

You can also create and update flows using the Amazon Connect\n Flow language.

\n

Use the $SAVED alias in the request to describe the SAVED content\n of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is\n published, $SAVED needs to be supplied to view saved content that has not been\n published.

", "smithy.api#http": { "method": "POST", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}/content", @@ -34457,7 +35441,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates specified flow module for the specified Amazon Connect instance.

", + "smithy.api#documentation": "

Updates specified flow module for the specified Amazon Connect instance.

\n

Use the $SAVED alias in the request to describe the SAVED content\n of a Flow. For example, arn:aws:.../contact-flow/{id}:$SAVED. Once a contact flow is\n published, $SAVED needs to be supplied to view saved content that has not been\n published.

", "smithy.api#http": { "method": "POST", "uri": "/contact-flow-modules/{InstanceId}/{ContactFlowModuleId}/content", @@ -34750,7 +35734,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds).\n These properties can be used to change a customer's position in the queue. For example, you can\n move a contact to the back of the queue by setting a lower routing priority relative to other\n contacts in queue; or you can move a contact to the front of the queue by increasing the routing\n age which will make the contact look artificially older and therefore higher up in the\n first-in-first-out routing order. Note that adjusting the routing age of a contact affects only\n its position in queue, and not its actual queue wait time as reported through metrics. These\n properties can also be updated by using the Set routing priority / age flow\n block.

", + "smithy.api#documentation": "

Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds).\n These properties can be used to change a customer's position in the queue. For example, you can\n move a contact to the back of the queue by setting a lower routing priority relative to other\n contacts in queue; or you can move a contact to the front of the queue by increasing the routing\n age which will make the contact look artificially older and therefore higher up in the\n first-in-first-out routing order. Note that adjusting the routing age of a contact affects only\n its position in queue, and not its actual queue wait time as reported through metrics. These\n properties can also be updated by using the Set routing priority / age flow\n block.

\n \n

Either QueuePriority or QueueTimeAdjustmentInSeconds should be provided within the request body, but not\n both.
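For illustration, a hedged Soto-style sketch that sends only the priority; the updateContactRoutingData method and member names are assumed from Soto's code-generation conventions, and the IDs are placeholders.

```swift
import SotoConnect

/// Move a contact toward the front of its queue by giving it a higher
/// routing priority (1 is the highest). Send either the priority or the
/// queue-time adjustment, not both.
func prioritizeContact(connect: Connect, contactId: String, instanceId: String) async throws {
    _ = try await connect.updateContactRoutingData(
        .init(contactId: contactId, instanceId: instanceId, queuePriority: 1)
    )
}
```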

\n
", "smithy.api#http": { "method": "POST", "uri": "/contacts/{InstanceId}/{ContactId}/routing-data", diff --git a/models/controltower.json b/models/controltower.json index 0f16f24339..996779814b 100644 --- a/models/controltower.json +++ b/models/controltower.json @@ -7,41 +7,32 @@ "operations": [ { "target": "com.amazonaws.controltower#DisableControl" - }, - { - "target": "com.amazonaws.controltower#EnableControl" - }, - { - "target": "com.amazonaws.controltower#GetControlOperation" - }, + } + ], + "resources": [ { - "target": "com.amazonaws.controltower#GetEnabledControl" + "target": "com.amazonaws.controltower#BaselineOperationResource" }, { - "target": "com.amazonaws.controltower#ListEnabledControls" + "target": "com.amazonaws.controltower#BaselineResource" }, { - "target": "com.amazonaws.controltower#ListTagsForResource" + "target": "com.amazonaws.controltower#ControlOperationResource" }, { - "target": "com.amazonaws.controltower#TagResource" + "target": "com.amazonaws.controltower#EnabledBaselineResource" }, { - "target": "com.amazonaws.controltower#UntagResource" + "target": "com.amazonaws.controltower#EnabledControlResource" }, { - "target": "com.amazonaws.controltower#UpdateEnabledControl" - } - ], - "resources": [ - { - "target": "com.amazonaws.controltower#BaselineResource" + "target": "com.amazonaws.controltower#LandingZoneOperationResource" }, { - "target": "com.amazonaws.controltower#EnabledBaselineResource" + "target": "com.amazonaws.controltower#LandingZoneResource" }, { - "target": "com.amazonaws.controltower#LandingZoneResource" + "target": "com.amazonaws.controltower#TaggingResource" } ], "traits": { @@ -76,7 +67,7 @@ "x-amzn-trace-id" ] }, - "smithy.api#documentation": "

These interfaces allow you to apply the Amazon Web Services library of pre-defined\n controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

\n

To call these APIs, you'll need to know:

\n
    \n
  • \n

    the controlIdentifier for the control--or guardrail--you are targeting.

    \n
  • \n
  • \n

    the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

    \n
  • \n
  • \n

    the ARN associated with a resource that you wish to tag or untag.

    \n
  • \n
\n

\n To get the controlIdentifier for your Amazon Web Services Control Tower\n control:\n

\n

The controlIdentifier is an ARN that is specified for each\n control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

\n

The controlIdentifier is unique in each Amazon Web Services Region for each control. You can\n find the controlIdentifier for each Region and control in the Tables of control metadata in the Amazon Web Services Control Tower User Guide.\n

\n

A quick-reference list of control identifers for the Amazon Web Services Control Tower legacy Strongly recommended and\n Elective controls is given in Resource identifiers for\n APIs and controls in the Controls reference guide section\n of the Amazon Web Services Control Tower User Guide. Remember that Mandatory controls\n cannot be added or removed.

\n \n

\n ARN format:\n arn:aws:controltower:{REGION}::control/{CONTROL_NAME}\n

\n

\n Example:\n

\n

\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED\n

\n
\n

\n To get the targetIdentifier:\n

\n

The targetIdentifier is the ARN for an OU.

\n

In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

\n \n

\n OU ARN format:\n

\n

\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}\n

\n
\n

\n Details and examples\n

\n \n

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n

\n

\n Recording API Requests\n

\n

Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your\n Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by\n CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made\n the request and when, and so on. For more about Amazon Web Services Control Tower and its support for\n CloudTrail, see Logging Amazon Web Services Control Tower\n Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about\n CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User\n Guide.

", + "smithy.api#documentation": "

Amazon Web Services Control Tower offers application programming interface (API) operations that support programmatic interaction with these types of resources:

\n \n

For more information about these types of resources, see the \n Amazon Web Services Control Tower User Guide\n .

\n

\n About control APIs\n

\n

These interfaces allow you to apply the Amazon Web Services library of pre-defined\n controls to your organizational units, programmatically. In Amazon Web Services Control Tower, the terms \"control\" and \"guardrail\" are synonyms.

\n

To call these APIs, you'll need to know:

  • the controlIdentifier for the control--or guardrail--you are targeting.

  • the ARN associated with the target organizational unit (OU), which we call the targetIdentifier.

  • the ARN associated with a resource that you wish to tag or untag.

\n To get the controlIdentifier for your Amazon Web Services Control Tower\n control:\n

\n

The controlIdentifier is an ARN that is specified for each\n control. You can view the controlIdentifier in the console on the Control details page, as well as in the documentation.

\n

The controlIdentifier is unique in each Amazon Web Services Region for each control. You can\n find the controlIdentifier for each Region and control in the Tables of control metadata or the Control availability by Region tables in the Amazon Web Services Control Tower Controls Reference Guide.

\n

A quick-reference list of control identifiers for the Amazon Web Services Control Tower legacy Strongly recommended and\n Elective controls is given in Resource identifiers for\n APIs and controls in the \n Amazon Web Services Control Tower Controls Reference Guide\n . Remember that Mandatory controls cannot be added or removed.

\n \n

\n ARN format:\n arn:aws:controltower:{REGION}::control/{CONTROL_NAME}\n

\n

\n Example:\n

\n

\n arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED\n

\n
\n

\n To get the targetIdentifier:\n

\n

The targetIdentifier is the ARN for an OU.

\n

In the Amazon Web Services Organizations console, you can find the ARN for the OU on the Organizational unit details page associated with that OU.

\n \n

\n OU ARN format:\n

\n

\n arn:${Partition}:organizations::${MasterAccountId}:ou/o-${OrganizationId}/ou-${OrganizationalUnitId}\n
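
For orientation, here is a minimal Soto for Swift sketch that passes these two identifiers to EnableControl. The OU and account IDs are placeholders, and awsClient is assumed to be an AWSClient configured elsewhere; check the generated SotoControlTower API for the exact shapes.

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere (credentials, HTTP client).
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    // controlIdentifier: the ARN of the control, unique per Region.
    let controlIdentifier = "arn:aws:controltower:us-west-2::control/AWS-GR_AUTOSCALING_LAUNCH_CONFIG_PUBLIC_IP_DISABLED"
    // targetIdentifier: the ARN of the organizational unit (placeholder IDs).
    let targetIdentifier = "arn:aws:organizations::111122223333:ou/o-exampleorgid/ou-examplerootid-exampleouid"

    let response = try await controlTower.enableControl(
        .init(controlIdentifier: controlIdentifier, targetIdentifier: targetIdentifier)
    )
    print("Started operation:", response.operationIdentifier)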

\n
\n

\n About landing zone APIs\n

\n

You can configure and launch an Amazon Web Services Control Tower landing zone with APIs. For an introduction and steps, see Getting started with Amazon Web Services Control Tower using APIs.

\n

For an overview of landing zone API operations, see Amazon Web Services Control Tower supports landing zone APIs. The individual API operations for landing zones are detailed in this document, the API reference manual, in the \"Actions\" section.

\n

\n About baseline APIs\n

\n

You can apply the AWSControlTowerBaseline baseline to an organizational unit (OU) as a way to register the OU with Amazon Web Services Control Tower, programmatically. For a general overview of this capability, see Amazon Web Services Control Tower supports APIs for OU registration and configuration with baselines.

\n

You can call the baseline API operations to view the baselines that Amazon Web Services Control Tower enables for your landing zone, on your behalf, when setting up the landing zone. These baselines are read-only baselines.

\n

The individual API operations for baselines are detailed in this document, the API reference manual, in the \"Actions\" section. For usage examples, see Baseline API input and output examples with CLI.
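
As a rough sketch of OU registration with Soto for Swift (not taken from this model's examples): look up the AWSControlTowerBaseline ARN, then enable it on the OU. The baseline version string and the OU ARN below are assumptions to verify against your landing zone.

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere.
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    // Find the ARN of the AWSControlTowerBaseline among the available baselines.
    let available = try await controlTower.listBaselines(.init())
    guard let baseline = available.baselines.first(where: { $0.name == "AWSControlTowerBaseline" }) else {
        fatalError("AWSControlTowerBaseline not found")
    }

    // Enable (apply) the baseline to the OU to register it with Control Tower.
    // "4.0" is an assumed baseline version; the OU ARN uses placeholder IDs.
    let operation = try await controlTower.enableBaseline(.init(
        baselineIdentifier: baseline.arn,
        baselineVersion: "4.0",
        targetIdentifier: "arn:aws:organizations::111122223333:ou/o-exampleorgid/ou-examplerootid-exampleouid"
    ))
    print("EnableBaseline operation:", operation.operationIdentifier)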

\n

\n Details and examples\n

\n \n

To view the open source resource repository on GitHub, see aws-cloudformation/aws-cloudformation-resource-providers-controltower\n

\n

\n Recording API Requests\n

\n

Amazon Web Services Control Tower supports Amazon Web Services CloudTrail, a service that records Amazon Web Services API calls for your\n Amazon Web Services account and delivers log files to an Amazon S3 bucket. By using information collected by\n CloudTrail, you can determine which requests the Amazon Web Services Control Tower service received, who made\n the request and when, and so on. For more about Amazon Web Services Control Tower and its support for\n CloudTrail, see Logging Amazon Web Services Control Tower\n Actions with Amazon Web Services CloudTrail in the Amazon Web Services Control Tower User Guide. To learn more about\n CloudTrail, including how to turn it on and find your log files, see the Amazon Web Services CloudTrail User\n Guide.

", "smithy.api#title": "AWS Control Tower", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1005,6 +996,17 @@ "smithy.api#documentation": "

An object of shape BaselineOperation, returning details about the specified Baseline operation ID.

" } }, + "com.amazonaws.controltower#BaselineOperationResource": { + "type": "resource", + "identifiers": { + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier" + } + }, + "read": { + "target": "com.amazonaws.controltower#GetBaselineOperation" + } + }, "com.amazonaws.controltower#BaselineOperationStatus": { "type": "enum", "members": { @@ -1141,6 +1143,18 @@ "smithy.api#pattern": "^arn:aws[0-9a-zA-Z_\\-:\\/]+$" } }, + "com.amazonaws.controltower#ControlIdentifiers": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#ControlIdentifier" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.controltower#ControlOperation": { "type": "structure", "members": { @@ -1175,48 +1189,226 @@ "traits": { "smithy.api#documentation": "

If the operation result is FAILED, this string contains a message explaining\n why the operation failed.

" } + }, + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the specified operation.

" + } + }, + "controlIdentifier": { + "target": "com.amazonaws.controltower#ControlIdentifier", + "traits": { + "smithy.api#documentation": "

The controlIdentifier of the control for the operation.

" + } + }, + "targetIdentifier": { + "target": "com.amazonaws.controltower#TargetIdentifier", + "traits": { + "smithy.api#documentation": "

The target upon which the control operation is working.

" + } + }, + "enabledControlIdentifier": { + "target": "com.amazonaws.controltower#Arn", + "traits": { + "smithy.api#documentation": "

The controlIdentifier of the enabled control.

" + } } }, "traits": { "smithy.api#documentation": "

An operation performed by the control.

" } }, + "com.amazonaws.controltower#ControlOperationFilter": { + "type": "structure", + "members": { + "controlIdentifiers": { + "target": "com.amazonaws.controltower#ControlIdentifiers", + "traits": { + "smithy.api#documentation": "

The set of controlIdentifier objects returned by the filter.

" + } + }, + "targetIdentifiers": { + "target": "com.amazonaws.controltower#TargetIdentifiers", + "traits": { + "smithy.api#documentation": "

The set of targetIdentifier objects returned by the filter.

" + } + }, + "enabledControlIdentifiers": { + "target": "com.amazonaws.controltower#EnabledControlIdentifiers", + "traits": { + "smithy.api#documentation": "

The set of controlIdentifier ARNs for the enabled controls selected by the filter.

" + } + }, + "statuses": { + "target": "com.amazonaws.controltower#ControlOperationStatuses", + "traits": { + "smithy.api#documentation": "

Lists the status of control operations.

" + } + }, + "controlOperationTypes": { + "target": "com.amazonaws.controltower#ControlOperationTypes", + "traits": { + "smithy.api#documentation": "

The set of ControlOperation objects returned by the filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A filter object that lets you call ListControlOperations with a specific filter.
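
A hedged Soto for Swift sketch of passing such a filter to ListControlOperations; the enum case spellings (.enableControl, .inProgress) follow Soto's usual camelCase mapping of the model values and should be checked against the generated client.

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere.
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    // Select only ENABLE_CONTROL operations that are still in progress.
    // Note: each list in the filter accepts exactly one element (length min 1, max 1).
    let filter = ControlTower.ControlOperationFilter(
        controlOperationTypes: [.enableControl],
        statuses: [.inProgress]
    )
    let page = try await controlTower.listControlOperations(.init(filter: filter, maxResults: 20))
    for operation in page.controlOperations {
        print(operation.operationIdentifier ?? "?", operation.status?.rawValue ?? "?")
    }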

" + } + }, + "com.amazonaws.controltower#ControlOperationResource": { + "type": "resource", + "identifiers": { + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier" + } + }, + "read": { + "target": "com.amazonaws.controltower#GetControlOperation" + }, + "list": { + "target": "com.amazonaws.controltower#ListControlOperations" + } + }, "com.amazonaws.controltower#ControlOperationStatus": { - "type": "string", + "type": "enum", + "members": { + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + } + } + }, + "com.amazonaws.controltower#ControlOperationStatuses": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#ControlOperationStatus" + }, "traits": { - "smithy.api#enum": [ - { - "name": "SUCCEEDED", - "value": "SUCCEEDED" - }, - { - "name": "FAILED", - "value": "FAILED" - }, - { - "name": "IN_PROGRESS", - "value": "IN_PROGRESS" + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.controltower#ControlOperationSummary": { + "type": "structure", + "members": { + "operationType": { + "target": "com.amazonaws.controltower#ControlOperationType", + "traits": { + "smithy.api#documentation": "

The type of operation.

" + } + }, + "startTime": { + "target": "com.amazonaws.controltower#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which a control operation began.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "endTime": { + "target": "com.amazonaws.controltower#Timestamp", + "traits": { + "smithy.api#documentation": "

The time at which the control operation was completed.

", + "smithy.api#timestampFormat": "date-time" + } + }, + "status": { + "target": "com.amazonaws.controltower#ControlOperationStatus", + "traits": { + "smithy.api#documentation": "

The status of the specified control operation.

" + } + }, + "statusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A specific message displayed as part of the control status.

" + } + }, + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of a control operation.

" + } + }, + "controlIdentifier": { + "target": "com.amazonaws.controltower#ControlIdentifier", + "traits": { + "smithy.api#documentation": "

The controlIdentifier of a control.

" + } + }, + "targetIdentifier": { + "target": "com.amazonaws.controltower#TargetIdentifier", + "traits": { + "smithy.api#documentation": "

The unique identifier of the target of a control operation.

" } - ] + }, + "enabledControlIdentifier": { + "target": "com.amazonaws.controltower#Arn", + "traits": { + "smithy.api#documentation": "

The controlIdentifier of an enabled control.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summary of information about the specified control operation.

" } }, "com.amazonaws.controltower#ControlOperationType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "ENABLE_CONTROL", - "value": "ENABLE_CONTROL" - }, - { - "name": "DISABLE_CONTROL", - "value": "DISABLE_CONTROL" - }, - { - "name": "UPDATE_ENABLED_CONTROL", - "value": "UPDATE_ENABLED_CONTROL" + "type": "enum", + "members": { + "ENABLE_CONTROL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_CONTROL" + } + }, + "DISABLE_CONTROL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_CONTROL" + } + }, + "UPDATE_ENABLED_CONTROL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATE_ENABLED_CONTROL" } - ] + } + } + }, + "com.amazonaws.controltower#ControlOperationTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#ControlOperationType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.controltower#ControlOperations": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#ControlOperationSummary" } }, "com.amazonaws.controltower#CreateLandingZone": { @@ -1266,7 +1458,7 @@ "manifest": { "target": "com.amazonaws.controltower#Manifest", "traits": { - "smithy.api#documentation": "

The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review \n The manifest file.

", + "smithy.api#documentation": "

The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review \n Launch your landing zone.

", "smithy.api#required": {} } }, @@ -1404,7 +1596,7 @@ } ], "traits": { - "smithy.api#documentation": "

Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline.

", + "smithy.api#documentation": "

Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1475,7 +1667,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API call turns off a control. It starts an asynchronous operation that deletes AWS\n resources on the specified organizational unit and the accounts it contains. The resources\n will vary according to the control that you specify. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

This API call turns off a control. It starts an asynchronous operation that deletes Amazon Web Services\n resources on the specified organizational unit and the accounts it contains. The resources\n will vary according to the control that you specify. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1500,6 +1692,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.controltower#DisableControlOutput": { @@ -1512,6 +1707,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.controltower#DriftStatus": { @@ -1557,6 +1755,18 @@ "smithy.api#documentation": "

The drift summary of the enabled control.

\n

Amazon Web Services Control Tower expects the enabled control\n configuration to include all supported and governed Regions. If the enabled control differs\n from the expected configuration, it is defined to be in a state of drift. You can repair this drift by resetting the enabled control.

" } }, + "com.amazonaws.controltower#DriftStatuses": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#DriftStatus" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.controltower#EnableBaseline": { "type": "operation", "input": { @@ -1589,7 +1799,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target.

", + "smithy.api#documentation": "

Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1692,7 +1902,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services\n resources on the specified organizational unit and the accounts it contains. The resources\n created will vary according to the control that you specify. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services\n resources on the specified organizational unit and the accounts it contains. The resources\n created will vary according to the control that you specify. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -1729,6 +1939,9 @@ "smithy.api#documentation": "

A list of input parameter values, which are specified to configure the control when you enable it.

" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.controltower#EnableControlOutput": { @@ -1747,6 +1960,9 @@ "smithy.api#documentation": "

The ARN of the EnabledControl resource.

" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.controltower#EnabledBaselineBaselineIdentifiers": { @@ -1914,11 +2130,6 @@ "target": "com.amazonaws.controltower#ResetEnabledBaseline" } ], - "collectionOperations": [ - { - "target": "com.amazonaws.controltower#GetBaselineOperation" - } - ], "traits": { "aws.api#arn": { "template": "enabledbaseline/{enabledBaselineIdentifier}" @@ -2034,35 +2245,73 @@ "smithy.api#documentation": "

Information about the enabled control.

" } }, - "com.amazonaws.controltower#EnabledControlParameter": { + "com.amazonaws.controltower#EnabledControlFilter": { "type": "structure", "members": { - "key": { - "target": "smithy.api#String", + "controlIdentifiers": { + "target": "com.amazonaws.controltower#ControlIdentifiers", "traits": { - "smithy.api#documentation": "

The key of a key/value pair.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The set of controlIdentifier objects returned by the filter.

" } }, - "value": { - "target": "smithy.api#Document", + "statuses": { + "target": "com.amazonaws.controltower#EnablementStatuses", "traits": { - "smithy.api#documentation": "

The value of a key/value pair.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A list of EnablementStatus items.

" + } + }, + "driftStatuses": { + "target": "com.amazonaws.controltower#DriftStatuses", + "traits": { + "smithy.api#documentation": "

A list of DriftStatus items.

" } } }, "traits": { - "smithy.api#documentation": "

A key/value pair, where Key is of type String and Value is of type Document.

" + "smithy.api#documentation": "

A structure that returns a set of control identifiers, the control status for each control in the set, and the drift status for each control in the set.
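
For example, a Soto for Swift sketch that uses this filter with ListEnabledControls to find enabled controls that have drifted (the .drifted case name is assumed from Soto's usual enum mapping):

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere.
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    // List enabled controls whose drift status is DRIFTED.
    let filter = ControlTower.EnabledControlFilter(driftStatuses: [.drifted])
    let drifted = try await controlTower.listEnabledControls(.init(filter: filter))
    for control in drifted.enabledControls {
        print(control.controlIdentifier ?? "?", "on", control.targetIdentifier ?? "?")
    }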

" } }, - "com.amazonaws.controltower#EnabledControlParameterSummaries": { + "com.amazonaws.controltower#EnabledControlIdentifiers": { "type": "list", "member": { - "target": "com.amazonaws.controltower#EnabledControlParameterSummary" + "target": "com.amazonaws.controltower#Arn" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } } }, - "com.amazonaws.controltower#EnabledControlParameterSummary": { + "com.amazonaws.controltower#EnabledControlParameter": { + "type": "structure", + "members": { + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The key of a key/value pair.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#Document", + "traits": { + "smithy.api#documentation": "

The value of a key/value pair.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A key/value pair, where Key is of type String and Value is of type Document.

" + } + }, + "com.amazonaws.controltower#EnabledControlParameterSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#EnabledControlParameterSummary" + } + }, + "com.amazonaws.controltower#EnabledControlParameterSummary": { "type": "structure", "members": { "key": { @@ -2090,6 +2339,26 @@ "target": "com.amazonaws.controltower#EnabledControlParameter" } }, + "com.amazonaws.controltower#EnabledControlResource": { + "type": "resource", + "identifiers": { + "enabledControlIdentifier": { + "target": "com.amazonaws.controltower#Arn" + } + }, + "create": { + "target": "com.amazonaws.controltower#EnableControl" + }, + "read": { + "target": "com.amazonaws.controltower#GetEnabledControl" + }, + "update": { + "target": "com.amazonaws.controltower#UpdateEnabledControl" + }, + "list": { + "target": "com.amazonaws.controltower#ListEnabledControls" + } + }, "com.amazonaws.controltower#EnabledControlSummary": { "type": "structure", "members": { @@ -2163,18 +2432,30 @@ "status": { "target": "com.amazonaws.controltower#EnablementStatus", "traits": { - "smithy.api#documentation": "

The deployment status of the enabled control.

\n

Valid values:

  • SUCCEEDED: The enabledControl configuration was deployed successfully.

  • UNDER_CHANGE: The enabledControl configuration is changing.

  • FAILED: The enabledControl configuration failed to deploy.
" + "smithy.api#documentation": "

The deployment status of the enabled resource.

\n

Valid values:

  • SUCCEEDED: The EnabledControl or EnabledBaseline configuration was deployed successfully.

  • UNDER_CHANGE: The EnabledControl or EnabledBaseline configuration is changing.

  • FAILED: The EnabledControl or EnabledBaseline configuration failed to deploy.
" } }, "lastOperationIdentifier": { "target": "com.amazonaws.controltower#OperationIdentifier", "traits": { - "smithy.api#documentation": "

The last operation identifier for the enabled control.

" + "smithy.api#documentation": "

The last operation identifier for the enabled resource.

" } } }, "traits": { - "smithy.api#documentation": "

The deployment summary of the enabled control.

" + "smithy.api#documentation": "

The deployment summary of an EnabledControl or EnabledBaseline resource.

" + } + }, + "com.amazonaws.controltower#EnablementStatuses": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#EnablementStatus" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } } }, "com.amazonaws.controltower#GetBaseline": { @@ -2203,7 +2484,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieve details about an existing Baseline resource by specifying its identifier.

", + "smithy.api#documentation": "

Retrieve details about an existing Baseline resource by specifying its identifier. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -2253,12 +2534,13 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure.

", + "smithy.api#documentation": "

Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", "uri": "/get-baseline-operation" - } + }, + "smithy.api#readonly": {} } }, "com.amazonaws.controltower#GetBaselineOperationInput": { @@ -2345,7 +2627,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the status of a particular EnableControl or\n DisableControl operation. Displays a message in case of error. Details for an\n operation are available for 90 days. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

Returns the status of a particular EnableControl or\n DisableControl operation. Displays a message in case of error. Details for an\n operation are available for 90 days. For usage examples, see the \n Controls Reference Guide\n .
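
A small Soto for Swift sketch of checking an operation after EnableControl or DisableControl; operationIdentifier is assumed to be the value returned by that earlier call.

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere and
    // operationIdentifier came from a previous EnableControl/DisableControl response.
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    let result = try await controlTower.getControlOperation(.init(operationIdentifier: operationIdentifier))
    let status = result.controlOperation.status
    if status == .succeeded {
        print("Operation succeeded")
    } else if status == .failed {
        print("Operation failed:", result.controlOperation.statusMessage ?? "no message")
    } else {
        print("Operation still in progress")
    }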

", "smithy.api#http": { "code": 200, "method": "POST", @@ -2364,6 +2646,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.controltower#GetControlOperationOutput": { @@ -2376,6 +2661,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.controltower#GetEnabledBaseline": { @@ -2468,7 +2756,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves details about an enabled control. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

Retrieves details about an enabled control. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -2583,7 +2871,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the status of the specified landing zone operation. Details for an operation are available for \n 60 days.

", + "smithy.api#documentation": "

Returns the status of the specified landing zone operation. Details for an operation are available for \n 90 days.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -2667,7 +2955,7 @@ "manifest": { "target": "com.amazonaws.controltower#Manifest", "traits": { - "smithy.api#documentation": "

The landing zone manifest.yaml text file that specifies the landing zone configurations.

", + "smithy.api#documentation": "

The landing zone manifest JSON text file that specifies the landing zone configurations.

", "smithy.api#required": {} } }, @@ -2740,6 +3028,18 @@ "smithy.api#documentation": "

The landing zone operation type.

\n

Valid values:

  • DELETE: The DeleteLandingZone operation.

  • CREATE: The CreateLandingZone operation.

  • UPDATE: The UpdateLandingZone operation.

  • RESET: The ResetLandingZone operation.
" } }, + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier", + "traits": { + "smithy.api#documentation": "

The operationIdentifier of the landing zone operation.

" + } + }, + "status": { + "target": "com.amazonaws.controltower#LandingZoneOperationStatus", + "traits": { + "smithy.api#documentation": "

Valid values:

  • SUCCEEDED: The landing zone operation succeeded.

  • IN_PROGRESS: The landing zone operation is in progress.

  • FAILED: The landing zone operation failed.
" + } + }, "startTime": { "target": "com.amazonaws.controltower#Timestamp", "traits": { @@ -2752,12 +3052,6 @@ "smithy.api#documentation": "

The landing zone operation end time.

" } }, - "status": { - "target": "com.amazonaws.controltower#LandingZoneOperationStatus", - "traits": { - "smithy.api#documentation": "

Valid values:

  • SUCCEEDED: The landing zone operation succeeded.

  • IN_PROGRESS: The landing zone operation is in progress.

  • FAILED: The landing zone operation failed.
" - } - }, "statusMessage": { "target": "smithy.api#String", "traits": { @@ -2769,6 +3063,40 @@ "smithy.api#documentation": "

Information about a landing zone operation.

" } }, + "com.amazonaws.controltower#LandingZoneOperationFilter": { + "type": "structure", + "members": { + "types": { + "target": "com.amazonaws.controltower#LandingZoneOperationTypes", + "traits": { + "smithy.api#documentation": "

The set of landing zone operation types selected by the filter.

" + } + }, + "statuses": { + "target": "com.amazonaws.controltower#LandingZoneOperationStatuses", + "traits": { + "smithy.api#documentation": "

The statuses of the set of landing zone operations selected by the filter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A filter object that lets you call ListLandingZoneOperations with a specific filter.

" + } + }, + "com.amazonaws.controltower#LandingZoneOperationResource": { + "type": "resource", + "identifiers": { + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier" + } + }, + "read": { + "target": "com.amazonaws.controltower#GetLandingZoneOperation" + }, + "list": { + "target": "com.amazonaws.controltower#ListLandingZoneOperations" + } + }, "com.amazonaws.controltower#LandingZoneOperationStatus": { "type": "enum", "members": { @@ -2792,6 +3120,44 @@ } } }, + "com.amazonaws.controltower#LandingZoneOperationStatuses": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#LandingZoneOperationStatus" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.controltower#LandingZoneOperationSummary": { + "type": "structure", + "members": { + "operationType": { + "target": "com.amazonaws.controltower#LandingZoneOperationType", + "traits": { + "smithy.api#documentation": "

The type of the landing zone operation.

" + } + }, + "operationIdentifier": { + "target": "com.amazonaws.controltower#OperationIdentifier", + "traits": { + "smithy.api#documentation": "

The operationIdentifier of the landing zone operation.

" + } + }, + "status": { + "target": "com.amazonaws.controltower#LandingZoneOperationStatus", + "traits": { + "smithy.api#documentation": "

The status of the landing zone operation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Returns a summary of information about a landing zone operation.

" + } + }, "com.amazonaws.controltower#LandingZoneOperationType": { "type": "enum", "members": { @@ -2821,6 +3187,24 @@ } } }, + "com.amazonaws.controltower#LandingZoneOperationTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#LandingZoneOperationType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.controltower#LandingZoneOperations": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#LandingZoneOperationSummary" + } + }, "com.amazonaws.controltower#LandingZoneResource": { "type": "resource", "identifiers": { @@ -2848,11 +3232,6 @@ "target": "com.amazonaws.controltower#ResetLandingZone" } ], - "collectionOperations": [ - { - "target": "com.amazonaws.controltower#GetLandingZoneOperation" - } - ], "traits": { "aws.cloudformation#cfnResource": { "name": "LandingZone" @@ -2935,7 +3314,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a summary list of all available baselines.

", + "smithy.api#documentation": "

Returns a summary list of all available baselines. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3000,6 +3379,106 @@ "smithy.api#output": {} } }, + "com.amazonaws.controltower#ListControlOperations": { + "type": "operation", + "input": { + "target": "com.amazonaws.controltower#ListControlOperationsInput" + }, + "output": { + "target": "com.amazonaws.controltower#ListControlOperationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.controltower#AccessDeniedException" + }, + { + "target": "com.amazonaws.controltower#InternalServerException" + }, + { + "target": "com.amazonaws.controltower#ThrottlingException" + }, + { + "target": "com.amazonaws.controltower#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Provides a list of operations in progress or queued. For usage examples, see ListControlOperation examples.
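
Because the operation is paginated, Soto's generated paginator (assuming the usual listControlOperationsPaginator naming) can follow nextToken automatically:

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere.
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    // Iterate every page of control operations; Soto requests the next page as needed.
    for try await page in controlTower.listControlOperationsPaginator(.init()) {
        for operation in page.controlOperations {
            print(operation.operationType?.rawValue ?? "?", operation.status?.rawValue ?? "?")
        }
    }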

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/list-control-operations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "controlOperations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.controltower#ListControlOperationsInput": { + "type": "structure", + "members": { + "filter": { + "target": "com.amazonaws.controltower#ControlOperationFilter", + "traits": { + "smithy.api#documentation": "

An input filter for the ListControlOperations API that lets you select the types of control operations to view.

" + } + }, + "nextToken": { + "target": "com.amazonaws.controltower#ListControlOperationsNextToken", + "traits": { + "smithy.api#documentation": "

A pagination token.

" + } + }, + "maxResults": { + "target": "com.amazonaws.controltower#ListControlOperationsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be shown.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.controltower#ListControlOperationsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.controltower#ListControlOperationsNextToken": { + "type": "string", + "traits": { + "smithy.api#pattern": "\\S+" + } + }, + "com.amazonaws.controltower#ListControlOperationsOutput": { + "type": "structure", + "members": { + "controlOperations": { + "target": "com.amazonaws.controltower#ControlOperations", + "traits": { + "smithy.api#documentation": "

Returns a list of output from control operations.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.controltower#ListControlOperationsNextToken", + "traits": { + "smithy.api#documentation": "

A pagination token.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.controltower#ListEnabledBaselines": { "type": "operation", "input": { @@ -3023,7 +3502,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources.

", + "smithy.api#documentation": "

Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3126,7 +3605,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and\n the accounts it contains. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

Lists the controls enabled by Amazon Web Services Control Tower on the specified organizational unit and\n the accounts it contains. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3147,8 +3626,8 @@ "targetIdentifier": { "target": "com.amazonaws.controltower#TargetIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page.

", - "smithy.api#required": {} + "smithy.api#default": null, + "smithy.api#documentation": "

The ARN of the organizational unit. For information on how to find the targetIdentifier, see the overview page.

" } }, "nextToken": { @@ -3162,7 +3641,16 @@ "traits": { "smithy.api#documentation": "

How many results to return per API call.

" } + }, + "filter": { + "target": "com.amazonaws.controltower#EnabledControlFilter", + "traits": { + "smithy.api#documentation": "

An input filter for the ListEnabledControls API that lets you select the types of enabled controls to view.

" + } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.controltower#ListEnabledControlsOutput": { @@ -3181,6 +3669,103 @@ "smithy.api#documentation": "

Retrieves the next page of results. If the string is empty, the response is the\n end of the results.

" } } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.controltower#ListLandingZoneOperations": { + "type": "operation", + "input": { + "target": "com.amazonaws.controltower#ListLandingZoneOperationsInput" + }, + "output": { + "target": "com.amazonaws.controltower#ListLandingZoneOperationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.controltower#AccessDeniedException" + }, + { + "target": "com.amazonaws.controltower#InternalServerException" + }, + { + "target": "com.amazonaws.controltower#ThrottlingException" + }, + { + "target": "com.amazonaws.controltower#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all landing zone operations from the past 90 days. Results are sorted by time, with the most recent operation first.
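
A short Soto for Swift sketch, assuming the same client setup as the earlier sketches and Soto's usual camelCase enum cases, that lists only the operations still running:

    import SotoControlTower

    // Assumes awsClient is an AWSClient configured elsewhere.
    let controlTower = ControlTower(client: awsClient, region: .uswest2)

    // Filter to landing zone operations whose status is IN_PROGRESS.
    let filter = ControlTower.LandingZoneOperationFilter(statuses: [.inProgress])
    let running = try await controlTower.listLandingZoneOperations(.init(filter: filter))
    for op in running.landingZoneOperations {
        print(op.operationType?.rawValue ?? "?", op.operationIdentifier ?? "?", op.status?.rawValue ?? "?")
    }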

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/list-landingzone-operations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "landingZoneOperations" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.controltower#ListLandingZoneOperationsInput": { + "type": "structure", + "members": { + "filter": { + "target": "com.amazonaws.controltower#LandingZoneOperationFilter", + "traits": { + "smithy.api#documentation": "

An input filter for the ListLandingZoneOperations API that lets you select the types of landing zone operations to view.

" + } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The token to continue the list from a previous API call with the same parameters.

" + } + }, + "maxResults": { + "target": "com.amazonaws.controltower#ListLandingZoneOperationsMaxResults", + "traits": { + "smithy.api#documentation": "

How many results to return per API call.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.controltower#ListLandingZoneOperationsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.controltower#ListLandingZoneOperationsOutput": { + "type": "structure", + "members": { + "landingZoneOperations": { + "target": "com.amazonaws.controltower#LandingZoneOperations", + "traits": { + "smithy.api#documentation": "

Lists landing zone operations.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Retrieves the next page of results. If the string is empty, the response is the end of the results.

" + } + } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.controltower#ListLandingZones": { @@ -3295,7 +3880,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of tags associated with the resource. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

Returns a list of tags associated with the resource. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 200, "method": "GET", @@ -3412,7 +3997,7 @@ } ], "traits": { - "smithy.api#documentation": "

Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU.

", + "smithy.api#documentation": "

Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3479,7 +4064,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API call resets a landing zone. It starts an asynchronous operation that resets the \n landing zone to the parameters specified in its original configuration.

", + "smithy.api#documentation": "

This API call resets a landing zone. It starts an asynchronous operation that resets the\n landing zone to the parameters specified in the original configuration, which you specified\n in the manifest file. Nothing in the manifest file's original landing zone configuration is changed\n during the reset process, by default. This API is not the same as a rollback of a landing\n zone version, which is not a supported operation.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3605,7 +4190,7 @@ } ], "traits": { - "smithy.api#documentation": "

Applies tags to a resource. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

Applies tags to a resource. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 204, "method": "POST", @@ -3652,6 +4237,25 @@ } } }, + "com.amazonaws.controltower#TaggingResource": { + "type": "resource", + "identifiers": { + "tagKey": { + "target": "com.amazonaws.controltower#TagKey" + } + }, + "list": { + "target": "com.amazonaws.controltower#ListTagsForResource" + }, + "collectionOperations": [ + { + "target": "com.amazonaws.controltower#TagResource" + }, + { + "target": "com.amazonaws.controltower#UntagResource" + } + ] + }, "com.amazonaws.controltower#TargetIdentifier": { "type": "string", "traits": { @@ -3662,6 +4266,18 @@ "smithy.api#pattern": "^arn:aws[0-9a-zA-Z_\\-:\\/]+$" } }, + "com.amazonaws.controltower#TargetIdentifiers": { + "type": "list", + "member": { + "target": "com.amazonaws.controltower#TargetIdentifier" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, "com.amazonaws.controltower#TargetRegions": { "type": "list", "member": { @@ -3732,7 +4348,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes tags from a resource. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", + "smithy.api#documentation": "

Removes tags from a resource. For usage examples, see the \n Controls Reference Guide\n .

", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -3803,7 +4419,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an EnabledBaseline resource's applied parameters or version.

", + "smithy.api#documentation": "

Updates an EnabledBaseline resource's applied parameters or version. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n .

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3886,7 +4502,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Updates the configuration of an already enabled control.

\n

If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request.

\n

If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower will update the control to match any valid parameters that you supply.

\n

If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see \n the Amazon Web Services Control Tower User Guide\n \n

", + "smithy.api#documentation": "

\n Updates the configuration of an already enabled control.

\n

If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request.

\n

If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower updates the control to match any valid parameters that you supply.

\n

If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see the \n Controls Reference Guide\n .\n

", "smithy.api#http": { "code": 200, "method": "POST", @@ -3981,7 +4597,7 @@ "manifest": { "target": "com.amazonaws.controltower#Manifest", "traits": { - "smithy.api#documentation": "

The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review \n The manifest file.

", + "smithy.api#documentation": "

The manifest file (JSON) is a text file that describes your Amazon Web Services resources. For an example, review \n Launch your landing zone. The example manifest file contains each of the available parameters. The schema for the landing zone's JSON manifest file is not published, by design.

", "smithy.api#required": {} } }, diff --git a/models/cost-and-usage-report-service.json b/models/cost-and-usage-report-service.json index 01cfb188b6..d1418ece5c 100644 --- a/models/cost-and-usage-report-service.json +++ b/models/cost-and-usage-report-service.json @@ -1085,7 +1085,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeReportDefinitionsSuccess", + "params": {}, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.costandusagereportservice#DescribeReportDefinitionsRequest": { diff --git a/models/cost-optimization-hub.json b/models/cost-optimization-hub.json index 3194cd4af3..08cdf093d0 100644 --- a/models/cost-optimization-hub.json +++ b/models/cost-optimization-hub.json @@ -936,6 +936,20 @@ "com.amazonaws.costoptimizationhub#Datetime": { "type": "timestamp" }, + "com.amazonaws.costoptimizationhub#DbInstanceConfiguration": { + "type": "structure", + "members": { + "dbInstanceClass": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The DB instance class of the DB instance.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The DB instance configuration used for recommendations.

" + } + }, "com.amazonaws.costoptimizationhub#EbsVolume": { "type": "structure", "members": { @@ -1690,7 +1704,7 @@ "estimatedMonthlyCost": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The estimated monthly cost of the recommendation.

" + "smithy.api#documentation": "

The estimated monthly cost of the current resource. For Reserved Instances and Savings Plans, it refers to the cost for eligible usage.

" } }, "implementationEffort": { @@ -2307,6 +2321,92 @@ "smithy.api#documentation": "

Defines how rows will be sorted in the response.

" } }, + "com.amazonaws.costoptimizationhub#RdsDbInstance": { + "type": "structure", + "members": { + "configuration": { + "target": "com.amazonaws.costoptimizationhub#RdsDbInstanceConfiguration", + "traits": { + "smithy.api#documentation": "

The Amazon RDS DB instance configuration used for recommendations.

" + } + }, + "costCalculation": { + "target": "com.amazonaws.costoptimizationhub#ResourceCostCalculation" + } + }, + "traits": { + "smithy.api#documentation": "

Contains the details of an Amazon RDS DB instance.

", + "smithy.api#tags": [ + "rds" + ] + } + }, + "com.amazonaws.costoptimizationhub#RdsDbInstanceConfiguration": { + "type": "structure", + "members": { + "instance": { + "target": "com.amazonaws.costoptimizationhub#DbInstanceConfiguration", + "traits": { + "smithy.api#documentation": "

Details about the instance configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon RDS DB instance configuration used for recommendations.

" + } + }, + "com.amazonaws.costoptimizationhub#RdsDbInstanceStorage": { + "type": "structure", + "members": { + "configuration": { + "target": "com.amazonaws.costoptimizationhub#RdsDbInstanceStorageConfiguration", + "traits": { + "smithy.api#documentation": "

The Amazon RDS DB instance storage configuration used for recommendations.

" + } + }, + "costCalculation": { + "target": "com.amazonaws.costoptimizationhub#ResourceCostCalculation" + } + }, + "traits": { + "smithy.api#documentation": "

Contains the details of an Amazon RDS DB instance storage.

", + "smithy.api#tags": [ + "rds" + ] + } + }, + "com.amazonaws.costoptimizationhub#RdsDbInstanceStorageConfiguration": { + "type": "structure", + "members": { + "storageType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The storage type to associate with the DB instance.

" + } + }, + "allocatedStorageInGb": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The new amount of storage in GB to allocate for the DB instance.

" + } + }, + "iops": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The amount of Provisioned IOPS (input/output operations per second) to be initially\n allocated for the DB instance.

" + } + }, + "storageThroughput": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The storage throughput for the DB instance.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon RDS DB instance storage configuration used for recommendations.

" + } + }, "com.amazonaws.costoptimizationhub#RdsReservedInstances": { "type": "structure", "members": { @@ -2497,7 +2597,7 @@ "estimatedMonthlyCost": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The estimated monthly cost for the recommendation.

" + "smithy.api#documentation": "

The estimated monthly cost of the current resource. For Reserved Instances and Savings Plans, it refers to the cost for eligible usage.

" } }, "currencyCode": { @@ -2897,6 +2997,24 @@ "traits": { "smithy.api#documentation": "

The SageMaker Savings Plans recommendation details.

" } + }, + "rdsDbInstance": { + "target": "com.amazonaws.costoptimizationhub#RdsDbInstance", + "traits": { + "smithy.api#documentation": "

The DB instance recommendation details.

", + "smithy.api#tags": [ + "rds" + ] + } + }, + "rdsDbInstanceStorage": { + "target": "com.amazonaws.costoptimizationhub#RdsDbInstanceStorage", + "traits": { + "smithy.api#documentation": "

The DB instance storage recommendation details.

", + "smithy.api#tags": [ + "rds" + ] + } } }, "traits": { @@ -3050,6 +3168,24 @@ "traits": { "smithy.api#enumValue": "ElastiCacheReservedInstances" } + }, + "RDS_DB_INSTANCE_STORAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RdsDbInstanceStorage", + "smithy.api#tags": [ + "rds" + ] + } + }, + "RDS_DB_INSTANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RdsDbInstance", + "smithy.api#tags": [ + "rds" + ] + } } } }, diff --git a/models/customer-profiles.json b/models/customer-profiles.json index cb48f8b807..4776891979 100644 --- a/models/customer-profiles.json +++ b/models/customer-profiles.json @@ -89,7 +89,7 @@ "KeyName": { "target": "com.amazonaws.customerprofiles#name", "traits": { - "smithy.api#documentation": "

A searchable identifier of a customer profile. The predefined keys you can use\n include: _account, _profileId, _assetId, _caseId, _orderId, _fullName, _phone,\n _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, _salesforceContactId,\n _salesforceAssetId, _zendeskUserId, _zendeskExternalId, _zendeskTicketId,\n _serviceNowSystemId, _serviceNowIncidentId, _segmentUserId, _shopifyCustomerId,\n _shopifyOrderId.

", + "smithy.api#documentation": "

A searchable identifier of a customer profile. The predefined keys you can use include: _account, _profileId, _assetId,\n _caseId, _orderId, _fullName, _phone, _email, _ctrContactId, _marketoLeadId,\n _salesforceAccountId, _salesforceContactId, _salesforceAssetId, _zendeskUserId,\n _zendeskExternalId, _zendeskTicketId, _serviceNowSystemId, _serviceNowIncidentId,\n _segmentUserId, _shopifyCustomerId, _shopifyOrderId.

", "smithy.api#required": {} } }, @@ -152,7 +152,7 @@ } }, "traits": { - "smithy.api#documentation": "

A data type pair that consists of a KeyName and Values list that is\n used in conjunction with the\n KeyName\n and\n Values\n parameters to search for profiles using the SearchProfiles API.

" + "smithy.api#documentation": "

A data type pair that consists of a KeyName and Values list\n that is used in conjunction with the KeyName and Values parameters to search for profiles using the SearchProfiles\n API.

" } }, "com.amazonaws.customerprofiles#Address": { @@ -262,7 +262,7 @@ "SourceConnectorType": { "target": "com.amazonaws.customerprofiles#SourceConnectorType", "traits": { - "smithy.api#documentation": "

Specifies the source connector type, such as Salesforce, ServiceNow, and Marketo. Indicates source of ingestion.

", + "smithy.api#documentation": "

Specifies the source connector type, such as Salesforce, ServiceNow, and Marketo.\n Indicates source of ingestion.

", "smithy.api#required": {} } }, @@ -281,7 +281,7 @@ } }, "traits": { - "smithy.api#documentation": "

Structure holding all APPFLOW_INTEGRATION specific workflow attributes.

" + "smithy.api#documentation": "

Structure holding all APPFLOW_INTEGRATION specific workflow\n attributes.

" } }, "com.amazonaws.customerprofiles#AppflowIntegrationWorkflowMetrics": { @@ -313,7 +313,7 @@ } }, "traits": { - "smithy.api#documentation": "

Workflow specific execution metrics for APPFLOW_INTEGRATION workflow.

" + "smithy.api#documentation": "

Workflow specific execution metrics for APPFLOW_INTEGRATION\n workflow.

" } }, "com.amazonaws.customerprofiles#AppflowIntegrationWorkflowStep": { @@ -322,7 +322,7 @@ "FlowName": { "target": "com.amazonaws.customerprofiles#FlowName", "traits": { - "smithy.api#documentation": "

Name of the flow created during execution of workflow step. APPFLOW_INTEGRATION workflow type creates an appflow flow during workflow step execution on the customers behalf.

", + "smithy.api#documentation": "

Name of the flow created during execution of workflow step.\n APPFLOW_INTEGRATION workflow type creates an appflow flow during workflow\n step execution on the customers behalf.

", "smithy.api#required": {} } }, @@ -336,7 +336,7 @@ "ExecutionMessage": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

Message indicating execution of workflow step for APPFLOW_INTEGRATION workflow.

", + "smithy.api#documentation": "

Message indicating execution of workflow step for APPFLOW_INTEGRATION\n workflow.

", "smithy.api#required": {} } }, @@ -344,35 +344,35 @@ "target": "com.amazonaws.customerprofiles#long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Total number of records processed during execution of workflow step for APPFLOW_INTEGRATION workflow.

", + "smithy.api#documentation": "

Total number of records processed during execution of workflow step for\n APPFLOW_INTEGRATION workflow.

", "smithy.api#required": {} } }, "BatchRecordsStartTime": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

Start datetime of records pulled in batch during execution of workflow step for APPFLOW_INTEGRATION workflow.

", + "smithy.api#documentation": "

Start datetime of records pulled in batch during execution of workflow step for\n APPFLOW_INTEGRATION workflow.

", "smithy.api#required": {} } }, "BatchRecordsEndTime": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

End datetime of records pulled in batch during execution of workflow step for APPFLOW_INTEGRATION workflow.

", + "smithy.api#documentation": "

End datetime of records pulled in batch during execution of workflow step for\n APPFLOW_INTEGRATION workflow.

", "smithy.api#required": {} } }, "CreatedAt": { "target": "com.amazonaws.customerprofiles#timestamp", "traits": { - "smithy.api#documentation": "

Creation timestamp of workflow step for APPFLOW_INTEGRATION workflow.

", + "smithy.api#documentation": "

Creation timestamp of workflow step for APPFLOW_INTEGRATION\n workflow.

", "smithy.api#required": {} } }, "LastUpdatedAt": { "target": "com.amazonaws.customerprofiles#timestamp", "traits": { - "smithy.api#documentation": "

Last updated timestamp for workflow step for APPFLOW_INTEGRATION workflow.

", + "smithy.api#documentation": "

Last updated timestamp for workflow step for APPFLOW_INTEGRATION\n workflow.

", "smithy.api#required": {} } } @@ -394,13 +394,13 @@ "Expression": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

Mathematical expression that is performed on attribute items provided in the attribute list. Each element in the expression should follow the structure of \\\"{ObjectTypeName.AttributeName}\\\".

", + "smithy.api#documentation": "

Mathematical expression that is performed on attribute items provided in the attribute\n list. Each element in the expression should follow the structure of\n \\\"{ObjectTypeName.AttributeName}\\\".

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that expression.

", + "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that\n expression.

", "smithy.api#sensitive": {} } }, @@ -463,7 +463,7 @@ "AttributeMatchingModel": { "target": "com.amazonaws.customerprofiles#AttributeMatchingModel", "traits": { - "smithy.api#documentation": "

Configures the AttributeMatchingModel, you can either choose ONE_TO_ONE or \n MANY_TO_MANY.

", + "smithy.api#documentation": "

Configures the AttributeMatchingModel, you can either choose\n ONE_TO_ONE or MANY_TO_MANY.

", "smithy.api#required": {} } }, @@ -527,7 +527,7 @@ "MinAllowedConfidenceScoreForMerging": { "target": "com.amazonaws.customerprofiles#Double0To1", "traits": { - "smithy.api#documentation": "

A number between 0 and 1 that represents the minimum confidence score required for\n profiles within a matching group to be merged during the auto-merge process. A higher\n score means higher similarity required to merge profiles.

" + "smithy.api#documentation": "

A number between 0 and 1 that represents the minimum confidence score required for\n profiles within a matching group to be merged during the auto-merge process. A higher score\n means higher similarity required to merge profiles.

" } } }, @@ -567,7 +567,7 @@ } }, "traits": { - "smithy.api#documentation": "

Batch defines the boundaries for ingestion for each step in APPFLOW_INTEGRATION workflow. APPFLOW_INTEGRATION workflow splits ingestion based on these boundaries.

" + "smithy.api#documentation": "

Batch defines the boundaries for ingestion for each step in\n APPFLOW_INTEGRATION workflow. APPFLOW_INTEGRATION workflow\n splits ingestion based on these boundaries.

" } }, "com.amazonaws.customerprofiles#Batches": { @@ -634,7 +634,7 @@ } }, "traits": { - "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated attribute.

", + "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated\n attribute.

", "smithy.api#sensitive": {} } }, @@ -765,7 +765,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new calculated attribute definition. After creation, new object data ingested \n into Customer Profiles will be included in the calculated attribute, which can be retrieved \n for a profile using the GetCalculatedAttributeForProfile API. \n Defining a calculated attribute makes it available for all profiles within a domain. Each \n calculated attribute can only reference one ObjectType and at most, two fields \n from that ObjectType.

", + "smithy.api#documentation": "

Creates a new calculated attribute definition. After creation, new object data ingested\n into Customer Profiles will be included in the calculated attribute, which can be retrieved\n for a profile using the GetCalculatedAttributeForProfile API. Defining a calculated attribute makes it\n available for all profiles within a domain. Each calculated attribute can only reference\n one ObjectType and at most, two fields from that\n ObjectType.

", "smithy.api#http": { "method": "POST", "uri": "/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}", @@ -807,14 +807,14 @@ "AttributeDetails": { "target": "com.amazonaws.customerprofiles#AttributeDetails", "traits": { - "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that expression.

", + "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that\n expression.

", "smithy.api#required": {} } }, "Conditions": { "target": "com.amazonaws.customerprofiles#Conditions", "traits": { - "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated attribute.

" + "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated\n attribute.

" } }, "Statistic": { @@ -859,13 +859,13 @@ "AttributeDetails": { "target": "com.amazonaws.customerprofiles#AttributeDetails", "traits": { - "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that expression.

" + "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that\n expression.

" } }, "Conditions": { "target": "com.amazonaws.customerprofiles#Conditions", "traits": { - "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated attribute.

" + "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated\n attribute.

" } }, "Statistic": { @@ -883,7 +883,7 @@ "LastUpdatedAt": { "target": "com.amazonaws.customerprofiles#timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently edited.

" + "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently\n edited.

" } }, "Tags": { @@ -923,7 +923,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a domain, which is a container for all customer data, such as customer profile\n attributes, object types, profile keys, and encryption keys. You can create multiple\n domains, and each domain can have multiple third-party integrations.

\n

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can\n be associated with one domain.

\n

Use this API or UpdateDomain to\n enable identity\n resolution: set Matching to true.

\n

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should\n apply.

", + "smithy.api#documentation": "

Creates a domain, which is a container for all customer data, such as customer profile\n attributes, object types, profile keys, and encryption keys. You can create multiple\n domains, and each domain can have multiple third-party integrations.

\n

Each Amazon Connect instance can be associated with only one domain. Multiple\n Amazon Connect instances can be associated with one domain.

\n

Use this API or UpdateDomain to\n enable identity\n resolution: set Matching to true.

\n

To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should\n apply.

\n \n

It is not possible to associate a Customer Profiles domain with an Amazon Connect Instance directly from\n the API. If you would like to create a domain and associate a Customer Profiles domain, use the Amazon Connect\n admin website. For more information, see Enable Customer Profiles.

\n

Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances\n can be associated with one domain.

\n
", "smithy.api#http": { "method": "POST", "uri": "/domains/{DomainName}", @@ -970,7 +970,7 @@ "RuleBasedMatching": { "target": "com.amazonaws.customerprofiles#RuleBasedMatchingRequest", "traits": { - "smithy.api#documentation": "

The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, \n Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration \n in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and \n review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can \n download the results from S3.

" + "smithy.api#documentation": "

The process of matching duplicate profiles using the Rule-Based matching. If\n RuleBasedMatching = true, Amazon Connect Customer Profiles will start\n to match and merge your profiles according to your configuration in the\n RuleBasedMatchingRequest. You can use the ListRuleBasedMatches\n and GetSimilarProfiles API to return and review the results. Also, if you have\n configured ExportingConfig in the RuleBasedMatchingRequest, you\n can download the results from S3.

" } }, "Tags": { @@ -1022,7 +1022,7 @@ "RuleBasedMatching": { "target": "com.amazonaws.customerprofiles#RuleBasedMatchingResponse", "traits": { - "smithy.api#documentation": "

The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, \n Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration \n in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and \n review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can \n download the results from S3.

" + "smithy.api#documentation": "

The process of matching duplicate profiles using the Rule-Based matching. If\n RuleBasedMatching = true, Amazon Connect Customer Profiles will start\n to match and merge your profiles according to your configuration in the\n RuleBasedMatchingRequest. You can use the ListRuleBasedMatches\n and GetSimilarProfiles API to return and review the results. Also, if you have\n configured ExportingConfig in the RuleBasedMatchingRequest, you\n can download the results from S3.

" } }, "CreatedAt": { @@ -1076,7 +1076,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an event stream, which is a subscription to real-time events, such as when profiles are created and \n updated through Amazon Connect Customer Profiles.

\n

Each event stream can be associated with only one Kinesis Data Stream destination in the same region and \n Amazon Web Services account as the customer profiles domain

", + "smithy.api#documentation": "

Creates an event stream, which is a subscription to real-time events, such as when\n profiles are created and updated through Amazon Connect Customer Profiles.

\n

Each event stream can be associated with only one Kinesis Data Stream destination in the\n same region and Amazon Web Services account as the customer profiles domain

", "smithy.api#http": { "method": "POST", "uri": "/domains/{DomainName}/event-streams/{EventStreamName}", @@ -1098,7 +1098,7 @@ "Uri": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

The StreamARN of the destination to deliver profile events to. For example, \n arn:aws:kinesis:region:account-id:stream/stream-name

", + "smithy.api#documentation": "

The StreamARN of the destination to deliver profile events to. For example,\n arn:aws:kinesis:region:account-id:stream/stream-name

", "smithy.api#required": {} } }, @@ -1168,7 +1168,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n\tCreates an integration workflow. An integration workflow is an async process which ingests historic data and sets up an integration for ongoing updates. The supported Amazon AppFlow sources are Salesforce, ServiceNow, and Marketo.\n\t

", + "smithy.api#documentation": "

Creates an integration workflow. An integration workflow is an async process which\n ingests historic data and sets up an integration for ongoing updates. The supported Amazon AppFlow sources are Salesforce, ServiceNow, and Marketo.

", "smithy.api#http": { "method": "POST", "uri": "/domains/{DomainName}/workflows/integrations", @@ -1296,7 +1296,7 @@ "AccountNumber": { "target": "com.amazonaws.customerprofiles#sensitiveString1To255", "traits": { - "smithy.api#documentation": "

A unique account number that you have given to the customer.

" + "smithy.api#documentation": "

An account number that you have given to the customer.

" } }, "AdditionalInformation": { @@ -1617,14 +1617,14 @@ "sdkId": "Customer Profiles", "arnNamespace": "profile", "cloudFormationName": "CustomerProfiles", - "cloudTrailEventSource": "customerprofiles.amazonaws.com", + "cloudTrailEventSource": "profile.amazonaws.com", "endpointPrefix": "profile" }, "aws.auth#sigv4": { "name": "profile" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Connect Customer Profiles\n

Amazon Connect Customer Profiles is a unified customer profile for your contact center that has\n pre-built connectors powered by AppFlow that make it easy to combine customer information\n from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your\n enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.\n If you're new to Amazon Connect, you might find it helpful to review the Amazon Connect Administrator Guide.

", + "smithy.api#documentation": "Amazon Connect Customer Profiles\n

Amazon Connect Customer Profiles is a unified customer profile for your contact\n center that has pre-built connectors powered by AppFlow that make it easy to combine\n customer information from third party applications, such as Salesforce (CRM), ServiceNow\n (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

\n

For more information about the Amazon Connect Customer Profiles feature, see Use Customer\n Profiles in the Amazon Connect Administrator's Guide.

", "smithy.api#title": "Amazon Connect Customer Profiles", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -2429,7 +2429,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing calculated attribute definition. Note that deleting a default calculated attribute \n is possible, however once deleted, you will be unable to undo that action and will need to recreate it on \n your own using the CreateCalculatedAttributeDefinition API if you want it back.

", + "smithy.api#documentation": "

Deletes an existing calculated attribute definition. Note that deleting a default\n calculated attribute is possible, however once deleted, you will be unable to undo that\n action and will need to recreate it on your own using the\n CreateCalculatedAttributeDefinition API if you want it back.

", "smithy.api#http": { "method": "DELETE", "uri": "/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}", @@ -3011,7 +3011,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified workflow and all its corresponding resources. This is an async process.

", + "smithy.api#documentation": "

Deletes the specified workflow and all its corresponding resources. This is an async\n process.

", "smithy.api#http": { "method": "DELETE", "uri": "/domains/{DomainName}/workflows/{WorkflowId}", @@ -3066,7 +3066,7 @@ "Uri": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

The StreamARN of the destination to deliver profile events to. For example, \n arn:aws:kinesis:region:account-id:stream/stream-name.

", + "smithy.api#documentation": "

The StreamARN of the destination to deliver profile events to. For example,\n arn:aws:kinesis:region:account-id:stream/stream-name.

", "smithy.api#required": {} } }, @@ -3151,7 +3151,7 @@ "DetectedProfileObjectTypes": { "target": "com.amazonaws.customerprofiles#DetectedProfileObjectTypes", "traits": { - "smithy.api#documentation": "

Detected ProfileObjectType mappings from given objects. A maximum of one mapping is supported.

" + "smithy.api#documentation": "

Detected ProfileObjectType mappings from given objects. A maximum of one\n mapping is supported.

" } } }, @@ -3165,7 +3165,7 @@ "SourceLastUpdatedTimestampFormat": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

The format of sourceLastUpdatedTimestamp that was detected in fields.

" + "smithy.api#documentation": "

The format of sourceLastUpdatedTimestamp that was detected in\n fields.

" } }, "Fields": { @@ -3263,7 +3263,7 @@ "Uri": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

The StreamARN of the destination to deliver profile events to. For example, \n arn:aws:kinesis:region:account-id:stream/stream-name.

", + "smithy.api#documentation": "

The StreamARN of the destination to deliver profile events to. For example,\n arn:aws:kinesis:region:account-id:stream/stream-name.

", "smithy.api#required": {} } }, @@ -3283,7 +3283,7 @@ "Message": { "target": "com.amazonaws.customerprofiles#string1To1000", "traits": { - "smithy.api#documentation": "

The human-readable string that corresponds to the error or success while enabling the streaming destination.

" + "smithy.api#documentation": "

The human-readable string that corresponds to the error or success while enabling the\n streaming destination.

" } } }, @@ -3687,7 +3687,7 @@ } }, "traits": { - "smithy.api#documentation": "

A data type pair that consists of a KeyName and Values list that were used\n to find a profile returned in response to a SearchProfiles request.

" + "smithy.api#documentation": "

A data type pair that consists of a KeyName and Values list\n that were used to find a profile returned in response to a SearchProfiles request.\n

" } }, "com.amazonaws.customerprofiles#Gender": { @@ -3779,7 +3779,7 @@ "MinAllowedConfidenceScoreForMerging": { "target": "com.amazonaws.customerprofiles#Double0To1", "traits": { - "smithy.api#documentation": "

Minimum confidence score required for profiles within a matching group to be merged during the auto-merge process.

" + "smithy.api#documentation": "

Minimum confidence score required for profiles within a matching group to be merged\n during the auto-merge process.

" } } }, @@ -3849,7 +3849,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides more information on a calculated attribute definition for Customer Profiles.

", + "smithy.api#documentation": "

Provides more information on a calculated attribute definition for Customer\n Profiles.

", "smithy.api#http": { "method": "GET", "uri": "/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}", @@ -3911,7 +3911,7 @@ "LastUpdatedAt": { "target": "com.amazonaws.customerprofiles#timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently edited.

" + "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently\n edited.

" } }, "Statistic": { @@ -3923,13 +3923,13 @@ "Conditions": { "target": "com.amazonaws.customerprofiles#Conditions", "traits": { - "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated attribute.

" + "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated\n attribute.

" } }, "AttributeDetails": { "target": "com.amazonaws.customerprofiles#AttributeDetails", "traits": { - "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that expression.

" + "smithy.api#documentation": "

Mathematical expression and a list of attribute items specified in that\n expression.

" } }, "Tags": { @@ -4027,7 +4027,7 @@ "IsDataPartial": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

Indicates whether the calculated attribute’s value is based on partial data. If data is partial, it is set to true.

" + "smithy.api#documentation": "

Indicates whether the calculated attribute’s value is based on partial data. If data is\n partial, it is set to true.

" } }, "Value": { @@ -4134,7 +4134,7 @@ "RuleBasedMatching": { "target": "com.amazonaws.customerprofiles#RuleBasedMatchingResponse", "traits": { - "smithy.api#documentation": "

The process of matching duplicate profiles using the Rule-Based matching. If RuleBasedMatching = true, \n Amazon Connect Customer Profiles will start to match and merge your profiles according to your configuration \n in the RuleBasedMatchingRequest. You can use the ListRuleBasedMatches and GetSimilarProfiles API to return and \n review the results. Also, if you have configured ExportingConfig in the RuleBasedMatchingRequest, you can \n download the results from S3.

" + "smithy.api#documentation": "

The process of matching duplicate profiles using the Rule-Based matching. If\n RuleBasedMatching = true, Amazon Connect Customer Profiles will start\n to match and merge your profiles according to your configuration in the\n RuleBasedMatchingRequest. You can use the ListRuleBasedMatches\n and GetSimilarProfiles API to return and review the results. Also, if you have\n configured ExportingConfig in the RuleBasedMatchingRequest, you\n can download the results from S3.

" } }, "CreatedAt": { @@ -4522,7 +4522,7 @@ "IsUnstructured": { "target": "com.amazonaws.customerprofiles#optionalBoolean", "traits": { - "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in Amazon \n Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition.

" + "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in\n Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in\n flowDefinition.

" } } }, @@ -4732,6 +4732,18 @@ "smithy.api#documentation": "

The format of your sourceLastUpdatedTimestamp that was previously set\n up.

" } }, + "MaxAvailableProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize0", + "traits": { + "smithy.api#documentation": "

The amount of provisioned profile object max count available.

" + } + }, + "MaxProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize1", + "traits": { + "smithy.api#documentation": "

The amount of profile object max count assigned to the object type.

" + } + }, "Fields": { "target": "com.amazonaws.customerprofiles#FieldMap", "traits": { @@ -4894,7 +4906,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a set of profiles that belong to the same matching group using the matchId or \n profileId. You can also specify the type of matching that you want for finding similar profiles using \n either RULE_BASED_MATCHING or ML_BASED_MATCHING.

", + "smithy.api#documentation": "

Returns a set of profiles that belong to the same matching group using the\n matchId or profileId. You can also specify the type of\n matching that you want for finding similar profiles using either\n RULE_BASED_MATCHING or ML_BASED_MATCHING.

", "smithy.api#http": { "method": "POST", "uri": "/domains/{DomainName}/matches", @@ -5621,7 +5633,7 @@ "LastUpdatedAt": { "target": "com.amazonaws.customerprofiles#timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently edited.

" + "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently\n edited.

" } }, "Tags": { @@ -5683,7 +5695,7 @@ "NextToken": { "target": "com.amazonaws.customerprofiles#token", "traits": { - "smithy.api#documentation": "

The pagination token from the previous call to ListCalculatedAttributeDefinitions.

", + "smithy.api#documentation": "

The pagination token from the previous call to\n ListCalculatedAttributeDefinitions.

", "smithy.api#httpQuery": "next-token" } }, @@ -5711,7 +5723,7 @@ "NextToken": { "target": "com.amazonaws.customerprofiles#token", "traits": { - "smithy.api#documentation": "

The pagination token from the previous call to ListCalculatedAttributeDefinitions.

" + "smithy.api#documentation": "

The pagination token from the previous call to\n ListCalculatedAttributeDefinitions.

" } } }, @@ -5737,7 +5749,7 @@ "IsDataPartial": { "target": "com.amazonaws.customerprofiles#string1To255", "traits": { - "smithy.api#documentation": "

Indicates whether the calculated attribute’s value is based on partial data. If data is partial, it is set to true.

" + "smithy.api#documentation": "

Indicates whether the calculated attribute’s value is based on partial data. If data is\n partial, it is set to true.

" } }, "Value": { @@ -5791,7 +5803,7 @@ "NextToken": { "target": "com.amazonaws.customerprofiles#token", "traits": { - "smithy.api#documentation": "

The pagination token from the previous call to ListCalculatedAttributesForProfile.

", + "smithy.api#documentation": "

The pagination token from the previous call to\n ListCalculatedAttributesForProfile.

", "smithy.api#httpQuery": "next-token" } }, @@ -5835,7 +5847,7 @@ "NextToken": { "target": "com.amazonaws.customerprofiles#token", "traits": { - "smithy.api#documentation": "

The pagination token from the previous call to ListCalculatedAttributesForProfile.

" + "smithy.api#documentation": "

The pagination token from the previous call to\n ListCalculatedAttributesForProfile.

" } } }, @@ -6186,7 +6198,7 @@ "IsUnstructured": { "target": "com.amazonaws.customerprofiles#optionalBoolean", "traits": { - "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in Amazon \n Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition.

" + "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in\n Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in\n flowDefinition.

" } } }, @@ -6314,6 +6326,18 @@ "smithy.api#documentation": "

The timestamp of when the domain was most recently edited.

" } }, + "MaxProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize1", + "traits": { + "smithy.api#documentation": "

The amount of profile object max count assigned to the object type.

" + } + }, + "MaxAvailableProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize0", + "traits": { + "smithy.api#documentation": "

The amount of provisioned profile object max count available.

" + } + }, "Tags": { "target": "com.amazonaws.customerprofiles#TagMap", "traits": { @@ -7081,7 +7105,7 @@ "ConfidenceScore": { "target": "com.amazonaws.customerprofiles#Double", "traits": { - "smithy.api#documentation": "

A number between 0 and 1, where a higher score means higher similarity.\n Examining match confidence scores lets you distinguish between groups of similar records in which\n the system is highly confident (which you may decide to merge), groups of similar records about\n which the system is uncertain (which you may decide to have reviewed by a human),\n and groups of similar records that the system deems to be unlikely (which you may decide to reject).\n Given confidence scores vary as per the data input, it should not be used an absolute\n measure of matching quality.

" + "smithy.api#documentation": "

A number between 0 and 1, where a higher score means higher similarity. Examining match\n confidence scores lets you distinguish between groups of similar records in which the\n system is highly confident (which you may decide to merge), groups of similar records about\n which the system is uncertain (which you may decide to have reviewed by a human), and\n groups of similar records that the system deems to be unlikely (which you may decide to\n reject). Given confidence scores vary as per the data input, it should not be used an\n absolute measure of matching quality.

" } } }, @@ -7213,7 +7237,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies how does the rule-based matching process should match profiles. You can choose\n from the following attributes to build the matching Rule:
  • AccountNumber
  • Address.Address
  • Address.City
  • Address.Country
  • Address.County
  • Address.PostalCode
  • Address.State
  • Address.Province
  • BirthDate
  • BusinessName
  • EmailAddress
  • FirstName
  • Gender
  • LastName
  • MiddleName
  • PhoneNumber
  • Any customized profile attributes that start with the Attributes
" + "smithy.api#documentation": "
Specifies how does the rule-based matching process should match profiles. You can choose\n from the following attributes to build the matching Rule:
  • AccountNumber
  • Address.Address
  • Address.City
  • Address.Country
  • Address.County
  • Address.PostalCode
  • Address.State
  • Address.Province
  • BirthDate
  • BusinessName
  • EmailAddress
  • FirstName
  • Gender
  • LastName
  • MiddleName
  • PhoneNumber
  • Any customized profile attributes that start with the\n Attributes
" } }, "com.amazonaws.customerprofiles#MatchingRuleAttributeList": { @@ -7364,7 +7388,7 @@ "KeyName": { "target": "com.amazonaws.customerprofiles#name", "traits": { - "smithy.api#documentation": "

A searchable identifier of a profile object. The predefined keys you can use to\n search for _asset include: _assetId, _assetName, \n and _serialNumber. The predefined keys you can use to search for _case \n include: _caseId. The predefined keys you can use to search for\n _order include: _orderId.

", + "smithy.api#documentation": "

A searchable identifier of a profile object. The predefined keys you can use to search\n for _asset include: _assetId, _assetName, and\n _serialNumber. The predefined keys you can use to search for\n _case include: _caseId. The predefined keys you can use to\n search for _order include: _orderId.

", "smithy.api#required": {} } }, @@ -7377,7 +7401,7 @@ } }, "traits": { - "smithy.api#documentation": "

The filter applied to ListProfileObjects response to include profile objects with the\n specified index values.

" + "smithy.api#documentation": "

The filter applied to ListProfileObjects response to include profile\n objects with the specified index values.

" } }, "com.amazonaws.customerprofiles#ObjectTypeField": { @@ -7412,7 +7436,7 @@ "StandardIdentifiers": { "target": "com.amazonaws.customerprofiles#StandardIdentifierList", "traits": { - "smithy.api#documentation": "

The types of keys that a ProfileObject can have. Each ProfileObject can have only 1\n UNIQUE key but multiple PROFILE keys. PROFILE, ASSET, CASE, or ORDER means that this key can be\n used to tie an object to a PROFILE, ASSET, CASE, or ORDER respectively. UNIQUE means that it can be\n used to uniquely identify an object. If a key a is marked as SECONDARY, it will be used to\n search for profiles after all other PROFILE keys have been searched. A LOOKUP_ONLY key is\n only used to match a profile but is not persisted to be used for searching of the profile.\n A NEW_ONLY key is only used if the profile does not already exist before the object is\n ingested, otherwise it is only used for matching objects to profiles.

" + "smithy.api#documentation": "

The types of keys that a ProfileObject can have. Each ProfileObject can have only 1\n UNIQUE key but multiple PROFILE keys. PROFILE, ASSET, CASE, or ORDER means that this key\n can be used to tie an object to a PROFILE, ASSET, CASE, or ORDER respectively. UNIQUE means\n that it can be used to uniquely identify an object. If a key a is marked as SECONDARY, it\n will be used to search for profiles after all other PROFILE keys have been searched. A\n LOOKUP_ONLY key is only used to match a profile but is not persisted to be used for\n searching of the profile. A NEW_ONLY key is only used if the profile does not already exist\n before the object is ingested, otherwise it is only used for matching objects to\n profiles.

" } }, "FieldNames": { @@ -7623,7 +7647,7 @@ "AccountNumber": { "target": "com.amazonaws.customerprofiles#sensitiveString1To255", "traits": { - "smithy.api#documentation": "

A unique account number that you have given to the customer.

" + "smithy.api#documentation": "

An account number that you have given to the customer.

" } }, "AdditionalInformation": { @@ -7749,7 +7773,7 @@ "FoundByItems": { "target": "com.amazonaws.customerprofiles#foundByList", "traits": { - "smithy.api#documentation": "

A list of items used to find a profile returned in a SearchProfiles response.\n An item is a key-value(s) pair that matches an attribute in the profile.
If the optional AdditionalSearchKeys parameter was included in the\n SearchProfiles request, the FoundByItems list should be\n interpreted based on the LogicalOperator used in the request:
  • AND - The profile included in the response matched all of the search keys\n specified in the request. The FoundByItems will include all of the key-value(s)\n pairs that were specified in the request (as this is a requirement of AND search logic).
  • OR - The profile included in the response matched at least one of the\n search keys specified in the request. The FoundByItems will\n include each of the key-value(s) pairs that the profile was found by.
The OR relationship is the default behavior if the LogicalOperator parameter is\n not included in the SearchProfiles request.
" + "smithy.api#documentation": "
A list of items used to find a profile returned in a SearchProfiles response.\n An item is a key-value(s) pair that matches an attribute in the profile.
If the optional AdditionalSearchKeys parameter was included in the\n SearchProfiles request, the FoundByItems list should be interpreted\n based on the LogicalOperator used in the request:
  • AND - The profile included in the response matched all of the search\n keys specified in the request. The FoundByItems will include all of the\n key-value(s) pairs that were specified in the request (as this is a requirement of\n AND search logic).
  • OR - The profile included in the response matched at least one of the\n search keys specified in the request. The FoundByItems will include each\n of the key-value(s) pairs that the profile was found by.
The OR relationship is the default behavior if the\n LogicalOperator parameter is not included in the SearchProfiles\n request.

" } }, "PartyTypeString": { @@ -7850,7 +7874,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds an integration between the service and a third-party service, which includes\n Amazon AppFlow and Amazon Connect.

\n

An integration can belong to only one domain.

\n

To add or remove tags on an existing Integration, see TagResource\n /\n UntagResource.

", + "smithy.api#documentation": "

Adds an integration between the service and a third-party service, which includes\n Amazon AppFlow and Amazon Connect.

\n

An integration can belong to only one domain.

\n

To add or remove tags on an existing Integration, see TagResource\n /\n UntagResource.

", "smithy.api#http": { "method": "PUT", "uri": "/domains/{DomainName}/integrations", @@ -7962,7 +7986,7 @@ "IsUnstructured": { "target": "com.amazonaws.customerprofiles#optionalBoolean", "traits": { - "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in Amazon \n Appflow, or with ObjectTypeName equals _unstructured via API/CLI in flowDefinition.

" + "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in\n Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in\n flowDefinition.

" } } }, @@ -8074,7 +8098,7 @@ } ], "traits": { - "smithy.api#documentation": "

Defines a ProfileObjectType.

\n

To add or remove tags on an existing ObjectType, see \n TagResource/UntagResource.

", + "smithy.api#documentation": "

Defines a ProfileObjectType.

\n

To add or remove tags on an existing ObjectType, see \n TagResource/UntagResource.

", "smithy.api#http": { "method": "PUT", "uri": "/domains/{DomainName}/object-types/{ObjectTypeName}", @@ -8111,7 +8135,7 @@ "TemplateId": { "target": "com.amazonaws.customerprofiles#name", "traits": { - "smithy.api#documentation": "

A unique identifier for the object template. For some attributes in the request, the\n service will use the default value from the object template when TemplateId is present. If\n these attributes are present in the request, the service may return a BadRequestException. \n These attributes include: AllowProfileCreation, SourceLastUpdatedTimestampFormat, \n Fields, and Keys. For example, if AllowProfileCreation is set to true when TemplateId is set, the service\n may return a BadRequestException.

" + "smithy.api#documentation": "

A unique identifier for the object template. For some attributes in the request, the\n service will use the default value from the object template when TemplateId is present. If\n these attributes are present in the request, the service may return a\n BadRequestException. These attributes include: AllowProfileCreation,\n SourceLastUpdatedTimestampFormat, Fields, and Keys. For example, if AllowProfileCreation is\n set to true when TemplateId is set, the service may return a\n BadRequestException.

" } }, "ExpirationDays": { @@ -8139,6 +8163,12 @@ "smithy.api#documentation": "

The format of your sourceLastUpdatedTimestamp that was previously set up.\n

" } }, + "MaxProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize1", + "traits": { + "smithy.api#documentation": "

The amount of profile object max count assigned to the object type

" + } + }, "Fields": { "target": "com.amazonaws.customerprofiles#FieldMap", "traits": { @@ -8210,6 +8240,18 @@ "smithy.api#documentation": "

The format of your sourceLastUpdatedTimestamp that was previously set up in\n fields that were parsed using SimpleDateFormat. If you have sourceLastUpdatedTimestamp in your\n field, you must set up sourceLastUpdatedTimestampFormat.

" } }, + "MaxProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize1", + "traits": { + "smithy.api#documentation": "

The amount of profile object max count assigned to the object type.

" + } + }, + "MaxAvailableProfileObjectCount": { + "target": "com.amazonaws.customerprofiles#minSize0", + "traits": { + "smithy.api#documentation": "

The amount of provisioned profile object max count available.

" + } + }, "Fields": { "target": "com.amazonaws.customerprofiles#FieldMap", "traits": { @@ -8303,7 +8345,7 @@ "MatchingRules": { "target": "com.amazonaws.customerprofiles#MatchingRules", "traits": { - "smithy.api#documentation": "

Configures how the rule-based matching process should match profiles. You can have up to 15 \n MatchingRule in the MatchingRules.

" + "smithy.api#documentation": "

Configures how the rule-based matching process should match profiles. You can have up to\n 15 MatchingRule in the MatchingRules.

" } }, "MaxAllowedRuleLevelForMerging": { @@ -8347,7 +8389,7 @@ "MatchingRules": { "target": "com.amazonaws.customerprofiles#MatchingRules", "traits": { - "smithy.api#documentation": "

Configures how the rule-based matching process should match profiles. You can have up to 15 \n MatchingRule in the MatchingRules.

" + "smithy.api#documentation": "

Configures how the rule-based matching process should match profiles. You can have up to\n 15 MatchingRule in the MatchingRules.

" } }, "Status": { @@ -8862,7 +8904,7 @@ } ], "traits": { - "smithy.api#documentation": "

Searches for profiles within a specific domain using one or more predefined search keys\n (e.g., _fullName, _phone, _email, _account, etc.) and/or custom-defined search keys. A search key\n is a data type pair that consists of a KeyName and Values list.

\n

This operation supports searching for profiles with a minimum of 1 key-value(s) pair and up to\n 5 key-value(s) pairs using either AND or OR logic.

", + "smithy.api#documentation": "

Searches for profiles within a specific domain using one or more predefined search keys\n (e.g., _fullName, _phone, _email, _account, etc.) and/or custom-defined search keys. A\n search key is a data type pair that consists of a KeyName and\n Values list.

\n

This operation supports searching for profiles with a minimum of 1 key-value(s) pair and\n up to 5 key-value(s) pairs using either AND or OR logic.

", "smithy.api#http": { "method": "POST", "uri": "/domains/{DomainName}/profiles/search", @@ -8898,7 +8940,7 @@ "KeyName": { "target": "com.amazonaws.customerprofiles#name", "traits": { - "smithy.api#documentation": "

A searchable identifier of a customer profile. The predefined keys you can use\n to search include: _account, _profileId, _assetId, _caseId, _orderId, _fullName, _phone,\n _email, _ctrContactId, _marketoLeadId, _salesforceAccountId, _salesforceContactId,\n _salesforceAssetId, _zendeskUserId, _zendeskExternalId, _zendeskTicketId,\n _serviceNowSystemId, _serviceNowIncidentId, _segmentUserId, _shopifyCustomerId,\n _shopifyOrderId.

", + "smithy.api#documentation": "

A searchable identifier of a customer profile. The predefined keys you can use to search include: _account, _profileId,\n _assetId, _caseId, _orderId, _fullName, _phone, _email, _ctrContactId, _marketoLeadId,\n _salesforceAccountId, _salesforceContactId, _salesforceAssetId, _zendeskUserId,\n _zendeskExternalId, _zendeskTicketId, _serviceNowSystemId, _serviceNowIncidentId,\n _segmentUserId, _shopifyCustomerId, _shopifyOrderId.

", "smithy.api#required": {} } }, @@ -8912,13 +8954,13 @@ "AdditionalSearchKeys": { "target": "com.amazonaws.customerprofiles#additionalSearchKeysList", "traits": { - "smithy.api#documentation": "

A list of AdditionalSearchKey objects that are each searchable identifiers of a\n profile. Each AdditionalSearchKey object contains a KeyName and a\n list of Values associated with that specific key (i.e., a key-value(s) pair).\n These additional search keys will be used in conjunction with the LogicalOperator and the\n required KeyName and Values parameters to search for profiles\n that satisfy the search criteria.

" + "smithy.api#documentation": "

A list of AdditionalSearchKey objects that are each searchable identifiers\n of a profile. Each AdditionalSearchKey object contains a KeyName\n and a list of Values associated with that specific key (i.e., a key-value(s)\n pair). These additional search keys will be used in conjunction with the\n LogicalOperator and the required KeyName and\n Values parameters to search for profiles that satisfy the search criteria.\n

" } }, "LogicalOperator": { "target": "com.amazonaws.customerprofiles#logicalOperator", "traits": { - "smithy.api#documentation": "

Relationship between all specified search keys that will be used to search for\n profiles. This includes the required KeyName and Values parameters\n as well as any key-value(s) pairs specified in the AdditionalSearchKeys list.
This parameter influences which profiles will be returned in the response in the following manner:
  • AND - The response only includes profiles that match all of the search keys.
  • OR - The response includes profiles that match at least one of the search keys.
The OR relationship is the default behavior if this parameter is not included in the request.
" + "smithy.api#documentation": "
Relationship between all specified search keys that will be used to search for profiles.\n This includes the required KeyName and Values parameters as well\n as any key-value(s) pairs specified in the AdditionalSearchKeys list.
This parameter influences which profiles will be returned in the response in the\n following manner:
  • AND - The response only includes profiles that match all of the\n search keys.
  • OR - The response includes profiles that match at least one of the\n search keys.
The OR relationship is the default behavior if this parameter is not\n included in the request.

" } } }, @@ -9863,7 +9905,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an existing calculated attribute definition. When updating the Conditions, note that increasing \n the date range of a calculated attribute will not trigger inclusion of historical data greater than the \n current date range.

", + "smithy.api#documentation": "

Updates an existing calculated attribute definition. When updating the Conditions, note\n that increasing the date range of a calculated attribute will not trigger inclusion of\n historical data greater than the current date range.

", "smithy.api#http": { "method": "PUT", "uri": "/domains/{DomainName}/calculated-attributes/{CalculatedAttributeName}", @@ -9905,7 +9947,7 @@ "Conditions": { "target": "com.amazonaws.customerprofiles#Conditions", "traits": { - "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated attribute.

" + "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated\n attribute.

" } } }, @@ -9943,7 +9985,7 @@ "LastUpdatedAt": { "target": "com.amazonaws.customerprofiles#timestamp", "traits": { - "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently edited.

" + "smithy.api#documentation": "

The timestamp of when the calculated attribute definition was most recently\n edited.

" } }, "Statistic": { @@ -9955,13 +9997,13 @@ "Conditions": { "target": "com.amazonaws.customerprofiles#Conditions", "traits": { - "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated attribute.

" + "smithy.api#documentation": "

The conditions including range, object count, and threshold for the calculated\n attribute.

" } }, "AttributeDetails": { "target": "com.amazonaws.customerprofiles#AttributeDetails", "traits": { - "smithy.api#documentation": "

The mathematical expression and a list of attribute items specified in that expression.

" + "smithy.api#documentation": "

The mathematical expression and a list of attribute items specified in that\n expression.

" } }, "Tags": { @@ -10187,7 +10229,7 @@ "AccountNumber": { "target": "com.amazonaws.customerprofiles#sensitiveString0To255", "traits": { - "smithy.api#documentation": "

A unique account number that you have given to the customer.

" + "smithy.api#documentation": "

An account number that you have given to the customer.

" } }, "PartyType": { @@ -10625,6 +10667,22 @@ "com.amazonaws.customerprofiles#message": { "type": "string" }, + "com.amazonaws.customerprofiles#minSize0": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0 + } + } + }, + "com.amazonaws.customerprofiles#minSize1": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.customerprofiles#name": { "type": "string", "traits": { diff --git a/models/datasync.json b/models/datasync.json index 63dc4e3127..19d7ae436a 100644 --- a/models/datasync.json +++ b/models/datasync.json @@ -8533,6 +8533,12 @@ "smithy.api#enumValue": "QUEUED" } }, + "CANCELLING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLING" + } + }, "LAUNCHING": { "target": "smithy.api#Unit", "traits": { diff --git a/models/datazone.json b/models/datazone.json index 091153192e..2645914872 100644 --- a/models/datazone.json +++ b/models/datazone.json @@ -376,6 +376,20 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.datazone#ActionParameters": { + "type": "union", + "members": { + "awsConsoleLink": { + "target": "com.amazonaws.datazone#AwsConsoleLinkParameters", + "traits": { + "smithy.api#documentation": "

The console link specified as part of the environment action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

" + } + }, "com.amazonaws.datazone#ApplicableAssetTypes": { "type": "list", "member": { @@ -1034,6 +1048,82 @@ "smithy.api#documentation": "

The details of the asset type.

" } }, + "com.amazonaws.datazone#AssociateEnvironmentRole": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#AssociateEnvironmentRoleInput" + }, + "output": { + "target": "com.amazonaws.datazone#AssociateEnvironmentRoleOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates the environment role in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/roles/{environmentRoleArn}" + } + } + }, + "com.amazonaws.datazone#AssociateEnvironmentRoleInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment role is associated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone environment.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentRoleArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the environment role.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#AssociateEnvironmentRoleOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#Attribute": { "type": "string", "traits": { @@ -1084,6 +1174,20 @@ "smithy.api#pattern": "^\\d{12}$" } }, + "com.amazonaws.datazone#AwsConsoleLinkParameters": { + "type": "structure", + "members": { + "uri": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The URI of the console link specified as part of the environment action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameters of the console link specified as part of the environment action.

" + } + }, "com.amazonaws.datazone#AwsRegion": { "type": "string", "traits": { @@ -2575,6 +2679,136 @@ } } }, + "com.amazonaws.datazone#CreateEnvironmentAction": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#CreateEnvironmentActionInput" + }, + "output": { + "target": "com.amazonaws.datazone#CreateEnvironmentActionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an action for the environment, for example, creates a console link for an\n analytics tool that is available in this environment.

", + "smithy.api#http": { + "code": 201, + "method": "POST", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions" + } + } + }, + "com.amazonaws.datazone#CreateEnvironmentActionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment action is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the environment action is created.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the environment action that is being created in the\n environment.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#CreateEnvironmentActionOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the domain in which the environment action is created.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment in which the environment is created.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentActionId", + "traits": { + "smithy.api#documentation": "

The ID of the environment action.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the environment action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#CreateEnvironmentInput": { "type": "structure", "members": { @@ -2624,6 +2858,24 @@ "traits": { "smithy.api#documentation": "

The glossary terms that can be used in this Amazon DataZone environment.

" } + }, + "environmentAccountIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the account in which the environment is being created.

" + } + }, + "environmentAccountRegion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The region of the account in which the environment is being created.

" + } + }, + "environmentBlueprintIdentifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the blueprint with which the environment is being created.

" + } } }, "traits": { @@ -2690,8 +2942,9 @@ "environmentProfileId": { "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "

The ID of the environment profile with which this Amazon DataZone environment was\n created.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The ID of the environment profile with which this Amazon DataZone environment was\n created.

" } }, "awsAccountId": { @@ -4861,6 +5114,9 @@ }, "updatedAt": { "target": "com.amazonaws.datazone#DateTime" + }, + "retainPermissionsOnRevokeFailure": { + "target": "smithy.api#Boolean" } }, "create": { @@ -5444,12 +5700,18 @@ { "target": "com.amazonaws.datazone#AcceptSubscriptionRequest" }, + { + "target": "com.amazonaws.datazone#AssociateEnvironmentRole" + }, { "target": "com.amazonaws.datazone#CancelSubscription" }, { "target": "com.amazonaws.datazone#CreateEnvironment" }, + { + "target": "com.amazonaws.datazone#CreateEnvironmentAction" + }, { "target": "com.amazonaws.datazone#CreateEnvironmentProfile" }, @@ -5480,6 +5742,9 @@ { "target": "com.amazonaws.datazone#DeleteEnvironment" }, + { + "target": "com.amazonaws.datazone#DeleteEnvironmentAction" + }, { "target": "com.amazonaws.datazone#DeleteEnvironmentProfile" }, @@ -5501,9 +5766,15 @@ { "target": "com.amazonaws.datazone#DeleteTimeSeriesDataPoints" }, + { + "target": "com.amazonaws.datazone#DisassociateEnvironmentRole" + }, { "target": "com.amazonaws.datazone#GetEnvironment" }, + { + "target": "com.amazonaws.datazone#GetEnvironmentAction" + }, { "target": "com.amazonaws.datazone#GetEnvironmentBlueprint" }, @@ -5543,6 +5814,9 @@ { "target": "com.amazonaws.datazone#ListDataSourceRunActivities" }, + { + "target": "com.amazonaws.datazone#ListEnvironmentActions" + }, { "target": "com.amazonaws.datazone#ListEnvironmentBlueprints" }, @@ -5615,6 +5889,9 @@ { "target": "com.amazonaws.datazone#UpdateEnvironment" }, + { + "target": "com.amazonaws.datazone#UpdateEnvironmentAction" + }, { "target": "com.amazonaws.datazone#UpdateEnvironmentProfile" }, @@ -6313,6 +6590,13 @@ "smithy.api#httpQuery": "clientToken", "smithy.api#idempotencyToken": {} } + }, + "retainPermissionsOnRevokeFailure": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies that the granted permissions are retained in case of a self-subscribe\n functionality failure for a data source.

", + "smithy.api#httpQuery": "retainPermissionsOnRevokeFailure" + } } }, "traits": { @@ -6441,6 +6725,19 @@ "traits": { "smithy.api#documentation": "

The timestamp of when this data source was updated.

" } + }, + "selfGrantStatus": { + "target": "com.amazonaws.datazone#SelfGrantStatusOutput", + "traits": { + "smithy.api#documentation": "

Specifies the status of the self-granting functionality.

", + "smithy.api#notProperty": {} + } + }, + "retainPermissionsOnRevokeFailure": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies that the granted permissions are retained in case of a self-subscribe\n functionality failure for a data source.

" + } } }, "traits": { @@ -6570,36 +6867,106 @@ "smithy.api#idempotent": {} } }, - "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfiguration": { + "com.amazonaws.datazone#DeleteEnvironmentAction": { "type": "operation", "input": { - "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationInput" + "target": "com.amazonaws.datazone#DeleteEnvironmentActionInput" }, "output": { - "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationOutput" + "target": "smithy.api#Unit" }, "errors": [ { "target": "com.amazonaws.datazone#AccessDeniedException" }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, { "target": "com.amazonaws.datazone#InternalServerException" }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, { "target": "com.amazonaws.datazone#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Deletes the blueprint configuration in Amazon DataZone.

", + "smithy.api#documentation": "

Deletes an action for the environment, for example, deletes a console link for an\n analytics tool that is available in this environment.

", "smithy.api#http": { "code": 204, "method": "DELETE", - "uri": "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}" + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}" }, - "smithy.api#idempotent": {}, - "smithy.api#tags": [ - "Administration" - ] + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentActionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which an environment action is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment where an environment action is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the environment action that is deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationInput" + }, + "output": { + "target": "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the blueprint configuration in Amazon DataZone.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/environment-blueprint-configurations/{environmentBlueprintIdentifier}" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "Administration" + ] } }, "com.amazonaws.datazone#DeleteEnvironmentBlueprintConfigurationInput": { @@ -7672,6 +8039,82 @@ "target": "com.amazonaws.datazone#DetailedGlossaryTerm" } }, + "com.amazonaws.datazone#DisassociateEnvironmentRole": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#DisassociateEnvironmentRoleInput" + }, + "output": { + "target": "com.amazonaws.datazone#DisassociateEnvironmentRoleOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociates the environment role in Amazon DataZone.

", + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/roles/{environmentRoleArn}" + } + } + }, + "com.amazonaws.datazone#DisassociateEnvironmentRoleInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which an environment role is disassociated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The ID of the environment.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentRoleArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ARN of the environment role.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#DisassociateEnvironmentRoleOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#Domain": { "type": "resource", "identifiers": { @@ -7927,12 +8370,67 @@ } } }, + "com.amazonaws.datazone#EnvironmentActionId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + } + }, "com.amazonaws.datazone#EnvironmentActionList": { "type": "list", "member": { "target": "com.amazonaws.datazone#ConfigurableEnvironmentAction" } }, + "com.amazonaws.datazone#EnvironmentActionSummary": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The Amazon DataZone domain ID of the environment action.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The environment ID of the environment action.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentActionId", + "traits": { + "smithy.api#documentation": "

The ID of the environment action.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The environment action description.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details about the specified action configured for an environment. For example, the\n details of the specified console links for an analytics tool that is available in this\n environment.

" + } + }, "com.amazonaws.datazone#EnvironmentBlueprintConfiguration": { "type": "resource", "identifiers": { @@ -8193,7 +8691,7 @@ "com.amazonaws.datazone#EnvironmentProfileId": { "type": "string", "traits": { - "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,36}$" + "smithy.api#pattern": "^[a-zA-Z0-9_-]{0,36}$" } }, "com.amazonaws.datazone#EnvironmentProfileName": { @@ -8443,8 +8941,9 @@ "environmentProfileId": { "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "

The identifier of the environment profile with which the environment was created.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The identifier of the environment profile with which the environment was created.

" } }, "awsAccountId": { @@ -9538,6 +10037,13 @@ "traits": { "smithy.api#documentation": "

The timestamp of when the data source was updated.

" } + }, + "selfGrantStatus": { + "target": "com.amazonaws.datazone#SelfGrantStatusOutput", + "traits": { + "smithy.api#documentation": "

Specifies the status of the self-granting functionality.

", + "smithy.api#notProperty": {} + } } }, "traits": { @@ -9878,6 +10384,122 @@ "smithy.api#readonly": {} } }, + "com.amazonaws.datazone#GetEnvironmentAction": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#GetEnvironmentActionInput" + }, + "output": { + "target": "com.amazonaws.datazone#GetEnvironmentActionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets the specified environment action.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentActionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the GetEnvironmentAction API is\n invoked.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The environment ID of the environment action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the environment action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#GetEnvironmentActionOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment action lives.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The environment ID of the environment action.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentActionId", + "traits": { + "smithy.api#documentation": "

The ID of the environment action.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the environment action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#GetEnvironmentBlueprint": { "type": "operation", "input": { @@ -10214,8 +10836,9 @@ "environmentProfileId": { "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "

The ID of the environment profile with which the environment is created.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The ID of the environment profile with which the environment is created.

" } }, "awsAccountId": { @@ -12711,6 +13334,21 @@ "smithy.api#documentation": "

The configuration details of the Amazon Web Services Glue data source.

" } }, + "com.amazonaws.datazone#GlueSelfGrantStatusOutput": { + "type": "structure", + "members": { + "selfGrantStatusDetails": { + "target": "com.amazonaws.datazone#SelfGrantStatusDetails", + "traits": { + "smithy.api#documentation": "

The details for the self granting status for a Glue data source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the self granting status.

" + } + }, "com.amazonaws.datazone#GrantedEntity": { "type": "union", "members": { @@ -13474,39 +14112,141 @@ "smithy.api#httpQuery": "status" } }, - "maxResults": { - "target": "com.amazonaws.datazone#MaxResultsForListDomains", + "maxResults": { + "target": "com.amazonaws.datazone#MaxResultsForListDomains", + "traits": { + "smithy.api#documentation": "

The maximum number of domains to return in a single call to ListDomains.\n When the number of domains to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that you\n can use in a subsequent call to ListDomains to list the next set of\n domains.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of domains is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of domains, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDomains to list the next set of\n domains.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#ListDomainsOutput": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.datazone#DomainSummaries", + "traits": { + "smithy.api#documentation": "

The results of the ListDomains action.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.datazone#PaginationToken", + "traits": { + "smithy.api#documentation": "

When the number of domains is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of domains, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDomains to list the next set of\n domains.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentActionSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#EnvironmentActionSummary" + } + }, + "com.amazonaws.datazone#ListEnvironmentActions": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#ListEnvironmentActionsInput" + }, + "output": { + "target": "com.amazonaws.datazone#ListEnvironmentActionsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists existing environment actions.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "items" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.datazone#ListEnvironmentActionsInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon DataZone domain in which the environment actions are listed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", "traits": { - "smithy.api#documentation": "

The maximum number of domains to return in a single call to ListDomains.\n When the number of domains to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that you\n can use in a subsequent call to ListDomains to list the next set of\n domains.

", - "smithy.api#httpQuery": "maxResults" + "smithy.api#documentation": "

The ID of the environment whose environment actions are listed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, "nextToken": { "target": "com.amazonaws.datazone#PaginationToken", "traits": { - "smithy.api#documentation": "

When the number of domains is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of domains, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDomains to list the next set of\n domains.

", + "smithy.api#documentation": "

When the number of environment actions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of environment actions, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListEnvironmentActions\n to list the next set of environment actions.

", "smithy.api#httpQuery": "nextToken" } + }, + "maxResults": { + "target": "com.amazonaws.datazone#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of environment actions to return in a single call to\n ListEnvironmentActions. When the number of environment actions to be listed\n is greater than the value of MaxResults, the response contains a\n NextToken value that you can use in a subsequent call to\n ListEnvironmentActions to list the next set of environment actions.

", + "smithy.api#httpQuery": "maxResults" + } } }, "traits": { "smithy.api#input": {} } }, - "com.amazonaws.datazone#ListDomainsOutput": { + "com.amazonaws.datazone#ListEnvironmentActionsOutput": { "type": "structure", "members": { "items": { - "target": "com.amazonaws.datazone#DomainSummaries", + "target": "com.amazonaws.datazone#ListEnvironmentActionSummaries", "traits": { - "smithy.api#documentation": "

The results of the ListDomains action.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The results of ListEnvironmentActions.

" } }, "nextToken": { "target": "com.amazonaws.datazone#PaginationToken", "traits": { - "smithy.api#documentation": "

When the number of domains is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of domains, the response includes a\n pagination token named NextToken. You can specify this NextToken\n value in a subsequent call to ListDomains to list the next set of\n domains.

" + "smithy.api#documentation": "

When the number of environment actions is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of environment actions, the\n response includes a pagination token named NextToken. You can specify this\n NextToken value in a subsequent call to ListEnvironmentActions\n to list the next set of environment actions.

" } } }, @@ -16409,6 +17149,21 @@ "smithy.api#documentation": "

The configuration details of the Amazon Redshift data source.

" } }, + "com.amazonaws.datazone#RedshiftSelfGrantStatusOutput": { + "type": "structure", + "members": { + "selfGrantStatusDetails": { + "target": "com.amazonaws.datazone#SelfGrantStatusDetails", + "traits": { + "smithy.api#documentation": "

The details for the self granting status for an Amazon Redshift data source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details for the self granting status for an Amazon Redshift data source.

" + } + }, "com.amazonaws.datazone#RedshiftServerlessStorage": { "type": "structure", "members": { @@ -17908,6 +18663,121 @@ "smithy.api#output": {} } }, + "com.amazonaws.datazone#SelfGrantStatus": { + "type": "enum", + "members": { + "GRANT_PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_PENDING" + } + }, + "REVOKE_PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_PENDING" + } + }, + "GRANT_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_IN_PROGRESS" + } + }, + "REVOKE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_IN_PROGRESS" + } + }, + "GRANTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANTED" + } + }, + "GRANT_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRANT_FAILED" + } + }, + "REVOKE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REVOKE_FAILED" + } + } + } + }, + "com.amazonaws.datazone#SelfGrantStatusDetail": { + "type": "structure", + "members": { + "databaseName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the database used for the data source.

", + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#required": {} + } + }, + "schemaName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the schema used in the data source.

", + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "status": { + "target": "com.amazonaws.datazone#SelfGrantStatus", + "traits": { + "smithy.api#documentation": "

The self granting status of the data source.

", + "smithy.api#required": {} + } + }, + "failureCause": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The reason why the operation failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details for the self granting status.

" + } + }, + "com.amazonaws.datazone#SelfGrantStatusDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.datazone#SelfGrantStatusDetail" + } + }, + "com.amazonaws.datazone#SelfGrantStatusOutput": { + "type": "union", + "members": { + "glueSelfGrantStatus": { + "target": "com.amazonaws.datazone#GlueSelfGrantStatusOutput", + "traits": { + "smithy.api#documentation": "

The details for the self granting status for a Glue data source.

" + } + }, + "redshiftSelfGrantStatus": { + "target": "com.amazonaws.datazone#RedshiftSelfGrantStatusOutput", + "traits": { + "smithy.api#documentation": "

The details for the self granting status for an Amazon Redshift data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details for the self granting status for a data source.

" + } + }, "com.amazonaws.datazone#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -17958,7 +18828,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 10000 + "max": 100000 } } }, @@ -20205,6 +21075,12 @@ "traits": { "smithy.api#documentation": "

The recommendation to be updated as part of the UpdateDataSource\n action.

" } + }, + "retainPermissionsOnRevokeFailure": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies that the granted permissions are retained in case of a self-subscribe\n functionality failure for a data source.

" + } } }, "traits": { @@ -20339,6 +21215,19 @@ "traits": { "smithy.api#documentation": "

The timestamp of when the data source was updated.

" } + }, + "selfGrantStatus": { + "target": "com.amazonaws.datazone#SelfGrantStatusOutput", + "traits": { + "smithy.api#documentation": "

Specifies the status of the self-granting functionality.

", + "smithy.api#notProperty": {} + } + }, + "retainPermissionsOnRevokeFailure": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies that the granted permissions are retained in case of a self-subscribe\n functionality failure for a data source.

" + } } }, "traits": { @@ -20519,6 +21408,142 @@ } } }, + "com.amazonaws.datazone#UpdateEnvironmentAction": { + "type": "operation", + "input": { + "target": "com.amazonaws.datazone#UpdateEnvironmentActionInput" + }, + "output": { + "target": "com.amazonaws.datazone#UpdateEnvironmentActionOutput" + }, + "errors": [ + { + "target": "com.amazonaws.datazone#AccessDeniedException" + }, + { + "target": "com.amazonaws.datazone#ConflictException" + }, + { + "target": "com.amazonaws.datazone#InternalServerException" + }, + { + "target": "com.amazonaws.datazone#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.datazone#ThrottlingException" + }, + { + "target": "com.amazonaws.datazone#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an environment action.

", + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/v2/domains/{domainIdentifier}/environments/{environmentIdentifier}/actions/{identifier}" + } + } + }, + "com.amazonaws.datazone#UpdateEnvironmentActionInput": { + "type": "structure", + "members": { + "domainIdentifier": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The domain ID of the environment action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "environmentIdentifier": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The environment ID of the environment action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identifier": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the environment action.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

" + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment action.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the environment action.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.datazone#UpdateEnvironmentActionOutput": { + "type": "structure", + "members": { + "domainId": { + "target": "com.amazonaws.datazone#DomainId", + "traits": { + "smithy.api#documentation": "

The domain ID of the environment action.

", + "smithy.api#required": {} + } + }, + "environmentId": { + "target": "com.amazonaws.datazone#EnvironmentId", + "traits": { + "smithy.api#documentation": "

The environment ID of the environment action.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.datazone#EnvironmentActionId", + "traits": { + "smithy.api#documentation": "

The ID of the environment action.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the environment action.

", + "smithy.api#required": {} + } + }, + "parameters": { + "target": "com.amazonaws.datazone#ActionParameters", + "traits": { + "smithy.api#documentation": "

The parameters of the environment action.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the environment action.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.datazone#UpdateEnvironmentInput": { "type": "structure", "members": { @@ -20621,8 +21646,9 @@ "environmentProfileId": { "target": "com.amazonaws.datazone#EnvironmentProfileId", "traits": { - "smithy.api#documentation": "

The profile identifier of the environment.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "", + "smithy.api#documentation": "

The profile identifier of the environment.

" } }, "awsAccountId": { diff --git a/models/direct-connect.json b/models/direct-connect.json index 897ec29330..5839503f07 100644 --- a/models/direct-connect.json +++ b/models/direct-connect.json @@ -2512,7 +2512,33 @@ } ], "traits": { - "smithy.api#documentation": "

Displays the specified connection or all connections in this Region.

" + "smithy.api#documentation": "

Displays the specified connection or all connections in this Region.

", + "smithy.test#smokeTests": [ + { + "id": "DescribeConnectionsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + }, + { + "id": "DescribeConnectionsFailure", + "params": { + "connectionId": "fake-connection" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.directconnect#DescribeConnectionsOnInterconnect": { @@ -5261,7 +5287,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5304,7 +5329,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -5317,7 +5343,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5331,7 +5356,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5354,7 +5378,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5389,7 +5412,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -5400,14 +5422,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -5421,14 +5445,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -5437,11 +5459,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -5452,14 +5474,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -5473,7 +5497,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5493,7 +5516,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -5504,14 +5526,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -5522,9 +5546,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/models/directory-service.json b/models/directory-service.json index b7934404ac..f7c9f940be 100644 --- a/models/directory-service.json +++ b/models/directory-service.json @@ -2378,7 +2378,20 @@ "outputToken": "NextToken", "items": "DirectoryDescriptions", "pageSize": "Limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeDirectoriesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.directoryservice#DescribeDirectoriesRequest": { @@ -3965,7 +3978,7 @@ "sdkId": "Directory Service", "arnNamespace": "ds", "cloudFormationName": "DirectoryService", - "cloudTrailEventSource": "directoryservice.amazonaws.com", + "cloudTrailEventSource": "ds.amazonaws.com", "docId": "ds-2015-04-16", "endpointPrefix": "ds" }, diff --git a/models/dynamodb.json b/models/dynamodb.json index 014ffe20b6..0d8e4d1e42 100644 --- a/models/dynamodb.json +++ b/models/dynamodb.json @@ -1171,7 +1171,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

The BatchWriteItem operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem can transmit up to 16MB of\n data over the network, consisting of up to 25 item put or delete operations. While\n individual items can be up to 400 KB once stored, it's important to note that an item's\n representation might be greater than 400KB while being sent in DynamoDB's JSON format\n for the API call. For more details on this distinction, see Naming Rules and Data Types.

\n \n

\n BatchWriteItem cannot update items. If you perform a BatchWriteItem\n operation on an existing item, that item's values will be overwritten by the\n operation and it will appear like it was updated. To update items, we recommend you\n use the UpdateItem action.

\n
\n

The individual PutItem and DeleteItem operations specified\n in BatchWriteItem are atomic; however BatchWriteItem as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem request with those unprocessed items\n until all items have been processed.

\n

If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchWriteItem returns a\n ProvisionedThroughputExceededException.

\n \n

If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.

\n

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.

\n
\n

With BatchWriteItem, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem does not behave in the same way as individual\n PutItem and DeleteItem calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem does not return deleted items in the response.

\n

If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.

\n

Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.

\n

If one or more of the following is true, DynamoDB rejects the entire batch write\n operation:

\n
    \n
  • \n

    One or more tables specified in the BatchWriteItem request does\n not exist.

    \n
  • \n
  • \n

    Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.

    \n
  • \n
  • \n

    You try to perform multiple operations on the same item in the same\n BatchWriteItem request. For example, you cannot put and delete\n the same item in the same BatchWriteItem request.

    \n
  • \n
  • \n

    Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).

    \n
  • \n
  • \n

    There are more than 25 requests in the batch.

    \n
  • \n
  • \n

    Any individual item in a batch exceeds 400 KB.

    \n
  • \n
  • \n

    The total request size exceeds 16 MB.

    \n
  • \n
", + "smithy.api#documentation": "

The BatchWriteItem operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem can transmit up to 16MB of\n data over the network, consisting of up to 25 item put or delete operations. While\n individual items can be up to 400 KB once stored, it's important to note that an item's\n representation might be greater than 400KB while being sent in DynamoDB's JSON format\n for the API call. For more details on this distinction, see Naming Rules and Data Types.

\n \n

\n BatchWriteItem cannot update items. If you perform a BatchWriteItem\n operation on an existing item, that item's values will be overwritten by the\n operation and it will appear like it was updated. To update items, we recommend you\n use the UpdateItem action.

\n
\n

The individual PutItem and DeleteItem operations specified\n in BatchWriteItem are atomic; however BatchWriteItem as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem request with those unprocessed items\n until all items have been processed.

\n

If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchWriteItem returns a\n ProvisionedThroughputExceededException.

\n \n

If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.

\n

For more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.

\n
\n

With BatchWriteItem, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem does not behave in the same way as individual\n PutItem and DeleteItem calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem does not return deleted items in the response.

\n

If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.

\n

Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.

\n

If one or more of the following is true, DynamoDB rejects the entire batch write\n operation:

\n
    \n
  • \n

    One or more tables specified in the BatchWriteItem request does\n not exist.

    \n
  • \n
  • \n

    Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.

    \n
  • \n
  • \n

    You try to perform multiple operations on the same item in the same\n BatchWriteItem request. For example, you cannot put and delete\n the same item in the same BatchWriteItem request.

    \n
  • \n
  • \n

    Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).

    \n
  • \n
  • \n

    There are more than 25 requests in the batch.

    \n
  • \n
  • \n

    Any individual item in a batch exceeds 400 KB.

    \n
  • \n
  • \n

    The total request size exceeds 16 MB.

    \n
  • \n
  • \n

    Any individual items with keys exceeding the key length limits. For a\n partition key, the limit is 2048 bytes and for a sort key, the limit is 1024\n bytes.

    \n
  • \n
", "smithy.api#examples": [ { "title": "To add multiple items to a table", @@ -1698,7 +1698,7 @@ } }, "traits": { - "smithy.api#documentation": "

The capacity units consumed by an operation. The data returned includes the total\n provisioned throughput consumed, along with statistics for the table and any indexes\n involved in the operation. ConsumedCapacity is only returned if the request\n asked for it. For more information, see Provisioned Throughput in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by an operation. The data returned includes the total\n provisioned throughput consumed, along with statistics for the table and any indexes\n involved in the operation. ConsumedCapacity is only returned if the request\n asked for it. For more information, see Provisioned capacity mode in the Amazon DynamoDB Developer\n Guide.

" } }, "com.amazonaws.dynamodb#ConsumedCapacityMultiple": { @@ -2000,7 +2000,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Creates a global table from an existing table. A global table creates a replication\n relationship between two or more DynamoDB tables with the same table name in the\n provided Regions.

\n \n

This operation only applies to Version\n 2017.11.29 (Legacy) of global tables. We recommend using\n Version 2019.11.21 (Current)\n when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than \n 2017.11.29 (Legacy). To determine which version you are using, see \n Determining the version. \n To update existing global tables from version 2017.11.29 (Legacy) to version\n 2019.11.21 (Current), see \n Updating global tables.\n

\n
\n

If you want to add a new replica table to a global table, each of the following\n conditions must be true:

\n
    \n
  • \n

    The table must have the same primary key as all of the other replicas.

    \n
  • \n
  • \n

    The table must have the same name as all of the other replicas.

    \n
  • \n
  • \n

    The table must have DynamoDB Streams enabled, with the stream containing both\n the new and the old images of the item.

    \n
  • \n
  • \n

    None of the replica tables in the global table can contain any data.

    \n
  • \n
\n

If global secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The global secondary indexes must have the same name.

    \n
  • \n
  • \n

    The global secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
\n

If local secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The local secondary indexes must have the same name.

    \n
  • \n
  • \n

    The local secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
\n \n

Write capacity settings should be set consistently across your replica tables and\n secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the\n write capacity settings for all of your global tables replicas and indexes.

\n

If you prefer to manage write capacity settings manually, you should provision\n equal replicated write capacity units to your replica tables. You should also\n provision equal replicated write capacity units to matching secondary indexes across\n your global table.

\n
" + "smithy.api#documentation": "

Creates a global table from an existing table. A global table creates a replication\n relationship between two or more DynamoDB tables with the same table name in the\n provided Regions.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
\n

If you want to add a new replica table to a global table, each of the following\n conditions must be true:

\n
    \n
  • \n

    The table must have the same primary key as all of the other replicas.

    \n
  • \n
  • \n

    The table must have the same name as all of the other replicas.

    \n
  • \n
  • \n

    The table must have DynamoDB Streams enabled, with the stream containing both\n the new and the old images of the item.

    \n
  • \n
  • \n

    None of the replica tables in the global table can contain any data.

    \n
  • \n
\n

If global secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The global secondary indexes must have the same name.

    \n
  • \n
  • \n

    The global secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
\n

If local secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The local secondary indexes must have the same name.

    \n
  • \n
  • \n

    The local secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
\n \n

Write capacity settings should be set consistently across your replica tables and\n secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage the\n write capacity settings for all of your global tables replicas and indexes.

\n

If you prefer to manage write capacity settings manually, you should provision\n equal replicated write capacity units to your replica tables. You should also\n provision equal replicated write capacity units to matching secondary indexes across\n your global table.

\n
" } }, "com.amazonaws.dynamodb#CreateGlobalTableInput": { @@ -2167,7 +2167,7 @@ "BillingMode": { "target": "com.amazonaws.dynamodb#BillingMode", "traits": { - "smithy.api#documentation": "

Controls how you are charged for read and write throughput and how you manage\n capacity. This setting can be changed later.

\n
    \n
  • \n

    \n PROVISIONED - We recommend using PROVISIONED for\n predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.

    \n
  • \n
  • \n

    \n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for unpredictable workloads. PAY_PER_REQUEST sets the billing mode\n to On-Demand Mode.

    \n
  • \n
" + "smithy.api#documentation": "

Controls how you are charged for read and write throughput and how you manage\n capacity. This setting can be changed later.

\n
    \n
  • \n

    \n PROVISIONED - We recommend using PROVISIONED for\n predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.

    \n
  • \n
  • \n

    \n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for unpredictable workloads. PAY_PER_REQUEST sets the billing mode\n to On-demand capacity mode.

    \n
  • \n
" } }, "ProvisionedThroughput": { @@ -2209,7 +2209,7 @@ "ResourcePolicy": { "target": "com.amazonaws.dynamodb#ResourcePolicy", "traits": { - "smithy.api#documentation": "

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

\n

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

\n

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

" + "smithy.api#documentation": "

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

\n

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

\n

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

\n \n

You need to specify the CreateTable and PutResourcePolicy IAM actions for authorizing a user to create a table with a resource-based policy.

\n
" } }, "OnDemandThroughput": { @@ -2566,7 +2566,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the DeleteItem operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity is\n only returned if the ReturnConsumedCapacity parameter was specified. For\n more information, see Provisioned Throughput in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the DeleteItem operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity is\n only returned if the ReturnConsumedCapacity parameter was specified. For\n more information, see Provisioned capacity mode in the Amazon DynamoDB Developer\n Guide.

" } }, "ItemCollectionMetrics": { @@ -2725,7 +2725,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

The DeleteTable operation deletes a table and all of its items. After a\n DeleteTable request, the specified table is in the\n DELETING state until DynamoDB completes the deletion. If the table is\n in the ACTIVE state, you can delete it. If a table is in\n CREATING or UPDATING states, then DynamoDB returns a\n ResourceInUseException. If the specified table does not exist, DynamoDB\n returns a ResourceNotFoundException. If table is already in the\n DELETING state, no error is returned.

\n \n

This operation only applies to Version 2019.11.21 (Current) \n of global tables.\n

\n
\n \n

DynamoDB might continue to accept data read and write operations, such as\n GetItem and PutItem, on a table in the\n DELETING state until the table deletion is complete.

\n
\n

When you delete a table, any indexes on that table are also deleted.

\n

If you have DynamoDB Streams enabled on the table, then the corresponding stream on\n that table goes into the DISABLED state, and the stream is automatically\n deleted after 24 hours.

\n

Use the DescribeTable action to check the status of the table.

", + "smithy.api#documentation": "

The DeleteTable operation deletes a table and all of its items. After a\n DeleteTable request, the specified table is in the\n DELETING state until DynamoDB completes the deletion. If the table is\n in the ACTIVE state, you can delete it. If a table is in\n CREATING or UPDATING states, then DynamoDB returns a\n ResourceInUseException. If the specified table does not exist, DynamoDB\n returns a ResourceNotFoundException. If table is already in the\n DELETING state, no error is returned.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
\n \n

DynamoDB might continue to accept data read and write operations, such as\n GetItem and PutItem, on a table in the\n DELETING state until the table deletion is complete.

\n
\n

When you delete a table, any indexes on that table are also deleted.

\n

If you have DynamoDB Streams enabled on the table, then the corresponding stream on\n that table goes into the DISABLED state, and the stream is automatically\n deleted after 24 hours.

\n

Use the DescribeTable action to check the status of the table.

", "smithy.api#examples": [ { "title": "To delete a table", @@ -3088,7 +3088,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Returns information about the specified global table.

\n \n

This operation only applies to Version\n 2017.11.29 (Legacy) of global tables. We recommend using \n Version 2019.11.21 (Current)\n when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than \n 2017.11.29 (Legacy). To determine which version you are using, see \n Determining the version. \n To update existing global tables from version 2017.11.29 (Legacy) to version\n 2019.11.21 (Current), see \n Updating global tables.\n

\n
" + "smithy.api#documentation": "

Returns information about the specified global table.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
" } }, "com.amazonaws.dynamodb#DescribeGlobalTableInput": { @@ -3143,7 +3143,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Describes Region-specific settings for a global table.

\n \n

This operation only applies to Version\n 2017.11.29 (Legacy) of global tables. We recommend using\n Version 2019.11.21 (Current)\n when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than \n 2017.11.29 (Legacy). To determine which version you are using, see \n Determining the version. \n To update existing global tables from version 2017.11.29 (Legacy) to version\n 2019.11.21 (Current), see \n Updating global tables.\n

\n
" + "smithy.api#documentation": "

Describes Region-specific settings for a global table.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
" } }, "com.amazonaws.dynamodb#DescribeGlobalTableSettingsInput": { @@ -3388,7 +3388,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Returns information about the table, including the current status of the table, when\n it was created, the primary key schema, and any indexes on the table.

\n \n

This operation only applies to Version 2019.11.21 (Current) \n of global tables.\n

\n
\n \n

If you issue a DescribeTable request immediately after a\n CreateTable request, DynamoDB might return a\n ResourceNotFoundException. This is because\n DescribeTable uses an eventually consistent query, and the metadata\n for your table might not be available at that moment. Wait for a few seconds, and\n then try the DescribeTable request again.

\n
", + "smithy.api#documentation": "

Returns information about the table, including the current status of the table, when\n it was created, the primary key schema, and any indexes on the table.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
\n \n

If you issue a DescribeTable request immediately after a\n CreateTable request, DynamoDB might return a\n ResourceNotFoundException. This is because\n DescribeTable uses an eventually consistent query, and the metadata\n for your table might not be available at that moment. Wait for a few seconds, and\n then try the DescribeTable request again.

\n
", "smithy.waiters#waitable": { "TableExists": { "acceptors": [ @@ -3473,7 +3473,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes auto scaling settings across replicas of the global table at once.

\n \n

This operation only applies to Version 2019.11.21 (Current)\n of global tables.

\n
" + "smithy.api#documentation": "

Describes auto scaling settings across replicas of the global table at once.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).

\n
" } }, "com.amazonaws.dynamodb#DescribeTableReplicaAutoScalingInput": { @@ -5895,7 +5895,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the GetItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Provisioned Throughput in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the GetItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer\n Guide.

" } } }, @@ -7554,7 +7554,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Lists all global tables that have a replica in the specified Region.

\n \n

This operation only applies to Version\n 2017.11.29 (Legacy) of global tables. We recommend using\n Version 2019.11.21 (Current)\n when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than \n 2017.11.29 (Legacy). To determine which version you are using, see \n Determining the version. \n To update existing global tables from version 2017.11.29 (Legacy) to version\n 2019.11.21 (Current), see \n Updating global tables.\n

\n
" + "smithy.api#documentation": "

Lists all global tables that have a replica in the specified Region.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
" } }, "com.amazonaws.dynamodb#ListGlobalTablesInput": { @@ -8600,7 +8600,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the PutItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Provisioned Throughput in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the PutItem operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer\n Guide.

" } }, "ItemCollectionMetrics": { @@ -8925,7 +8925,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the Query operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Provisioned Throughput in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the Query operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see Capacity unit consumption for read operations in the Amazon DynamoDB Developer\n Guide.

" } } }, @@ -10426,7 +10426,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the Scan operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see \n Provisioned Throughput \n in the Amazon DynamoDB Developer Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the Scan operation. The data returned\n includes the total provisioned throughput consumed, along with statistics for the table\n and any indexes involved in the operation. ConsumedCapacity is only\n returned if the ReturnConsumedCapacity parameter was specified. For more\n information, see \n Capacity unit consumption for read operations in the Amazon DynamoDB Developer Guide.

" } } }, @@ -11839,7 +11839,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Adds or removes replicas in the specified global table. The global table must already\n exist to be able to use this operation. Any replica to be added must be empty, have the\n same name as the global table, have the same key schema, have DynamoDB Streams enabled,\n and have the same provisioned and maximum write capacity units.

\n \n

This operation only applies to Version\n 2017.11.29 (Legacy) of global tables. We recommend using\n Version 2019.11.21 (Current)\n when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than \n 2017.11.29 (Legacy). To determine which version you are using, see \n Determining the version. \n To update existing global tables from version 2017.11.29 (Legacy) to version\n 2019.11.21 (Current), see \n Updating global tables.\n

\n
\n \n

\n This operation only applies to Version\n 2017.11.29 of global tables. If you are using global tables Version\n 2019.11.21 you can use UpdateTable instead.\n

\n

\n Although you can use UpdateGlobalTable to add replicas and remove\n replicas in a single request, for simplicity we recommend that you issue separate\n requests for adding or removing replicas.\n

\n
\n

If global secondary indexes are specified, then the following conditions must also be\n met:

\n
    \n
  • \n

    The global secondary indexes must have the same name.

    \n
  • \n
  • \n

    The global secondary indexes must have the same hash key and sort key (if\n present).

    \n
  • \n
  • \n

    The global secondary indexes must have the same provisioned and maximum write\n capacity units.

    \n
  • \n
" + "smithy.api#documentation": "

Adds or removes replicas in the specified global table. The global table must already\n exist to be able to use this operation. Any replica to be added must be empty, have the\n same name as the global table, have the same key schema, have DynamoDB Streams enabled,\n and have the same provisioned and maximum write capacity units.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
\n \n

\n This operation only applies to Version 2017.11.29 (Legacy) of global tables. If you are using global tables Version\n 2019.11.21 (Current), you can use UpdateTable instead.\n

\n

\n Although you can use UpdateGlobalTable to add replicas and remove\n replicas in a single request, for simplicity we recommend that you issue separate\n requests for adding or removing replicas.\n

\n
\n

If global secondary indexes are specified, then the following conditions must also be\n met:

  • The global secondary indexes must have the same name.
  • The global secondary indexes must have the same hash key and sort key (if present).
  • The global secondary indexes must have the same provisioned and maximum write capacity units.
" } }, "com.amazonaws.dynamodb#UpdateGlobalTableInput": { @@ -11913,7 +11913,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Updates settings for a global table.

\n \n

This operation only applies to Version\n 2017.11.29 (Legacy) of global tables. We recommend using\n Version 2019.11.21 (Current)\n when creating new global tables, as it provides greater flexibility, higher efficiency and consumes less write capacity than \n 2017.11.29 (Legacy). To determine which version you are using, see \n Determining the version. \n To update existing global tables from version 2017.11.29 (Legacy) to version\n 2019.11.21 (Current), see \n Updating global tables.\n

\n
" + "smithy.api#documentation": "

Updates settings for a global table.

\n \n

This documentation is for version 2017.11.29 (Legacy) of global tables, which should be avoided for new global tables. Customers should use Global Tables version 2019.11.21 (Current) when possible, because it provides greater flexibility, higher efficiency, and consumes less write capacity than 2017.11.29 (Legacy).

\n

To determine which version you're using, see Determining the global table version you are using. To update existing global tables from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see Upgrading global tables.

\n
" } }, "com.amazonaws.dynamodb#UpdateGlobalTableSettingsInput": { @@ -11929,7 +11929,7 @@ "GlobalTableBillingMode": { "target": "com.amazonaws.dynamodb#BillingMode", "traits": { - "smithy.api#documentation": "

The billing mode of the global table. If GlobalTableBillingMode is not\n specified, the global table defaults to PROVISIONED capacity billing\n mode.

\n
    \n
  • \n

    \n PROVISIONED - We recommend using PROVISIONED for\n predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.

    \n
  • \n
  • \n

    \n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for unpredictable workloads. PAY_PER_REQUEST sets the billing mode\n to On-Demand Mode.

    \n
  • \n
" + "smithy.api#documentation": "

The billing mode of the global table. If GlobalTableBillingMode is not\n specified, the global table defaults to PROVISIONED capacity billing\n mode.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.
  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode.
" } }, "GlobalTableProvisionedWriteCapacityUnits": { @@ -12167,7 +12167,7 @@ "ConsumedCapacity": { "target": "com.amazonaws.dynamodb#ConsumedCapacity", "traits": { - "smithy.api#documentation": "

The capacity units consumed by the UpdateItem operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity is\n only returned if the ReturnConsumedCapacity parameter was specified. For\n more information, see Provisioned Throughput in the Amazon DynamoDB Developer\n Guide.

" + "smithy.api#documentation": "

The capacity units consumed by the UpdateItem operation. The data\n returned includes the total provisioned throughput consumed, along with statistics for\n the table and any indexes involved in the operation. ConsumedCapacity is\n only returned if the ReturnConsumedCapacity parameter was specified. For\n more information, see Capacity unit consumption for write operations in the Amazon DynamoDB Developer\n Guide.

" } }, "ItemCollectionMetrics": { @@ -12362,7 +12362,7 @@ "aws.api#clientDiscoveredEndpoint": { "required": false }, - "smithy.api#documentation": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB\n Streams settings for a given table.

\n \n

This operation only applies to Version 2019.11.21 (Current) \n of global tables.\n

\n
\n

You can only perform one of the following operations at once:

\n
    \n
  • \n

    Modify the provisioned throughput settings of the table.

    \n
  • \n
  • \n

    Remove a global secondary index from the table.

    \n
  • \n
  • \n

    Create a new global secondary index on the table. After the index begins\n backfilling, you can use UpdateTable to perform other\n operations.

    \n
  • \n
\n

\n UpdateTable is an asynchronous operation; while it's executing, the table\n status changes from ACTIVE to UPDATING. While it's\n UPDATING, you can't issue another UpdateTable request.\n When the table returns to the ACTIVE state, the UpdateTable\n operation is complete.

" + "smithy.api#documentation": "

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB\n Streams settings for a given table.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
\n

You can only perform one of the following operations at once:

  • Modify the provisioned throughput settings of the table.
  • Remove a global secondary index from the table.
  • Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations.
\n

\n UpdateTable is an asynchronous operation; while it's executing, the table\n status changes from ACTIVE to UPDATING. While it's\n UPDATING, you can't issue another UpdateTable request.\n When the table returns to the ACTIVE state, the UpdateTable\n operation is complete.

" } }, "com.amazonaws.dynamodb#UpdateTableInput": { @@ -12384,7 +12384,7 @@ "BillingMode": { "target": "com.amazonaws.dynamodb#BillingMode", "traits": { - "smithy.api#documentation": "

Controls how you are charged for read and write throughput and how you manage\n capacity. When switching from pay-per-request to provisioned capacity, initial\n provisioned capacity values must be set. The initial provisioned capacity values are\n estimated based on the consumed read and write capacity of your table and global\n secondary indexes over the past 30 minutes.

\n
    \n
  • \n

    \n PROVISIONED - We recommend using PROVISIONED for\n predictable workloads. PROVISIONED sets the billing mode to Provisioned Mode.

    \n
  • \n
  • \n

    \n PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST\n for unpredictable workloads. PAY_PER_REQUEST sets the billing mode\n to On-Demand Mode.

    \n
  • \n
" + "smithy.api#documentation": "

Controls how you are charged for read and write throughput and how you manage\n capacity. When switching from pay-per-request to provisioned capacity, initial\n provisioned capacity values must be set. The initial provisioned capacity values are\n estimated based on the consumed read and write capacity of your table and global\n secondary indexes over the past 30 minutes.

  • PROVISIONED - We recommend using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to Provisioned capacity mode.
  • PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity mode.
" } }, "ProvisionedThroughput": { @@ -12414,7 +12414,7 @@ "ReplicaUpdates": { "target": "com.amazonaws.dynamodb#ReplicationGroupUpdateList", "traits": { - "smithy.api#documentation": "

A list of replica update actions (create, delete, or update) for the table.

\n \n

This property only applies to Version 2019.11.21 (Current)\n of global tables.\n

\n
" + "smithy.api#documentation": "

A list of replica update actions (create, delete, or update) for the table.

\n \n

For global tables, this property only applies to global tables using Version 2019.11.21 (Current version). \n

\n
" } }, "TableClass": { @@ -12479,7 +12479,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates auto scaling settings on your global tables at once.

\n \n

This operation only applies to Version 2019.11.21 (Current) \n of global tables.\n

\n
" + "smithy.api#documentation": "

Updates auto scaling settings on your global tables at once.

\n \n

For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version).\n

\n
" } }, "com.amazonaws.dynamodb#UpdateTableReplicaAutoScalingInput": { diff --git a/models/ec2.json b/models/ec2.json index 9ca7f79502..fcfa79e9cf 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -288,7 +288,7 @@ "target": "com.amazonaws.ec2#AcceptAddressTransferResult" }, "traits": { - "smithy.api#documentation": "

Accepts an Elastic IP address transfer. For more information, see Accept a transferred Elastic IP address in the Amazon Virtual Private Cloud User Guide.

" + "smithy.api#documentation": "

Accepts an Elastic IP address transfer. For more information, see Accept a transferred Elastic IP address in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#AcceptAddressTransferRequest": { @@ -1415,7 +1415,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details on the Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

" + "smithy.api#documentation": "

Details on the Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#AddressTransferList": { @@ -1538,7 +1538,7 @@ "target": "com.amazonaws.ec2#AllocateAddressResult" }, "traits": { - "smithy.api#documentation": "

Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate \n it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address \n pool and can be allocated to a different Amazon Web Services account.

\n

You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created \n from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own \n IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

\n

If you release an Elastic IP address, you might be able to recover it. You cannot recover\n an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify\n it in this operation.

\n

For more information, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

\n

You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, \n to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

", + "smithy.api#documentation": "

Allocates an Elastic IP address to your Amazon Web Services account. After you allocate the Elastic IP address you can associate \n it with an instance or network interface. After you release an Elastic IP address, it is released to the IP address \n pool and can be allocated to a different Amazon Web Services account.

\n

You can allocate an Elastic IP address from an address pool owned by Amazon Web Services or from an address pool created \n from a public IPv4 address range that you have brought to Amazon Web Services for use with your Amazon Web Services resources using bring your own \n IP addresses (BYOIP). For more information, see Bring Your Own IP Addresses (BYOIP) in the Amazon EC2 User Guide.

\n

If you release an Elastic IP address, you might be able to recover it. You cannot recover\n an Elastic IP address that you released after it is allocated to another Amazon Web Services account. To attempt to recover an Elastic IP address that you released, specify\n it in this operation.

\n

For more information, see Elastic IP Addresses in the Amazon EC2 User Guide.

\n

You can allocate a carrier IP address which is a public IP address from a telecommunication carrier, \n to a network interface which resides in a subnet in a Wavelength Zone (for example an EC2 instance).

", "smithy.api#examples": [ { "title": "To allocate an Elastic IP address", @@ -1578,7 +1578,7 @@ "NetworkBorderGroup": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services\n advertises IP addresses. Use this parameter to limit the IP address to this location. IP\n addresses cannot move between network border groups.

\n

Use DescribeAvailabilityZones to view the network border groups.

" + "smithy.api#documentation": "

A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services\n advertises IP addresses. Use this parameter to limit the IP address to this location. IP\n addresses cannot move between network border groups.

" } }, "CustomerOwnedIpv4Pool": { @@ -1698,7 +1698,7 @@ "target": "com.amazonaws.ec2#AutoPlacement", "traits": { "aws.protocols#ec2QueryName": "AutoPlacement", - "smithy.api#documentation": "

Indicates whether the host accepts any untargeted instance launches that match its\n instance type configuration, or if it only accepts Host tenancy instance launches that\n specify its unique host ID. For more information, see Understanding auto-placement and affinity in the\n Amazon EC2 User Guide.

\n

Default: on\n

", + "smithy.api#documentation": "

Indicates whether the host accepts any untargeted instance launches that match its\n instance type configuration, or if it only accepts Host tenancy instance launches that\n specify its unique host ID. For more information, see Understanding auto-placement and affinity in the\n Amazon EC2 User Guide.

\n

Default: off\n

", "smithy.api#xmlName": "autoPlacement" } }, @@ -1840,7 +1840,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -3134,6 +3134,9 @@ { "target": "com.amazonaws.ec2#DescribeTags" }, + { + "target": "com.amazonaws.ec2#DescribeTrafficMirrorFilterRules" + }, { "target": "com.amazonaws.ec2#DescribeTrafficMirrorFilters" }, @@ -5885,7 +5888,7 @@ "target": "com.amazonaws.ec2#AssignIpv6AddressesResult" }, "traits": { - "smithy.api#documentation": "

Assigns one or more IPv6 addresses to the specified network interface. You can\n specify one or more specific IPv6 addresses, or you can specify the number of IPv6\n addresses to be automatically assigned from within the subnet's IPv6 CIDR block range.\n You can assign as many IPv6 addresses to a network interface as you can assign private\n IPv4 addresses, and the limit varies per instance type. For information, see IP Addresses Per Network Interface Per Instance Type\n in the Amazon Elastic Compute Cloud User Guide.

\n

You must specify either the IPv6 addresses or the IPv6 address count in the request.

\n

You can optionally use Prefix Delegation on the network interface. You must specify\n either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For\n information, see \n Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Assigns one or more IPv6 addresses to the specified network interface. You can\n specify one or more specific IPv6 addresses, or you can specify the number of IPv6\n addresses to be automatically assigned from within the subnet's IPv6 CIDR block range.\n You can assign as many IPv6 addresses to a network interface as you can assign private\n IPv4 addresses, and the limit varies per instance type.

\n

You must specify either the IPv6 addresses or the IPv6 address count in the request.

\n

You can optionally use Prefix Delegation on the network interface. You must specify\n either the IPV6 Prefix Delegation prefixes, or the IPv6 Prefix Delegation count. For\n information, see \n Assigning prefixes to network interfaces in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#AssignIpv6AddressesRequest": { @@ -5976,7 +5979,7 @@ "target": "com.amazonaws.ec2#AssignPrivateIpAddressesResult" }, "traits": { - "smithy.api#documentation": "

Assigns one or more secondary private IP addresses to the specified network interface.

\n

You can specify one or more specific secondary IP addresses, or you can specify the number \n of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. \n The number of secondary IP addresses that you can assign to an instance varies by instance type.\n For information about instance types, see Instance Types in the Amazon Elastic Compute Cloud User Guide. For more information about \n Elastic IP addresses, see Elastic IP Addresses in the Amazon Elastic Compute Cloud User Guide.

\n

When you move a secondary private IP address to another network interface, any Elastic IP address \n that is associated with the IP address is also moved.

\n

Remapping an IP address is an asynchronous operation. When you move an IP address from one network\n interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance\n metadata to confirm that the remapping is complete.

\n

You must specify either the IP addresses or the IP address count in the request.

\n

You can optionally use Prefix Delegation on the network interface. You must specify\n either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For\n information, see \n Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Assigns one or more secondary private IP addresses to the specified network interface.

\n

You can specify one or more specific secondary IP addresses, or you can specify the number \n of secondary IP addresses to be automatically assigned within the subnet's CIDR block range. \n The number of secondary IP addresses that you can assign to an instance varies by instance type.\n For more information about Elastic IP addresses, see Elastic IP Addresses in the Amazon EC2 User Guide.

\n

When you move a secondary private IP address to another network interface, any Elastic IP address \n that is associated with the IP address is also moved.

\n

Remapping an IP address is an asynchronous operation. When you move an IP address from one network\n interface to another, check network/interfaces/macs/mac/local-ipv4s in the instance\n metadata to confirm that the remapping is complete.

\n

You must specify either the IP addresses or the IP address count in the request.

\n

You can optionally use Prefix Delegation on the network interface. You must specify\n either the IPv4 Prefix Delegation prefixes, or the IPv4 Prefix Delegation count. For\n information, see \n Assigning prefixes to network interfaces in the Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To assign a specific secondary private IP address to an interface", @@ -6096,7 +6099,7 @@ "target": "com.amazonaws.ec2#AssignPrivateNatGatewayAddressResult" }, "traits": { - "smithy.api#documentation": "

Assigns one or more private IPv4 addresses to a private NAT gateway. For more information, see \n Work with NAT gateways in the Amazon VPC User Guide.

" + "smithy.api#documentation": "

Assigns private IPv4 addresses to a private NAT gateway. For more information, see \n Work with NAT gateways in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#AssignPrivateNatGatewayAddressRequest": { @@ -6327,7 +6330,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \nFor more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -6375,7 +6378,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

\n

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

\n

For more information, see DHCP options sets\n in the Amazon VPC User Guide.

", + "smithy.api#documentation": "

Associates a set of DHCP options (that you've previously created) with the specified VPC, or associates no DHCP options with the VPC.

\n

After you associate the options with the VPC, any existing instances and all new instances that you launch in that VPC use the options. You don't need to restart or relaunch the instances. They automatically pick up the changes within a few hours, depending on how frequently the instance renews its DHCP lease. You can explicitly renew the lease using the operating system on the instance.

\n

For more information, see DHCP option sets\n in the Amazon VPC User Guide.

", "smithy.api#examples": [ { "title": "To associate a DHCP options set with a VPC", @@ -6774,7 +6777,7 @@ "target": "com.amazonaws.ec2#AssociateNatGatewayAddressResult" }, "traits": { - "smithy.api#documentation": "

Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, \n see Work with NAT gateways in the Amazon VPC User Guide.

\n

By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. For more information, see Elastic IP address quotas in the Amazon VPC User Guide.

\n \n

When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. \n

\n
" + "smithy.api#documentation": "

Associates Elastic IP addresses (EIPs) and private IPv4 addresses with a public NAT gateway. For more information, \n see Work with NAT gateways in the Amazon VPC User Guide.

\n

By default, you can associate up to 2 Elastic IP addresses per public NAT gateway. You can increase the limit by requesting a quota adjustment. \n For more information, see Elastic IP address quotas in the Amazon VPC User Guide.

\n \n

When you associate an EIP or secondary EIPs with a public NAT gateway, the network border group of the EIPs must match the network border group of the Availability Zone (AZ) that the public NAT gateway is in. If it's not the same, the EIP will fail to associate. You can see the network border group for the subnet's AZ by viewing the details of the subnet. Similarly, you can view the network border group of an EIP by viewing the details of the EIP address. For more information about network border groups and EIPs, see Allocate an Elastic IP address in the Amazon VPC User Guide. \n

\n
" } }, "com.amazonaws.ec2#AssociateNatGatewayAddressRequest": { @@ -7193,7 +7196,7 @@ "target": "com.amazonaws.ec2#AssociateTrunkInterfaceResult" }, "traits": { - "smithy.api#documentation": "

Associates a branch network interface with a trunk network interface.

\n

Before you create the association, run the create-network-interface command and set\n --interface-type to trunk. You must also create a network interface for each branch network interface that you want to associate with the trunk network interface.

" + "smithy.api#documentation": "

Associates a branch network interface with a trunk network interface.

\n

Before you create the association, use the CreateNetworkInterface command and set the interface type\n to trunk. You must also create a network interface for \n each branch network interface that you want to associate with the trunk \n network interface.

" } }, "com.amazonaws.ec2#AssociateTrunkInterfaceRequest": { @@ -7230,7 +7233,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to Ensure\n Idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring\n idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -7260,7 +7263,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to Ensure\n Idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring\n idempotency.

", "smithy.api#xmlName": "clientToken" } } @@ -7278,7 +7281,7 @@ "target": "com.amazonaws.ec2#AssociateVpcCidrBlockResult" }, "traits": { - "smithy.api#documentation": "

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block,\n an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that\n you provisioned through bring your own IP addresses (BYOIP).

\n

You must specify one of the following in the request: an IPv4 CIDR block, an IPv6\n pool, or an Amazon-provided IPv6 CIDR block.

\n

For more information about associating CIDR blocks with your VPC and applicable\n restrictions, see IP addressing for your VPCs and subnets \n in the Amazon VPC User Guide.

" + "smithy.api#documentation": "

Associates a CIDR block with your VPC. You can associate a secondary IPv4 CIDR block,\n an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from an IPv6 address pool that\n you provisioned through bring your own IP addresses (BYOIP).

\n

You must specify one of the following in the request: an IPv4 CIDR block, an IPv6\n pool, or an Amazon-provided IPv6 CIDR block.

\n

For more information about associating CIDR blocks with your VPC and applicable\n restrictions, see IP addressing for your VPCs and subnets \n in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#AssociateVpcCidrBlockRequest": { @@ -7874,7 +7877,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -8263,7 +8266,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \nFor more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -8473,19 +8476,19 @@ "smithy.api#documentation": "

Adds the specified inbound (ingress) rules to a security group.

\n

An inbound rule permits instances to receive traffic from the specified IPv4 or IPv6 \n address range, the IP address ranges that are specified by a prefix list, or the instances \n that are associated with a destination security group. For more information, see Security group rules.

\n

You must specify exactly one of the following sources: an IPv4 or IPv6 address range,\n a prefix list, or a security group. You must specify a protocol for each rule (for example, TCP). \n If the protocol is TCP or UDP, you must also specify a port or port range. If the protocol is \n ICMP or ICMPv6, you must also specify the ICMP/ICMPv6 type and code.

\n

Rule changes are propagated to instances associated with the security group as quickly \n as possible. However, a small delay might occur.

\n

For examples of rules that you can add to security groups for specific access scenarios, \n see Security group rules for different use cases in the Amazon EC2 User Guide.

\n

For more information about security group quotas, see Amazon VPC quotas in the Amazon VPC User Guide.

", "smithy.api#examples": [ { - "title": "To add a rule that allows inbound SSH traffic from an IPv4 address range", - "documentation": "This example enables inbound traffic on TCP port 22 (SSH). The rule includes a description to help you identify it later.", + "title": "To add a rule that allows inbound HTTP traffic from another security group", + "documentation": "This example enables inbound traffic on TCP port 80 from the specified security group. The group must be in the same VPC or a peer VPC. Incoming traffic is allowed based on the private IP addresses of instances that are associated with the specified security group.", "input": { - "GroupId": "sg-903004f8", + "GroupId": "sg-111aaa22", "IpPermissions": [ { "IpProtocol": "tcp", - "FromPort": 22, - "ToPort": 22, - "IpRanges": [ + "FromPort": 80, + "ToPort": 80, + "UserIdGroupPairs": [ { - "CidrIp": "203.0.113.0/24", - "Description": "SSH access from the LA office" + "GroupId": "sg-1a2b3c4d", + "Description": "HTTP access from other instances" } ] } @@ -8494,19 +8497,19 @@ "output": {} }, { - "title": "To add a rule that allows inbound HTTP traffic from another security group", - "documentation": "This example enables inbound traffic on TCP port 80 from the specified security group. The group must be in the same VPC or a peer VPC. Incoming traffic is allowed based on the private IP addresses of instances that are associated with the specified security group.", + "title": "To add a rule that allows inbound SSH traffic from an IPv4 address range", + "documentation": "This example enables inbound traffic on TCP port 22 (SSH). The rule includes a description to help you identify it later.", "input": { - "GroupId": "sg-111aaa22", + "GroupId": "sg-903004f8", "IpPermissions": [ { "IpProtocol": "tcp", - "FromPort": 80, - "ToPort": 80, - "UserIdGroupPairs": [ + "FromPort": 22, + "ToPort": 22, + "IpRanges": [ { - "GroupId": "sg-1a2b3c4d", - "Description": "HTTP access from other instances" + "CidrIp": "203.0.113.0/24", + "Description": "SSH access from the LA office" } ] } @@ -10061,7 +10064,7 @@ "target": "com.amazonaws.ec2#CancelReservedInstancesListingResult" }, "traits": { - "smithy.api#documentation": "

Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

\n

For more information, see \n Reserved Instance Marketplace \n in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Cancels the specified Reserved Instance listing in the Reserved Instance Marketplace.

\n

For more information, see Sell in the Reserved Instance\n Marketplace in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CancelReservedInstancesListingRequest": { @@ -10815,7 +10818,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "TotalTargetCapacity", - "smithy.api#documentation": "

The total number of capacity units for which the Capacity Reservation Fleet reserves capacity. \n\t\t\tFor more information, see Total target capacity \n\t\t\tin the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The total number of capacity units for which the Capacity Reservation Fleet reserves\n\t\t\tcapacity. For more information, see Total target\n\t\t\t\tcapacity in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "totalTargetCapacity" } }, @@ -10863,7 +10866,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "AllocationStrategy", - "smithy.api#documentation": "

The strategy used by the Capacity Reservation Fleet to determine which of the specified \n\t\t\tinstance types to use. For more information, see For more information, see \n\t\t\t\n\t\t\t\tAllocation strategy in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The strategy used by the Capacity Reservation Fleet to determine which of the specified\n\t\t\tinstance types to use. For more information, see Allocation\n\t\t\t\tstrategy in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "allocationStrategy" } }, @@ -11559,7 +11562,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides authorization for Amazon to bring a specific IP address range to a specific\n Amazon Web Services account using bring your own IP addresses (BYOIP). For more information, see Configuring your BYOIP address range in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Provides authorization for Amazon to bring a specific IP address range to a specific\n Amazon Web Services account using bring your own IP addresses (BYOIP). For more information, see Configuring your BYOIP address range in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CidrBlock": { @@ -13179,7 +13182,7 @@ } }, "traits": { - "smithy.api#documentation": "

A security group connection tracking configuration that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

A security group connection tracking configuration that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#ConnectionTrackingSpecification": { @@ -13211,7 +13214,7 @@ } }, "traits": { - "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#ConnectionTrackingSpecificationRequest": { @@ -13237,7 +13240,7 @@ } }, "traits": { - "smithy.api#documentation": "

A security group connection tracking specification request that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

A security group connection tracking specification request that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#ConnectionTrackingSpecificationResponse": { @@ -13269,7 +13272,7 @@ } }, "traits": { - "smithy.api#documentation": "

A security group connection tracking specification response that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

A security group connection tracking specification response that enables you to set the idle timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#ConnectivityType": { @@ -13469,7 +13472,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \n For more information, see Ensuring idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \n \tFor more information, see Ensuring idempotency.

" } } }, @@ -13634,7 +13637,7 @@ "target": "com.amazonaws.ec2#CopySnapshotResult" }, "traits": { - "smithy.api#documentation": "

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a\n snapshot within the same Region, from one Region to another, or from a Region to an Outpost. \n You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within \n the same Outpost.

\n

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

\n

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. \n \tCopies of unencrypted snapshots remain unencrypted, unless you enable encryption for the \n \tsnapshot copy operation. By default, encrypted snapshot copies use the default Key Management Service (KMS) \n \tKMS key; however, you can specify a different KMS key. To copy an encrypted \n \tsnapshot that has been shared from another account, you must have permissions for the KMS key \n \tused to encrypt the snapshot.

\n

Snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t snapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon EBS User Guide.

\n

Snapshots created by copying another snapshot have an arbitrary volume ID that should not\n be used for any purpose.

\n

For more information, see Copy an Amazon EBS snapshot in the\n Amazon EBS User Guide.

", + "smithy.api#documentation": "

Copies a point-in-time snapshot of an EBS volume and stores it in Amazon S3. You can copy a\n snapshot within the same Region, from one Region to another, or from a Region to an Outpost. \n You can't copy a snapshot from an Outpost to a Region, from one Outpost to another, or within \n the same Outpost.

\n

You can use the snapshot to create EBS volumes or Amazon Machine Images (AMIs).

\n

When copying snapshots to a Region, copies of encrypted EBS snapshots remain encrypted. \n \tCopies of unencrypted snapshots remain unencrypted, unless you enable encryption for the \n \tsnapshot copy operation. By default, encrypted snapshot copies use the default KMS key; \n \thowever, you can specify a different KMS key. To copy an encrypted \n \tsnapshot that has been shared from another account, you must have permissions for the KMS key \n \tused to encrypt the snapshot.

\n

Snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t snapshots. For more information, see \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon EBS User Guide.

\n

Snapshots created by copying another snapshot have an arbitrary volume ID that should not\n be used for any purpose.

\n

For more information, see Copy an Amazon EBS snapshot in the\n Amazon EBS User Guide.

", "smithy.api#examples": [ { "title": "To copy a snapshot", @@ -13687,7 +13690,7 @@ "target": "com.amazonaws.ec2#KmsKeyId", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption.\n If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is\n specified, the encrypted state must be true.

\n

You can specify the KMS key using any of the following:

\n
    \n
  • \n

    Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    \n
  • \n
  • \n

    Key alias. For example, alias/ExampleAlias.

    \n
  • \n
  • \n

    Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    \n
  • \n
  • \n

    Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

    \n
  • \n
\n

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, \n the action can appear to complete, but eventually fails.

", + "smithy.api#documentation": "

The identifier of the KMS key to use for Amazon EBS encryption.\n If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is\n specified, the encrypted state must be true.

\n

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
  • Key alias. For example, alias/ExampleAlias.
  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
\n

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, \n the action can appear to complete, but eventually fails.

", "smithy.api#xmlName": "kmsKeyId" } }, @@ -13695,7 +13698,7 @@ "target": "com.amazonaws.ec2#CopySnapshotRequestPSU", "traits": { "aws.protocols#ec2QueryName": "PresignedUrl", - "smithy.api#documentation": "

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a\n pre-signed URL. This parameter is optional for unencrypted snapshots. For more information,\n see Query\n requests.

\n

The PresignedUrl should use the snapshot source endpoint, the\n CopySnapshot action, and include the SourceRegion,\n SourceSnapshotId, and DestinationRegion parameters. The\n PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS\n snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic\n that is described in Authenticating Requests: Using Query\n Parameters (Amazon Web Services Signature Version 4) in the Amazon Simple Storage Service API Reference. An\n invalid or improperly signed PresignedUrl will cause the copy operation to fail\n asynchronously, and the snapshot will move to an error state.

", + "smithy.api#documentation": "

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a\n pre-signed URL. This parameter is optional for unencrypted snapshots. For more information,\n see Query\n requests.

\n

The PresignedUrl should use the snapshot source endpoint, the\n CopySnapshot action, and include the SourceRegion,\n SourceSnapshotId, and DestinationRegion parameters. The\n PresignedUrl must be signed using Amazon Web Services Signature Version 4. Because EBS\n snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic\n that is described in \n Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) in the Amazon S3 API Reference. An\n invalid or improperly signed PresignedUrl will cause the copy operation to fail\n asynchronously, and the snapshot will move to an error state.

", "smithy.api#xmlName": "presignedUrl" } }, @@ -13905,7 +13908,7 @@ "target": "com.amazonaws.ec2#CreateCapacityReservationFleetResult" }, "traits": { - "smithy.api#documentation": "

Creates a Capacity Reservation Fleet. For more information, see Create a Capacity \n\t\t\tReservation Fleet in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Creates a Capacity Reservation Fleet. For more information, see Create a\n\t\t\t\tCapacity Reservation Fleet in the\n\t\t\tAmazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CreateCapacityReservationFleetRequest": { @@ -13914,7 +13917,7 @@ "AllocationStrategy": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The strategy used by the Capacity Reservation Fleet to determine which of the \n\t\t\tspecified instance types to use. Currently, only the prioritized \n\t\t\tallocation strategy is supported. For more information, see \n\t\t\t\tAllocation strategy in the Amazon EC2 User Guide.

\n

Valid values: prioritized\n

" + "smithy.api#documentation": "

The strategy used by the Capacity Reservation Fleet to determine which of the specified\n\t\t\tinstance types to use. Currently, only the prioritized allocation strategy\n\t\t\tis supported. For more information, see Allocation\n\t\t\t\tstrategy in the Amazon EC2 User Guide.

\n

Valid values: prioritized\n

" } }, "ClientToken": { @@ -13943,7 +13946,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This \n\t\t\tvalue, together with the instance type weights that you assign to each instance type used by \n\t\t\tthe Fleet determine the number of instances for which the Fleet reserves capacity. Both values \n\t\t\tare based on units that make sense for your workload. For more information, see \n\t\t\t\tTotal target capacity in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This\n\t\t\tvalue, together with the instance type weights that you assign to each instance type\n\t\t\tused by the Fleet, determines the number of instances for which the Fleet reserves\n\t\t\tcapacity. Both values are based on units that make sense for your workload. For more\n\t\t\tinformation, see Total target\n\t\t\t\tcapacity in the Amazon EC2 User Guide.

", "smithy.api#required": {} } }, @@ -14238,7 +14241,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -14350,7 +14353,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \nFor more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -14483,7 +14486,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \nFor more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -14667,7 +14670,7 @@ "BgpAsn": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

For devices that support BGP, the customer gateway's BGP ASN.

\n

Default: 65000

" + "smithy.api#documentation": "

For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.

\n

Default: 65000

\n

Valid values: 1 to 2,147,483,647\n

" } }, "PublicIp": { @@ -14706,7 +14709,7 @@ "IpAddress": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

\n IPv4 address for the customer gateway device's outside interface. The address must be static.\n

" + "smithy.api#documentation": "

IPv4 address for the customer gateway device's outside interface. The address must be\n static. If OutsideIpAddressType in your VPN connection options is set to\n PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If\n OutsideIpAddressType is set to PublicIpv4, you can use a\n public IPv4 address.

" } }, "DryRun": { @@ -14716,6 +14719,12 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually\n making the request, and provides an error response. If you have the required\n permissions, the error response is DryRunOperation. Otherwise, it is\n UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "BgpAsnExtended": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "smithy.api#documentation": "

For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.

\n

Valid values: 2,147,483,648 to 4,294,967,295\n

" + } } }, "traits": { @@ -14847,7 +14856,7 @@ "target": "com.amazonaws.ec2#CreateDhcpOptionsResult" }, "traits": { - "smithy.api#documentation": "

Creates a custom set of DHCP options. After you create a DHCP option set, you associate\n\t it with a VPC. After you associate a DHCP option set with a VPC, all existing and newly \n\t launched instances in the VPC use this set of DHCP options.

\n

The following are the individual DHCP options you can specify. For more information, see \n DHCP options sets \n in the Amazon VPC User Guide.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify region.compute.internal. Otherwise, specify a custom domain name. This value is used to complete unqualified DNS hostnames.
    Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP option set is associated with a VPC that has instances running operating systems that treat the value as a single domain, specify only one domain name.
  • domain-name-servers - The IP addresses of up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a single parameter, separate the IP addresses using commas. To have your instances receive custom DNS hostnames as specified in domain-name, you must specify a custom DNS server.
  • ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four IPv4 addresses and four IPv6 addresses).
  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2. Broadcast and multicast are not supported. For more information about NetBIOS node types, see RFC 2132.
  • ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed.
", + "smithy.api#documentation": "

Creates a custom set of DHCP options. After you create a DHCP option set, you associate\n\t it with a VPC. After you associate a DHCP option set with a VPC, all existing and newly \n\t launched instances in the VPC use this set of DHCP options.

\n

The following are the individual DHCP options you can specify. For more information, see \n DHCP option sets \n in the Amazon VPC User Guide.

  • domain-name - If you're using AmazonProvidedDNS in us-east-1, specify ec2.internal. If you're using AmazonProvidedDNS in any other Region, specify region.compute.internal. Otherwise, specify a custom domain name. This value is used to complete unqualified DNS hostnames.
    Some Linux operating systems accept multiple domain names separated by spaces. However, Windows and other Linux operating systems treat the value as a single domain, which results in unexpected behavior. If your DHCP option set is associated with a VPC that has instances running operating systems that treat the value as a single domain, specify only one domain name.
  • domain-name-servers - The IP addresses of up to four DNS servers, or AmazonProvidedDNS. To specify multiple domain name servers in a single parameter, separate the IP addresses using commas. To have your instances receive custom DNS hostnames as specified in domain-name, you must specify a custom DNS server.
  • ntp-servers - The IP addresses of up to eight Network Time Protocol (NTP) servers (four IPv4 addresses and four IPv6 addresses).
  • netbios-name-servers - The IP addresses of up to four NetBIOS name servers.
  • netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend that you specify 2. Broadcast and multicast are not supported. For more information about NetBIOS node types, see RFC 2132.
  • ipv6-address-preferred-lease-time - A value (in seconds, minutes, hours, or years) for how frequently a running instance with an IPv6 assigned to it goes through DHCPv6 lease renewal. Acceptable values are between 140 and 2147483647 seconds (approximately 68 years). If no value is entered, the default lease time is 140 seconds. If you use long-term addressing for EC2 instances, you can increase the lease time and avoid frequent lease renewal requests. Lease renewal typically occurs when half of the lease time has elapsed.
", "smithy.api#examples": [ { "title": "To create a DHCP options set", @@ -14952,7 +14961,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n\t\t\trequest. For more information, see Ensuring idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n\t\t\trequest. For more information, see Ensuring idempotency.

" } }, "DryRun": { @@ -15261,7 +15270,7 @@ "target": "com.amazonaws.ec2#CreateFlowLogsResult" }, "traits": { - "smithy.api#documentation": "

Creates one or more flow logs to capture information about IP traffic for a specific network interface,\n subnet, or VPC.

\n

Flow log data for a monitored network interface is recorded as flow log records, which are log events \n consisting of fields that describe the traffic flow. For more information, see \n Flow log records \n in the Amazon Virtual Private Cloud User Guide.

\n

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network \n interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all \n of the monitored network interfaces are published to a single log file object that is stored in the specified \n bucket.

\n

For more information, see VPC Flow Logs in the Amazon Virtual Private Cloud User Guide.

" + "smithy.api#documentation": "

Creates one or more flow logs to capture information about IP traffic for a specific network interface,\n subnet, or VPC.

\n

Flow log data for a monitored network interface is recorded as flow log records, which are log events \n consisting of fields that describe the traffic flow. For more information, see \n Flow log records \n in the Amazon VPC User Guide.

\n

When publishing to CloudWatch Logs, flow log records are published to a log group, and each network \n interface has a unique log stream in the log group. When publishing to Amazon S3, flow log records for all \n of the monitored network interfaces are published to a single log file object that is stored in the specified \n bucket.

\n

For more information, see VPC Flow Logs \n in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#CreateFlowLogsRequest": { @@ -15276,7 +15285,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

" } }, "DeliverLogsPermissionArn": { @@ -15348,7 +15357,7 @@ "MaxAggregationInterval": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. \n The possible values are 60 seconds (1 minute) or 600 seconds (10 minutes).\n This parameter must be 60 seconds for transit gateway resource types.

\n

When a network interface is attached to a Nitro-based\n instance, the aggregation interval is always 60 seconds or less, regardless\n of the value that you specify.

\n

Default: 600

" + "smithy.api#documentation": "

The maximum interval of time during which a flow of packets is captured and aggregated into a flow log record. \n The possible values are 60 seconds (1 minute) or 600 seconds (10 minutes).\n This parameter must be 60 seconds for transit gateway resource types.

\n

When a network interface is attached to a Nitro-based\n instance, the aggregation interval is always 60 seconds or less, regardless\n of the value that you specify.

\n

Default: 600

" } }, "DestinationOptions": { @@ -15444,7 +15453,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \n For more information, see Ensuring Idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. \n \tFor more information, see Ensuring Idempotency.

" } }, "TagSpecifications": { @@ -15587,7 +15596,7 @@ "target": "com.amazonaws.ec2#CreateInstanceConnectEndpointResult" }, "traits": { - "smithy.api#documentation": "

Creates an EC2 Instance Connect Endpoint.

\n

An EC2 Instance Connect Endpoint allows you to connect to an instance, without\n requiring the instance to have a public IPv4 address. For more information, see Connect to your instances without requiring a public IPv4 address using EC2\n Instance Connect Endpoint in the Amazon EC2 User\n Guide.

" + "smithy.api#documentation": "

Creates an EC2 Instance Connect Endpoint.

\n

An EC2 Instance Connect Endpoint allows you to connect to an instance, without\n requiring the instance to have a public IPv4 address. For more information, see Connect to your instances without requiring a public IPv4 address using EC2\n Instance Connect Endpoint in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CreateInstanceConnectEndpointRequest": { @@ -15990,7 +15999,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -16065,7 +16074,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -16210,7 +16219,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -16308,7 +16317,7 @@ "target": "com.amazonaws.ec2#CreateLaunchTemplateResult" }, "traits": { - "smithy.api#documentation": "

Creates a launch template.

\n

A launch template contains the parameters to launch an instance. When you launch an\n instance using RunInstances, you can specify a launch template instead\n of providing the launch parameters in the request. For more information, see Launch\n an instance from a launch template in the\n Amazon Elastic Compute Cloud User Guide.

\n

To clone an existing launch template as the basis for a new launch template, use the \n Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. For more \n information, see Create a launch template from an existing launch template in the\n Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Creates a launch template.

\n

A launch template contains the parameters to launch an instance. When you launch an\n instance using RunInstances, you can specify a launch template instead\n of providing the launch parameters in the request. For more information, see Launch\n an instance from a launch template in the\n Amazon EC2 User Guide.

\n

To clone an existing launch template as the basis for a new launch template, use the \n Amazon EC2 console. The API, SDKs, and CLI do not support cloning a template. For more \n information, see Create a launch template from an existing launch template in the\n Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To create a launch template", @@ -16436,7 +16445,7 @@ "target": "com.amazonaws.ec2#CreateLaunchTemplateVersionResult" }, "traits": { - "smithy.api#documentation": "

Creates a new version of a launch template. You must specify an existing launch\n template, either by name or ID. You can determine whether the new version inherits \n parameters from a source version, and add or overwrite parameters as needed.

\n

Launch template versions are numbered in the order in which they are created. You\n can't specify, change, or replace the numbering of launch template versions.

\n

Launch templates are immutable; after you create a launch template, you can't modify\n it. Instead, you can create a new version of the launch template that includes the\n changes that you require.

\n

For more information, see Modify a launch template (manage launch template versions) in the\n Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Creates a new version of a launch template. You must specify an existing launch\n template, either by name or ID. You can determine whether the new version inherits \n parameters from a source version, and add or overwrite parameters as needed.

\n

Launch template versions are numbered in the order in which they are created. You\n can't specify, change, or replace the numbering of launch template versions.

\n

Launch templates are immutable; after you create a launch template, you can't modify\n it. Instead, you can create a new version of the launch template that includes the\n changes that you require.

\n

For more information, see Modify a launch template (manage launch template versions) in the\n Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To create a launch template version", @@ -16530,7 +16539,7 @@ "ResolveAlias": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

If true, and if a Systems Manager parameter is specified for ImageId,\n the AMI ID is displayed in the response for imageID. For more information, see Use a Systems \n Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

\n

Default: false\n

" + "smithy.api#documentation": "

If true, and if a Systems Manager parameter is specified for ImageId,\n the AMI ID is displayed in the response for imageID. For more information, see Use a Systems \n Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

\n

Default: false\n

" } } }, @@ -16891,7 +16900,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the\n request. For more information, see Ensuring\n Idempotency.

\n

Constraints: Up to 255 UTF-8 characters in length.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the\n request. For more information, see Ensuring\n idempotency.

\n

Constraints: Up to 255 UTF-8 characters in length.

", "smithy.api#idempotencyToken": {} } } @@ -16964,7 +16973,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n\t\t\trequest. For more information, see Ensuring idempotency.

\n

Constraint: Maximum 64 ASCII characters.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n\t\t\trequest. For more information, see Ensuring idempotency.

\n

Constraint: Maximum 64 ASCII characters.

", "smithy.api#idempotencyToken": {} } }, @@ -17011,7 +17020,7 @@ "SecondaryPrivateIpAddresses": { "target": "com.amazonaws.ec2#IpList", "traits": { - "smithy.api#documentation": "

Secondary private IPv4 addresses. For more information about secondary addresses, see Create a NAT gateway in the Amazon VPC User Guide.

", + "smithy.api#documentation": "

Secondary private IPv4 addresses. For more information about secondary addresses, see \n Create a NAT gateway in the Amazon VPC User Guide.

", "smithy.api#xmlName": "SecondaryPrivateIpAddress" } }, @@ -17254,7 +17263,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -17320,7 +17329,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -17439,7 +17448,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -17486,7 +17495,7 @@ "target": "com.amazonaws.ec2#CreateNetworkInterfaceResult" }, "traits": { - "smithy.api#documentation": "

Creates a network interface in the specified subnet.

\n

The number of IP addresses you can assign to a network interface varies by instance\n type. For more information, see IP Addresses Per ENI Per\n Instance Type in the Amazon Virtual Private Cloud User Guide.

\n

For more information about network interfaces, see Elastic network interfaces \n in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Creates a network interface in the specified subnet.

\n

The number of IP addresses you can assign to a network interface varies by instance\n type.

\n

For more information about network interfaces, see Elastic network interfaces \n in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CreateNetworkInterfacePermission": { @@ -17679,7 +17688,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -17872,7 +17881,7 @@ "target": "com.amazonaws.ec2#CreateReplaceRootVolumeTaskResult" }, "traits": { - "smithy.api#documentation": "

Replaces the EBS-backed root volume for a running instance with a new \n volume that is restored to the original root volume's launch state, that is restored to a \n specific snapshot taken from the original root volume, or that is restored from an AMI \n that has the same key characteristics as that of the instance.

\n

For more information, see Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Replaces the EBS-backed root volume for a running instance with a new \n volume that is restored to the original root volume's launch state, that is restored to a \n specific snapshot taken from the original root volume, or that is restored from an AMI \n that has the same key characteristics as that of the instance.

\n

For more information, see Replace a root volume in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CreateReplaceRootVolumeTaskRequest": { @@ -17895,7 +17904,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. \n If you do not specify a client token, a randomly generated token is used for the request \n to ensure idempotency. For more information, see Ensuring idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. \n If you do not specify a client token, a randomly generated token is used for the request \n to ensure idempotency. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -17954,7 +17963,7 @@ "target": "com.amazonaws.ec2#CreateReservedInstancesListingResult" }, "traits": { - "smithy.api#documentation": "

Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance\n\t\t\tMarketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your\n\t\t\tStandard Reserved Instances, you can use the DescribeReservedInstances operation.

\n \n

Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. \n Convertible Reserved Instances cannot be sold.

\n
\n

The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

\n

To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance\n Marketplace. After completing the registration process, you can create a Reserved Instance\n Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price\n to receive for them. Your Standard Reserved Instance listings then become available for purchase. To\n view the details of your Standard Reserved Instance listing, you can use the\n DescribeReservedInstancesListings operation.

\n

For more information, see Reserved Instance Marketplace in the\n\t\t\t\tAmazon EC2 User Guide.

" + "smithy.api#documentation": "

Creates a listing for Amazon EC2 Standard Reserved Instances to be sold in the Reserved Instance\n\t\t\tMarketplace. You can submit one Standard Reserved Instance listing at a time. To get a list of your\n\t\t\tStandard Reserved Instances, you can use the DescribeReservedInstances operation.

\n \n

Only Standard Reserved Instances can be sold in the Reserved Instance Marketplace. \n Convertible Reserved Instances cannot be sold.

\n
\n

The Reserved Instance Marketplace matches sellers who want to resell Standard Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

\n

To sell your Standard Reserved Instances, you must first register as a seller in the Reserved Instance\n Marketplace. After completing the registration process, you can create a Reserved Instance\n Marketplace listing of some or all of your Standard Reserved Instances, and specify the upfront price\n to receive for them. Your Standard Reserved Instance listings then become available for purchase. To\n view the details of your Standard Reserved Instance listing, you can use the\n DescribeReservedInstancesListings operation.

\n

For more information, see Sell in the Reserved Instance\n Marketplace in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CreateReservedInstancesListingRequest": { @@ -18327,7 +18336,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -18465,7 +18474,7 @@ "target": "com.amazonaws.ec2#Snapshot" }, "traits": { - "smithy.api#documentation": "

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for\n \tbackups, to make copies of EBS volumes, and to save data before shutting down an\n \tinstance.

\n

You can create snapshots of volumes in a Region and volumes on an Outpost. If you \n \tcreate a snapshot of a volume in a Region, the snapshot must be stored in the same \n \tRegion as the volume. If you create a snapshot of a volume on an Outpost, the snapshot \n \tcan be stored on the same Outpost as the volume, or in the Region for that Outpost.

\n

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the\n source volume are propagated to the snapshot.

\n

You can take a snapshot of an attached volume that is in use. However, snapshots only\n capture data that has been written to your Amazon EBS volume at the time the snapshot command is\n issued; this might exclude any data that has been cached by any applications or the operating\n system. If you can pause any file systems on the volume long enough to take a snapshot, your\n snapshot should be complete. However, if you cannot pause all file writes to the volume, you\n should unmount the volume from within the instance, issue the snapshot command, and then\n remount the volume to ensure a consistent and complete snapshot. You may remount and use your\n volume while the snapshot status is pending.

\n

When you create a snapshot for an EBS volume that serves as a root device, we recommend \n that you stop the instance before taking the snapshot.

\n

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that\n are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes\n and any associated snapshots always remain protected.

\n

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2\n resources in the Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Amazon Elastic Block Store and Amazon EBS encryption in the Amazon EBS User Guide.

", + "smithy.api#documentation": "

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for\n \tbackups, to make copies of EBS volumes, and to save data before shutting down an\n \tinstance.

\n

You can create snapshots of volumes in a Region and volumes on an Outpost. If you \n \tcreate a snapshot of a volume in a Region, the snapshot must be stored in the same \n \tRegion as the volume. If you create a snapshot of a volume on an Outpost, the snapshot \n \tcan be stored on the same Outpost as the volume, or in the Region for that Outpost.

\n

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the\n source volume are propagated to the snapshot.

\n

You can take a snapshot of an attached volume that is in use. However, snapshots only\n capture data that has been written to your Amazon EBS volume at the time the snapshot command is\n issued; this might exclude any data that has been cached by any applications or the operating\n system. If you can pause any file systems on the volume long enough to take a snapshot, your\n snapshot should be complete. However, if you cannot pause all file writes to the volume, you\n should unmount the volume from within the instance, issue the snapshot command, and then\n remount the volume to ensure a consistent and complete snapshot. You may remount and use your\n volume while the snapshot status is pending.

\n

When you create a snapshot for an EBS volume that serves as a root device, we recommend \n that you stop the instance before taking the snapshot.

\n

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that\n are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes\n and any associated snapshots always remain protected.

\n

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2\n resources in the Amazon EC2 User Guide.

\n

For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide.

", "smithy.api#examples": [ { "title": "To create a snapshot", @@ -18615,7 +18624,7 @@ "target": "com.amazonaws.ec2#CreateSpotDatafeedSubscriptionResult" }, "traits": { - "smithy.api#documentation": "

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs.\n You can create one data feed per Amazon Web Services account. For more information, see\n Spot Instance data feed \n in the Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs.\n You can create one data feed per Amazon Web Services account. For more information, see\n Spot Instance data feed \n in the Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To create a Spot Instance datafeed", @@ -18794,7 +18803,7 @@ "target": "com.amazonaws.ec2#CreateSubnetCidrReservationResult" }, "traits": { - "smithy.api#documentation": "

Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations \n in the Amazon Virtual Private Cloud User Guide and Assign prefixes \n to network interfaces in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Creates a subnet CIDR reservation. For more information, see Subnet CIDR reservations \n in the Amazon VPC User Guide and Assign prefixes \n to network interfaces in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#CreateSubnetCidrReservationRequest": { @@ -18877,7 +18886,7 @@ "AvailabilityZone": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The Availability Zone or Local Zone for the subnet.

\n

Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we \n do not necessarily select a different zone for each subnet.

\n

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example\n us-west-2-lax-1a. For information about the Regions that support Local Zones, \n see Local Zones locations.

\n

To create a subnet in an Outpost, set this value to the Availability Zone for the\n Outpost and specify the Outpost ARN.

" + "smithy.api#documentation": "

The Availability Zone or Local Zone for the subnet.

\n

Default: Amazon Web Services selects one for you. If you create more than one subnet in your VPC, we \n do not necessarily select a different zone for each subnet.

\n

To create a subnet in a Local Zone, set this value to the Local Zone ID, for example\n us-west-2-lax-1a. For information about the Regions that support Local Zones, \n see Available Local Zones.

\n

To create a subnet in an Outpost, set this value to the Availability Zone for the\n Outpost and specify the Outpost ARN.

" } }, "AvailabilityZoneId": { @@ -19043,7 +19052,7 @@ "target": "com.amazonaws.ec2#CreateTrafficMirrorFilterResult" }, "traits": { - "smithy.api#documentation": "

Creates a Traffic Mirror filter.

\n

A Traffic Mirror filter is a set of rules that defines the traffic to mirror.

\n

By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule to add Traffic Mirror rules to the filter. The rules you\n add define what traffic gets mirrored. You can also use ModifyTrafficMirrorFilterNetworkServices to mirror supported network services.

" + "smithy.api#documentation": "

Creates a Traffic Mirror filter.

\n

A Traffic Mirror filter is a set of rules that defines the traffic to mirror.

\n

By default, no traffic is mirrored. To mirror traffic, use CreateTrafficMirrorFilterRule \n to add Traffic Mirror rules to the filter. The rules you add define what traffic gets mirrored. \n You can also use ModifyTrafficMirrorFilterNetworkServices to mirror supported network services.

" } }, "com.amazonaws.ec2#CreateTrafficMirrorFilterRequest": { @@ -19071,7 +19080,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -19095,7 +19104,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#xmlName": "clientToken" } } @@ -19200,9 +19209,16 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#idempotencyToken": {} } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

Traffic Mirroring tag specifications.

", + "smithy.api#xmlName": "TagSpecification" + } } }, "traits": { @@ -19224,7 +19240,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#xmlName": "clientToken" } } @@ -19289,7 +19305,7 @@ "VirtualNetworkId": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN\n protocol, see RFC 7348. If you do\n not specify a VirtualNetworkId, an account-wide unique id is chosen at\n random.

" + "smithy.api#documentation": "

The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN\n protocol, see RFC 7348. If you do\n not specify a VirtualNetworkId, an account-wide unique ID is chosen at\n random.

" } }, "Description": { @@ -19314,7 +19330,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -19338,7 +19354,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#xmlName": "clientToken" } } @@ -19396,7 +19412,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -19426,7 +19442,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", "smithy.api#xmlName": "clientToken" } } @@ -19468,7 +19484,7 @@ "target": "com.amazonaws.ec2#CreateTransitGatewayConnectPeerResult" }, "traits": { - "smithy.api#documentation": "

Creates a Connect peer for a specified transit gateway Connect attachment between a\n transit gateway and an appliance.

\n

The peer address and transit gateway address must be the same IP address family (IPv4 or IPv6).

\n

For more information, see Connect peers in the Transit Gateways Guide.

" + "smithy.api#documentation": "

Creates a Connect peer for a specified transit gateway Connect attachment between a\n transit gateway and an appliance.

\n

The peer address and transit gateway address must be the same IP address family (IPv4 or IPv6).

\n

For more information, see Connect peers\n in the Amazon Web Services Transit Gateways Guide.

" } }, "com.amazonaws.ec2#CreateTransitGatewayConnectPeerRequest": { @@ -20442,7 +20458,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -20533,7 +20549,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -20601,7 +20617,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -20779,7 +20795,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -20825,28 +20841,8 @@ "target": "com.amazonaws.ec2#Volume" }, "traits": { - "smithy.api#documentation": "

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

\n

You can create a new empty volume or restore a volume from an EBS snapshot.\n Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume.

\n

You can create encrypted volumes. Encrypted volumes must be attached to instances that \n support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically \n encrypted. For more information, see Amazon EBS encryption\n in the Amazon EBS User Guide.

\n

You can tag your volumes during creation. For more information, see Tag your Amazon EC2\n resources in the Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Create an Amazon EBS volume in the\n Amazon EBS User Guide.

", + "smithy.api#documentation": "

Creates an EBS volume that can be attached to an instance in the same Availability Zone.

\n

You can create a new empty volume or restore a volume from an EBS snapshot.\n Any Amazon Web Services Marketplace product codes from the snapshot are propagated to the volume.

\n

You can create encrypted volumes. Encrypted volumes must be attached to instances that \n support Amazon EBS encryption. Volumes that are created from encrypted snapshots are also automatically \n encrypted. For more information, see Amazon EBS encryption\n in the Amazon EBS User Guide.

\n

You can tag your volumes during creation. For more information, see Tag your Amazon EC2\n resources in the Amazon EC2 User Guide.

\n

For more information, see Create an Amazon EBS volume in the\n Amazon EBS User Guide.

", "smithy.api#examples": [ - { - "title": "To create a new volume", - "documentation": "This example creates an 80 GiB General Purpose (SSD) volume in the Availability Zone ``us-east-1a``.", - "input": { - "AvailabilityZone": "us-east-1a", - "Size": 80, - "VolumeType": "gp2" - }, - "output": { - "AvailabilityZone": "us-east-1a", - "Encrypted": false, - "VolumeType": "gp2", - "VolumeId": "vol-6b60b7c7", - "State": "creating", - "Iops": 240, - "SnapshotId": "", - "CreateTime": "2016-08-29T18:52:32.724Z", - "Size": 80 - } - }, { "title": "To create a new Provisioned IOPS (SSD) volume from a snapshot", "documentation": "This example creates a new Provisioned IOPS (SSD) volume with 1000 provisioned IOPS from a snapshot in the Availability Zone ``us-east-1a``.", @@ -20868,6 +20864,26 @@ "CreateTime": "2016-08-29T18:52:32.724Z", "Size": 500 } + }, + { + "title": "To create a new volume", + "documentation": "This example creates an 80 GiB General Purpose (SSD) volume in the Availability Zone ``us-east-1a``.", + "input": { + "AvailabilityZone": "us-east-1a", + "Size": 80, + "VolumeType": "gp2" + }, + "output": { + "AvailabilityZone": "us-east-1a", + "Encrypted": false, + "VolumeType": "gp2", + "VolumeId": "vol-6b60b7c7", + "State": "creating", + "Iops": 240, + "SnapshotId": "", + "CreateTime": "2016-08-29T18:52:32.724Z", + "Size": 80 + } } ] } @@ -20947,13 +20963,13 @@ "Iops": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents \n the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline \n performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

\n

The following are the supported values for each volume type:

  • gp3: 3,000 - 16,000 IOPS
  • io1: 100 - 64,000 IOPS
  • io2: 100 - 256,000 IOPS
\n

For io2 volumes, you can achieve up to 256,000 IOPS on \ninstances \nbuilt on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

\n

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS.\n This parameter is not supported for gp2, st1, sc1, or standard volumes.

" + "smithy.api#documentation": "

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents \n the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline \n performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

\n

The following are the supported values for each volume type:

  • gp3: 3,000 - 16,000 IOPS
  • io1: 100 - 64,000 IOPS
  • io2: 100 - 256,000 IOPS
\n

For io2 volumes, you can achieve up to 256,000 IOPS on \ninstances \nbuilt on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

\n

This parameter is required for io1 and io2 volumes. The default for gp3 volumes is 3,000 IOPS.\n This parameter is not supported for gp2, st1, sc1, or standard volumes.

" } }, "KmsKeyId": { "target": "com.amazonaws.ec2#KmsKeyId", "traits": { - "smithy.api#documentation": "

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption.\n If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is\n specified, the encrypted state must be true.

\n

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
  • Key alias. For example, alias/ExampleAlias.
  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
\n

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, \n the action can appear to complete, but eventually fails.

" + "smithy.api#documentation": "

The identifier of the KMS key to use for Amazon EBS encryption.\n If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is\n specified, the encrypted state must be true.

\n

You can specify the KMS key using any of the following:

  • Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
  • Key alias. For example, alias/ExampleAlias.
  • Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
  • Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
\n

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, \n the action can appear to complete, but eventually fails.

" } }, "OutpostArn": { @@ -20998,7 +21014,7 @@ "MultiAttachEnabled": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the \n \tvolume to up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is \n \tsupported with io1 and io2 volumes only. For more information, \n \tsee \n \t\tAmazon EBS Multi-Attach in the Amazon EBS User Guide.

" + "smithy.api#documentation": "

Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the \n volume to up to 16 instances built on the Nitro System in the same Availability Zone. This parameter is \n \tsupported with io1 and io2 volumes only. For more information, \n \tsee \n \t\tAmazon EBS Multi-Attach in the Amazon EBS User Guide.

" } }, "Throughput": { @@ -21010,7 +21026,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency \n of the request. For more information, see Ensure \n Idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency \n of the request. For more information, see Ensure \n Idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -21070,7 +21086,7 @@ "target": "com.amazonaws.ec2#CreateVpcEndpointConnectionNotificationResult" }, "traits": { - "smithy.api#documentation": "

Creates a connection notification for a specified VPC endpoint or VPC endpoint\n service. A connection notification notifies you of specific endpoint events. You must\n create an SNS topic to receive notifications. For more information, see Create a Topic in\n the Amazon Simple Notification Service Developer Guide.

\n

You can create a connection notification for interface endpoints only.

" + "smithy.api#documentation": "

Creates a connection notification for a specified VPC endpoint or VPC endpoint\n service. A connection notification notifies you of specific endpoint events. You must\n create an SNS topic to receive notifications. For more information, see Creating an Amazon SNS topic in\n the Amazon SNS Developer Guide.

\n

You can create a connection notification for interface endpoints only.

" } }, "com.amazonaws.ec2#CreateVpcEndpointConnectionNotificationRequest": { @@ -21113,7 +21129,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

" } } }, @@ -21218,7 +21234,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to ensure\n idempotency.

" } }, "PrivateDnsEnabled": { @@ -21327,7 +21343,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n For more information, see How to ensure\n idempotency.

" + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.\n For more information, see How to ensure\n idempotency.

" } }, "TagSpecifications": { @@ -21375,7 +21391,7 @@ "target": "com.amazonaws.ec2#CreateVpcPeeringConnectionResult" }, "traits": { - "smithy.api#documentation": "

Requests a VPC peering connection between two VPCs: a requester VPC that you own and\n\t\t an accepter VPC with which to create the connection. The accepter VPC can belong to\n\t\t another Amazon Web Services account and can be in a different Region to the requester VPC. \n The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

\n \n

Limitations and rules apply to a VPC peering connection. For more information, see \n the limitations section in the VPC Peering Guide.

\n
\n

The owner of the accepter VPC must accept the peering request to activate the peering\n connection. The VPC peering connection request expires after 7 days, after which it\n cannot be accepted or rejected.

\n

If you create a VPC peering connection request between VPCs with overlapping CIDR\n blocks, the VPC peering connection has a status of failed.

" + "smithy.api#documentation": "

Requests a VPC peering connection between two VPCs: a requester VPC that you own and\n\t\t an accepter VPC with which to create the connection. The accepter VPC can belong to\n\t\t another Amazon Web Services account and can be in a different Region to the requester VPC. \n The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

\n \n

Limitations and rules apply to a VPC peering connection. For more information, see \n the VPC peering limitations in the VPC Peering Guide.

\n
\n

The owner of the accepter VPC must accept the peering request to activate the peering\n connection. The VPC peering connection request expires after 7 days, after which it\n cannot be accepted or rejected.

\n

If you create a VPC peering connection request between VPCs with overlapping CIDR\n blocks, the VPC peering connection has a status of failed.

" } }, "com.amazonaws.ec2#CreateVpcPeeringConnectionRequest": { @@ -21804,7 +21820,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "BgpAsn", - "smithy.api#documentation": "

The customer gateway's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

", + "smithy.api#documentation": "

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

\n

Valid values: 1 to 2,147,483,647\n

", "smithy.api#xmlName": "bgpAsn" } }, @@ -21820,7 +21836,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "IpAddress", - "smithy.api#documentation": "

The IP address of the customer gateway device's outside interface.

", + "smithy.api#documentation": "

\n IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.\n

", "smithy.api#xmlName": "ipAddress" } }, @@ -21863,6 +21879,14 @@ "smithy.api#documentation": "

Any tags assigned to the customer gateway.

", "smithy.api#xmlName": "tagSet" } + }, + "BgpAsnExtended": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "BgpAsnExtended", + "smithy.api#documentation": "

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

\n

Valid values: 2,147,483,648 to 4,294,967,295\n

", + "smithy.api#xmlName": "bgpAsnExtended" + } } }, "traits": { @@ -23404,7 +23428,7 @@ "target": "com.amazonaws.ec2#DeleteLaunchTemplateVersionsResult" }, "traits": { - "smithy.api#documentation": "

Deletes one or more versions of a launch template.

\n

You can't delete the default version of a launch template; you must first assign a\n different version as the default. If the default version is the only version for the\n launch template, you must delete the entire launch template using DeleteLaunchTemplate.

\n

You can delete up to 200 launch template versions in a single request. To delete more\n than 200 versions in a single request, use DeleteLaunchTemplate, which\n deletes the launch template and all of its versions.

\n

For more information, see Delete a launch template version in the EC2 User\n Guide.

", + "smithy.api#documentation": "

Deletes one or more versions of a launch template.

\n

You can't delete the default version of a launch template; you must first assign a\n different version as the default. If the default version is the only version for the\n launch template, you must delete the entire launch template using DeleteLaunchTemplate.

\n

You can delete up to 200 launch template versions in a single request. To delete more\n than 200 versions in a single request, use DeleteLaunchTemplate, which\n deletes the launch template and all of its versions.

\n

For more information, see Delete a launch template version in the\n Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To delete a launch template version", @@ -25742,7 +25766,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -25799,7 +25823,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -25862,7 +25886,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -25919,7 +25943,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -26966,7 +26990,7 @@ "target": "com.amazonaws.ec2#DescribeAddressTransfersResult" }, "traits": { - "smithy.api#documentation": "

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

\n

When you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for three days\n after the transfers have been accepted.

", + "smithy.api#documentation": "

Describes an Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

\n

When you transfer an Elastic IP address, there is a two-step handshake\n between the source and transfer Amazon Web Services accounts. When the source account starts the transfer,\n the transfer account has seven days to accept the Elastic IP address\n transfer. During those seven days, the source account can view the\n pending transfer by using this action. After seven days, the\n transfer expires and ownership of the Elastic IP\n address returns to the source\n account. Accepted transfers are visible to the source account for three days\n after the transfers have been accepted.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -27271,7 +27295,7 @@ "target": "com.amazonaws.ec2#DescribeAvailabilityZonesResult" }, "traits": { - "smithy.api#documentation": "

Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to\n you. If there is an event impacting a zone, you can use this request to view the state and any\n provided messages for that zone.

\n

For more information about Availability Zones, Local Zones, and Wavelength Zones, see\n Regions and zones \n in the Amazon Elastic Compute Cloud User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
", + "smithy.api#documentation": "

Describes the Availability Zones, Local Zones, and Wavelength Zones that are available to\n you. If there is an event impacting a zone, you can use this request to view the state and any\n provided messages for that zone.

\n

For more information about Availability Zones, Local Zones, and Wavelength Zones, see\n Regions and zones \n in the Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
", "smithy.api#examples": [ { "title": "To describe your Availability Zones", @@ -27992,7 +28016,7 @@ "target": "com.amazonaws.ec2#DescribeClassicLinkInstancesResult" }, "traits": { - "smithy.api#documentation": "\n

This action is deprecated.

\n
\n

Describes one or more of your linked EC2-Classic instances. This request only returns\n\t\t\tinformation about EC2-Classic instances linked to a VPC through ClassicLink. You cannot\n\t\t\tuse this request to return information about other instances.

", + "smithy.api#documentation": "\n

This action is deprecated.

\n
\n

Describes your linked EC2-Classic instances. This request only returns\n\t\t\tinformation about EC2-Classic instances linked to a VPC through ClassicLink. You cannot\n\t\t\tuse this request to return information about other instances.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -28883,7 +28907,7 @@ "target": "com.amazonaws.ec2#DescribeDhcpOptionsResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your DHCP options sets.

\n

For more information, see DHCP options sets in the\n\t\t\t\tAmazon VPC User Guide.

", + "smithy.api#documentation": "

Describes your DHCP option sets. The default is to describe all your DHCP option sets. \n\t\t Alternatively, you can specify specific DHCP option set IDs or filter the results to\n\t\t include only the DHCP option sets that match specific criteria.

\n

For more information, see DHCP option sets in the\n\t\t\t\tAmazon VPC User Guide.

", "smithy.api#examples": [ { "title": "To describe a DHCP options set", @@ -28938,7 +28962,7 @@ "DhcpOptionsIds": { "target": "com.amazonaws.ec2#DhcpOptionsIdStringList", "traits": { - "smithy.api#documentation": "

The IDs of one or more DHCP options sets.

\n

Default: Describes all your DHCP options sets.

", + "smithy.api#documentation": "

The IDs of DHCP option sets.

", "smithy.api#xmlName": "DhcpOptionsId" } }, @@ -28981,7 +29005,7 @@ "target": "com.amazonaws.ec2#DhcpOptionsList", "traits": { "aws.protocols#ec2QueryName": "DhcpOptionsSet", - "smithy.api#documentation": "

Information about one or more DHCP options sets.

", + "smithy.api#documentation": "

Information about the DHCP options sets.

", "smithy.api#xmlName": "dhcpOptionsSet" } }, @@ -29007,7 +29031,7 @@ "target": "com.amazonaws.ec2#DescribeEgressOnlyInternetGatewaysResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your egress-only internet gateways.

", + "smithy.api#documentation": "

Describes your egress-only internet gateways. The default is to describe all your egress-only internet gateways. \n Alternatively, you can specify specific egress-only internet gateway IDs or filter the results to\n include only the egress-only internet gateways that match specific criteria.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -29098,7 +29122,7 @@ "target": "com.amazonaws.ec2#DescribeElasticGpusResult" }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
\n

Describes the Elastic Graphics accelerator associated with your instances. For more information\n about Elastic Graphics, see Amazon Elastic Graphics.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes the Elastic Graphics accelerator associated with your instances.

" } }, "com.amazonaws.ec2#DescribeElasticGpusMaxResults": { @@ -29123,7 +29147,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" } }, "Filters": { @@ -31267,20 +31291,6 @@ "traits": { "smithy.api#documentation": "

Describes the specified attribute of the specified instance. You can specify only one\n attribute at a time. Valid attribute values are: instanceType |\n kernel | ramdisk | userData |\n disableApiTermination | instanceInitiatedShutdownBehavior\n | rootDeviceName | blockDeviceMapping |\n productCodes | sourceDestCheck | groupSet |\n ebsOptimized | sriovNetSupport\n

", "smithy.api#examples": [ - { - "title": "To describe the instance type", - "documentation": "This example describes the instance type of the specified instance.\n", - "input": { - "InstanceId": "i-1234567890abcdef0", - "Attribute": "instanceType" - }, - "output": { - "InstanceId": "i-1234567890abcdef0", - "InstanceType": { - "Value": "t1.micro" - } - } - }, { "title": "To describe the block device mapping for an instance", "documentation": "This example describes the ``blockDeviceMapping`` attribute of the specified instance.\n", @@ -31311,6 +31321,20 @@ } ] } + }, + { + "title": "To describe the instance type", + "documentation": "This example describes the instance type of the specified instance.\n", + "input": { + "InstanceId": "i-1234567890abcdef0", + "Attribute": "instanceType" + }, + "output": { + "InstanceId": "i-1234567890abcdef0", + "InstanceType": { + "Value": "t1.micro" + } + } } ] } @@ -31942,7 +31966,7 @@ "target": "com.amazonaws.ec2#DescribeInstanceTypeOfferingsResult" }, "traits": { - "smithy.api#documentation": "

Lists the instance types that are offered for the specified location. If no location is specified, the default\n is to list the instance types that are offered in the current Region.

", + "smithy.api#documentation": "

Lists the instance types that are offered for the specified location. If no location is\n specified, the default is to list the instance types that are offered in the current\n Region.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -31963,13 +31987,13 @@ "LocationType": { "target": "com.amazonaws.ec2#LocationType", "traits": { - "smithy.api#documentation": "

The location type.

  • availability-zone - The Availability Zone. When you specify a location filter, it must be an Availability Zone for the current Region.
  • availability-zone-id - The AZ ID. When you specify a location filter, it must be an AZ ID for the current Region.
  • outpost - The Outpost ARN. When you specify a location filter, it must be an Outpost ARN for the current Region.
  • region - The current Region. If you specify a location filter, it must match the current Region.
" + "smithy.api#documentation": "

The location type.

  • availability-zone - The Availability Zone. When you specify a location filter, it must be an Availability Zone for the current Region.
  • availability-zone-id - The AZ ID. When you specify a location filter, it must be an AZ ID for the current Region.
  • outpost - The Outpost ARN. When you specify a location filter, it must be an Outpost ARN for the current Region.
  • region - The current Region. If you specify a location filter, it must match the current Region.
" } }, "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

  • instance-type - The instance type. For a list of possible values, see Instance.
  • location - The location. For a list of possible identifiers, see Regions and Zones.
", + "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

  • instance-type - The instance type. For a list of possible values, see Instance.
  • location - The location. For a list of possible identifiers, see Regions and Zones.
", "smithy.api#xmlName": "Filter" } }, @@ -32023,7 +32047,7 @@ "target": "com.amazonaws.ec2#DescribeInstanceTypesResult" }, "traits": { - "smithy.api#documentation": "

Describes the specified instance types. By default, all instance types for the current Region are described.\n Alternatively, you can filter the results.

", + "smithy.api#documentation": "

Describes the specified instance types. By default, all instance types for the current\n Region are described. Alternatively, you can filter the results.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -32051,7 +32075,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n
    \n
  • \n

    \n auto-recovery-supported - Indicates whether Amazon CloudWatch action based recovery is supported (true | false).

    \n
  • \n
  • \n

    \n bare-metal - Indicates whether it is a bare metal instance type (true | false).

    \n
  • \n
  • \n

    \n burstable-performance-supported - Indicates whether the instance type is a \n burstable performance T instance type (true | false).

    \n
  • \n
  • \n

    \n current-generation - Indicates whether this instance type is the latest\n generation instance type of an instance family (true | false).

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-support - Indicates whether the instance type is\n EBS-optimized (supported | unsupported |\n default).

    \n
  • \n
  • \n

    \n ebs-info.encryption-support - Indicates whether EBS encryption is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required | supported | unsupported).

    \n
  • \n
  • \n

    \n free-tier-eligible - Indicates whether the instance type is eligible to use\n in the free tier (true | false).

    \n
  • \n
  • \n

    \n hibernation-supported - Indicates whether On-Demand hibernation is supported (true | false).

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor (nitro | xen).

    \n
  • \n
  • \n

    \n instance-storage-info.disk.count - The number of local disks.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.size-in-gb - The storage size of each instance storage disk, in\n GB.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.type - The storage technology for the local\n instance storage disks (hdd | ssd).

    \n
  • \n
  • \n

    \n instance-storage-info.encryption-support - Indicates whether data is encrypted at rest \n (required | supported | unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.nvme-support - Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.total-size-in-gb - The total amount of storage available from all local\n instance storage, in GB.

    \n
  • \n
  • \n

    \n instance-storage-supported - Indicates whether the instance type has local\n instance storage (true | false).

    \n
  • \n
  • \n

    \n instance-type - The instance type (for example c5.2xlarge or\n c5*).

    \n
  • \n
  • \n

    \n memory-info.size-in-mib - The memory size.

    \n
  • \n
  • \n

    \n network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic \n Fabric Adapters (EFAs) per instance.

    \n
  • \n
  • \n

    \n network-info.efa-supported - Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true | false).

    \n
  • \n
  • \n

    \n network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n network-info.encryption-in-transit-supported - Indicates whether the instance type \n automatically encrypts in-transit traffic between instances (true | false).

    \n
  • \n
  • \n

    \n network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 addresses per\n network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 addresses per\n network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-supported - Indicates whether the instance type supports IPv6 (true | false).

    \n
  • \n
  • \n

    \n network-info.maximum-network-cards - The maximum number of network cards per\n instance.

    \n
  • \n
  • \n

    \n network-info.maximum-network-interfaces - The maximum number of network interfaces per instance.

    \n
  • \n
  • \n

    \n network-info.network-performance - The network performance (for example, \"25\n Gigabit\").

    \n
  • \n
  • \n

    \n nitro-enclaves-support - Indicates whether Nitro Enclaves is supported (supported |\n unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-support - Indicates whether NitroTPM is supported (supported |\n unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-info.supported-versions - The supported NitroTPM version (2.0).

    \n
  • \n
  • \n

    \n processor-info.supported-architecture - The CPU architecture\n (arm64 | i386 | x86_64).

    \n
  • \n
  • \n

    \n processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in GHz.

    \n
  • \n
  • \n

    \n processor-info.supported-features - The supported CPU features (amd-sev-snp).

    \n
  • \n
  • \n

    \n supported-boot-mode - The boot mode (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n supported-root-device-type - The root device type (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n supported-usage-class - The usage class (on-demand |\n spot).

    \n
  • \n
  • \n

    \n supported-virtualization-type - The virtualization type (hvm |\n paravirtual).

    \n
  • \n
  • \n

    \n vcpu-info.default-cores - The default number of cores for the instance type.

    \n
  • \n
  • \n

    \n vcpu-info.default-threads-per-core - The default number of threads per core for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.default-vcpus - The default number of vCPUs for the instance type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-cores - The number of cores that can be configured for the instance type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-threads-per-core - The number of threads per core that can be configured for the instance type.\n For example, \"1\" or \"1,2\".

    \n
  • \n
", + "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n
    \n
  • \n

    \n auto-recovery-supported - Indicates whether Amazon CloudWatch action\n based recovery is supported (true | false).

    \n
  • \n
  • \n

    \n bare-metal - Indicates whether it is a bare metal instance type\n (true | false).

    \n
  • \n
  • \n

    \n burstable-performance-supported - Indicates whether the instance type is a\n burstable performance T instance type (true | false).

    \n
  • \n
  • \n

    \n current-generation - Indicates whether this instance type is the latest\n generation instance type of an instance family (true | false).

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage\n operations per second for an EBS-optimized instance type.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.

    \n
  • \n
  • \n

    \n ebs-info.ebs-optimized-support - Indicates whether the instance type is\n EBS-optimized (supported | unsupported |\n default).

    \n
  • \n
  • \n

    \n ebs-info.encryption-support - Indicates whether EBS encryption is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n ebs-info.nvme-support - Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n free-tier-eligible - Indicates whether the instance type is eligible to use\n in the free tier (true | false).

    \n
  • \n
  • \n

    \n hibernation-supported - Indicates whether On-Demand hibernation is supported\n (true | false).

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor (nitro | xen).

    \n
  • \n
  • \n

    \n instance-storage-info.disk.count - The number of local disks.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.size-in-gb - The storage size of each instance\n storage disk, in GB.

    \n
  • \n
  • \n

    \n instance-storage-info.disk.type - The storage technology for the local\n instance storage disks (hdd | ssd).

    \n
  • \n
  • \n

    \n instance-storage-info.encryption-support - Indicates whether data is\n encrypted at rest (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.nvme-support - Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required | supported\n | unsupported).

    \n
  • \n
  • \n

    \n instance-storage-info.total-size-in-gb - The total amount of storage\n available from all local instance storage, in GB.

    \n
  • \n
  • \n

    \n instance-storage-supported - Indicates whether the instance type has local\n instance storage (true | false).

    \n
  • \n
  • \n

    \n instance-type - The instance type (for example c5.2xlarge or\n c5*).

    \n
  • \n
  • \n

    \n memory-info.size-in-mib - The memory size.

    \n
  • \n
  • \n

    \n network-info.efa-info.maximum-efa-interfaces - The maximum number of Elastic\n Fabric Adapters (EFAs) per instance.

    \n
  • \n
  • \n

    \n network-info.efa-supported - Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true | false).

    \n
  • \n
  • \n

    \n network-info.ena-support - Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required | supported |\n unsupported).

    \n
  • \n
  • \n

    \n network-info.encryption-in-transit-supported - Indicates whether the instance\n type automatically encrypts in-transit traffic between instances (true | false).

    \n
  • \n
  • \n

    \n network-info.ipv4-addresses-per-interface - The maximum number of private\n IPv4 addresses per network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-addresses-per-interface - The maximum number of private\n IPv6 addresses per network interface.

    \n
  • \n
  • \n

    \n network-info.ipv6-supported - Indicates whether the instance type supports\n IPv6 (true | false).

    \n
  • \n
  • \n

    \n network-info.maximum-network-cards - The maximum number of network cards per\n instance.

    \n
  • \n
  • \n

    \n network-info.maximum-network-interfaces - The maximum number of network\n interfaces per instance.

    \n
  • \n
  • \n

    \n network-info.network-performance - The network performance (for example, \"25\n Gigabit\").

    \n
  • \n
  • \n

    \n nitro-enclaves-support - Indicates whether Nitro Enclaves is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-support - Indicates whether NitroTPM is supported\n (supported | unsupported).

    \n
  • \n
  • \n

    \n nitro-tpm-info.supported-versions - The supported NitroTPM version\n (2.0).

    \n
  • \n
  • \n

    \n processor-info.supported-architecture - The CPU architecture\n (arm64 | i386 | x86_64).

    \n
  • \n
  • \n

    \n processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in\n GHz.

    \n
  • \n
  • \n

    \n processor-info.supported-features - The supported CPU features\n (amd-sev-snp).

    \n
  • \n
  • \n

    \n supported-boot-mode - The boot mode (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n supported-root-device-type - The root device type (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n supported-usage-class - The usage class (on-demand |\n spot).

    \n
  • \n
  • \n

    \n supported-virtualization-type - The virtualization type (hvm |\n paravirtual).

    \n
  • \n
  • \n

    \n vcpu-info.default-cores - The default number of cores for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.default-threads-per-core - The default number of threads per core\n for the instance type.

    \n
  • \n
  • \n

    \n vcpu-info.default-vcpus - The default number of vCPUs for the instance\n type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-cores - The number of cores that can be configured for the\n instance type.

    \n
  • \n
  • \n

    \n vcpu-info.valid-threads-per-core - The number of threads per core that can be\n configured for the instance type. For example, \"1\" or \"1,2\".

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -32118,14 +32142,14 @@ "output": {} }, { - "title": "To describe the instances with a specific instance type", - "documentation": "This example describes the instances with the t2.micro instance type.", + "title": "To describe the instances with a specific tag", + "documentation": "This example describes the instances with the Purpose=test tag.", "input": { "Filters": [ { - "Name": "instance-type", + "Name": "tag:Purpose", "Values": [ - "t2.micro" + "test" ] } ] @@ -32133,14 +32157,14 @@ "output": {} }, { - "title": "To describe the instances with a specific tag", - "documentation": "This example describes the instances with the Purpose=test tag.", + "title": "To describe the instances with a specific instance type", + "documentation": "This example describes the instances with the t2.micro instance type.", "input": { "Filters": [ { - "Name": "tag:Purpose", + "Name": "instance-type", "Values": [ - "test" + "t2.micro" ] } ] @@ -32326,7 +32350,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n iam-instance-profile.name - The instance profile associated with\n the instance. Specified as an name.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or\n a Capacity Block (spot | scheduled | capacity-block).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.network-card-index - The index of the network card.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.deny-all-igw-traffic - A Boolean that indicates whether \n a network interface with an IPv6 address is unreachable from the public internet.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-address - The IPv6 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this\n is the primary IPv6 address.

    \n
  • \n
  • \n

    \n network-interface.ipv6-native - A Boolean that indicates whether this is\n an IPv6 only network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.outpost-arn - The ARN of the Outpost.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available) | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-key - The key of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-value - The value of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the\n instance.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n iam-instance-profile.name - The instance profile associated with\n the instance. Specified as a name.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or\n a Capacity Block (spot | scheduled | capacity-block).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.network-card-index - The index of the network card.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.deny-all-igw-traffic - A Boolean that indicates whether \n a network interface with an IPv6 address is unreachable from the public internet.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-address - The IPv6 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this\n is the primary IPv6 address.

    \n
  • \n
  • \n

    \n network-interface.ipv6-native - A Boolean that indicates whether this is\n an IPv6 only network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.outpost-arn - The ARN of the Outpost.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-key - The key of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-value - The value of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the instance.\n This can only be used to filter by the primary IP address of the network\n interface attached to the instance. To filter by additional IP addresses\n assigned to the network interface, use the filter\n network-interface.addresses.private-ip-address.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -32399,7 +32423,7 @@ "target": "com.amazonaws.ec2#DescribeInternetGatewaysResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your internet gateways.

", + "smithy.api#documentation": "

Describes your internet gateways. The default is to describe all your internet gateways. \n Alternatively, you can specify specific internet gateway IDs or filter the results to\n include only the internet gateways that match specific criteria.

", "smithy.api#examples": [ { "title": "To describe the Internet gateway for a VPC", @@ -32523,7 +32547,7 @@ "target": "com.amazonaws.ec2#InternetGatewayList", "traits": { "aws.protocols#ec2QueryName": "InternetGatewaySet", - "smithy.api#documentation": "

Information about one or more internet gateways.

", + "smithy.api#documentation": "

Information about the internet gateways.

", "smithy.api#xmlName": "internetGatewaySet" } }, @@ -33359,7 +33383,7 @@ "ResolveAlias": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

If true, and if a Systems Manager parameter is specified for ImageId,\n the AMI ID is displayed in the response for imageId.

\n

If false, and if a Systems Manager parameter is specified for ImageId,\n the parameter is displayed in the response for imageId.

\n

For more information, see Use a Systems \n Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

\n

Default: false\n

" + "smithy.api#documentation": "

If true, and if a Systems Manager parameter is specified for ImageId,\n the AMI ID is displayed in the response for imageId.

\n

If false, and if a Systems Manager parameter is specified for ImageId,\n the parameter is displayed in the response for imageId.

\n

For more information, see Use a Systems \n Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

\n

Default: false\n

" } } }, @@ -34378,7 +34402,7 @@ "target": "com.amazonaws.ec2#DescribeNatGatewaysResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your NAT gateways.

", + "smithy.api#documentation": "

Describes your NAT gateways. The default is to describe all your NAT gateways. \n Alternatively, you can specify specific NAT gateway IDs or filter the results to\n include only the NAT gateways that match specific criteria.

", "smithy.api#examples": [ { "title": "To describe a NAT gateway", @@ -34580,7 +34604,7 @@ "target": "com.amazonaws.ec2#DescribeNetworkAclsResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your network ACLs.

\n

For more information, see Network ACLs in the\n\t\t\t\tAmazon VPC User Guide.

", + "smithy.api#documentation": "

Describes your network ACLs. The default is to describe all your network ACLs. \n Alternatively, you can specify specific network ACL IDs or filter the results to\n include only the network ACLs that match specific criteria.

\n

For more information, see Network ACLs in the\n\t\t\t\tAmazon VPC User Guide.

", "smithy.api#examples": [ { "title": "To describe a network ACL", @@ -34663,7 +34687,7 @@ "NetworkAclIds": { "target": "com.amazonaws.ec2#NetworkAclIdStringList", "traits": { - "smithy.api#documentation": "

The IDs of the network ACLs.

\n

Default: Describes all your network ACLs.

", + "smithy.api#documentation": "

The IDs of the network ACLs.

", "smithy.api#xmlName": "NetworkAclId" } }, @@ -34691,7 +34715,7 @@ "target": "com.amazonaws.ec2#NetworkAclList", "traits": { "aws.protocols#ec2QueryName": "NetworkAclSet", - "smithy.api#documentation": "

Information about one or more network ACLs.

", + "smithy.api#documentation": "

Information about the network ACLs.

", "smithy.api#xmlName": "networkAclSet" } }, @@ -35832,7 +35856,7 @@ "target": "com.amazonaws.ec2#DescribeRegionsResult" }, "traits": { - "smithy.api#documentation": "

Describes the Regions that are enabled for your account, or all Regions.

\n

For a list of the Regions supported by Amazon EC2, see \n Amazon Elastic Compute Cloud endpoints and quotas.

\n

For information about enabling and disabling Regions for your account, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference.

\n \n

The order of the elements in the response, including those within nested structures,\n might vary. Applications should not assume the elements appear in a particular order.

\n
", + "smithy.api#documentation": "

Describes the Regions that are enabled for your account, or all Regions.

\n

For a list of the Regions supported by Amazon EC2, see Amazon EC2 service endpoints.

\n

For information about enabling and disabling Regions for your account, see Specify which Amazon Web Services Regions \n your account can use in the Amazon Web Services Account Management Reference Guide.

\n \n

The order of the elements in the response, including those within nested structures,\n might vary. Applications should not assume the elements appear in a particular order.

\n
", "smithy.api#examples": [ { "title": "To describe your regions", @@ -35963,7 +35987,7 @@ "target": "com.amazonaws.ec2#DescribeReplaceRootVolumeTasksResult" }, "traits": { - "smithy.api#documentation": "

Describes a root volume replacement task. For more information, see \n Replace a root volume in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Describes a root volume replacement task. For more information, see \n Replace a root volume in the Amazon EC2 User Guide.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -36066,7 +36090,7 @@ "target": "com.amazonaws.ec2#DescribeReservedInstancesListingsResult" }, "traits": { - "smithy.api#documentation": "

Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

\n

The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

\n

As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

\n

As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

\n

For more information, see Reserved Instance Marketplace \n in the Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
" + "smithy.api#documentation": "

Describes your account's Reserved Instance listings in the Reserved Instance Marketplace.

\n

The Reserved Instance Marketplace matches sellers who want to resell Reserved Instance capacity that they no longer need with buyers who want to purchase additional capacity. Reserved Instances bought and sold through the Reserved Instance Marketplace work like any other Reserved Instances.

\n

As a seller, you choose to list some or all of your Reserved Instances, and you specify the upfront price to receive for them. Your Reserved Instances are then listed in the Reserved Instance Marketplace and are available for purchase.

\n

As a buyer, you specify the configuration of the Reserved Instance to purchase, and the Marketplace matches what you're searching for with what's available. The Marketplace first sells the lowest priced Reserved Instances to you, and continues to sell available Reserved Instance listings to you until your demand is met. You are charged based on the total price of all of the listings that you purchase.

\n

For more information, see Sell in the Reserved Instance\n Marketplace in the Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
" } }, "com.amazonaws.ec2#DescribeReservedInstancesListingsRequest": { @@ -36127,7 +36151,7 @@ "target": "com.amazonaws.ec2#DescribeReservedInstancesModificationsResult" }, "traits": { - "smithy.api#documentation": "

Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

\n

For more information, see Modifying Reserved Instances in the Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
", + "smithy.api#documentation": "

Describes the modifications made to your Reserved Instances. If no parameter is specified, information about all your Reserved Instances modification requests is returned. If a modification ID is specified, only information about the specific modification is returned.

\n

For more information, see Modify Reserved Instances in the\n Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -36200,7 +36224,7 @@ "target": "com.amazonaws.ec2#DescribeReservedInstancesOfferingsResult" }, "traits": { - "smithy.api#documentation": "

Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

\n

If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

\n

For more information, see Reserved Instance Marketplace\n\t\t\t\tin the Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
", + "smithy.api#documentation": "

Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used.

\n

If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances.

\n

For more information, see Sell in the Reserved Instance\n Marketplace in the Amazon EC2 User Guide.

\n \n

The order of the elements in the response, including those within nested\n structures, might vary. Applications should not assume the elements appear in a\n particular order.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -36234,7 +36258,7 @@ "InstanceType": { "target": "com.amazonaws.ec2#InstanceType", "traits": { - "smithy.api#documentation": "

The instance type that the reservation will cover (for example, m1.small). For more information, see \n Instance types in the\n Amazon EC2 User Guide.

" + "smithy.api#documentation": "

The instance type that the reservation will cover (for example, m1.small).\n For more information, see Amazon EC2 instance types in the\n Amazon EC2 User Guide.

" } }, "MaxDuration": { @@ -36416,7 +36440,7 @@ "target": "com.amazonaws.ec2#DescribeRouteTablesResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your route tables.

\n

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

\n

For more information, see Route tables in the\n\t\t\t\tAmazon VPC User Guide.

", + "smithy.api#documentation": "

Describes your route tables. The default is to describe all your route tables. \n Alternatively, you can specify specific route table IDs or filter the results to\n include only the route tables that match specific criteria.

\n

Each subnet in your VPC must be associated with a route table. If a subnet is not explicitly associated with any route table, it is implicitly associated with the main route table. This command does not return the subnet ID for implicit associations.

\n

For more information, see Route tables in the\n\t\t\t\tAmazon VPC User Guide.

", "smithy.api#examples": [ { "title": "To describe a route table", @@ -36490,7 +36514,7 @@ "RouteTableIds": { "target": "com.amazonaws.ec2#RouteTableIdStringList", "traits": { - "smithy.api#documentation": "

The IDs of the route tables.

\n

Default: Describes all your route tables.

", + "smithy.api#documentation": "

The IDs of the route tables.

", "smithy.api#xmlName": "RouteTableId" } }, @@ -36518,7 +36542,7 @@ "target": "com.amazonaws.ec2#RouteTableList", "traits": { "aws.protocols#ec2QueryName": "RouteTableSet", - "smithy.api#documentation": "

Information about one or more route tables.

", + "smithy.api#documentation": "

Information about the route tables.

", "smithy.api#xmlName": "routeTableSet" } }, @@ -37338,13 +37362,13 @@ "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of snapshots to return for this request.\n This value can be between 5 and 1,000; if this value is larger than 1,000, only 1,000 results are returned. \n If this parameter is not used, then the request returns all snapshots. \n You cannot specify this parameter and the snapshot IDs parameter in the same request. For more information, \n see Pagination.

" + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output. \n\tFor more information, see Pagination.

" } }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

" + "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

" } }, "OwnerIds": { @@ -37396,7 +37420,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "

The token to include in another request to return the next page of snapshots. \n This value is null when there are no more snapshots to return.

", + "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } } @@ -37414,7 +37438,7 @@ "target": "com.amazonaws.ec2#DescribeSpotDatafeedSubscriptionResult" }, "traits": { - "smithy.api#documentation": "

Describes the data feed for Spot Instances. For more information, see Spot\n Instance data feed in the Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

Describes the data feed for Spot Instances. For more information, see Spot\n Instance data feed in the Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To describe the datafeed for your AWS account", @@ -38049,7 +38073,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n availability-zone-group - The Availability Zone group.

    \n
  • \n
  • \n

    \n create-time - The time stamp when the Spot Instance request was\n created.

    \n
  • \n
  • \n

    \n fault-code - The fault code related to the request.

    \n
  • \n
  • \n

    \n fault-message - The fault message related to the request.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance that fulfilled the\n request.

    \n
  • \n
  • \n

    \n launch-group - The Spot Instance launch group.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.delete-on-termination - Indicates\n whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.device-name - The device name for the\n volume in the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n launch.block-device-mapping.snapshot-id - The ID of the snapshot\n for the EBS volume.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.volume-size - The size of the EBS\n volume, in GiB.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.volume-type - The type of EBS volume:\n gp2 or gp3 for General Purpose SSD, io1 \n or io2 for Provisioned IOPS SSD, st1 for Throughput\n Optimized HDD, sc1 for Cold HDD, or standard for\n Magnetic.

    \n
  • \n
  • \n

    \n launch.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n launch.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n launch.image-id - The ID of the AMI.

    \n
  • \n
  • \n

    \n launch.instance-type - The type of instance (for example,\n m3.medium).

    \n
  • \n
  • \n

    \n launch.kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n launch.key-name - The name of the key pair the instance launched\n with.

    \n
  • \n
  • \n

    \n launch.monitoring-enabled - Whether detailed monitoring is\n enabled for the Spot Instance.

    \n
  • \n
  • \n

    \n launch.ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n launched-availability-zone - The Availability Zone in which the\n request is launched.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Indicates whether the IP\n address is the primary private IP address.

    \n
  • \n
  • \n

    \n network-interface.delete-on-termination - Indicates whether the\n network interface is deleted when the instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.description - A description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.device-index - The index of the device for the\n network interface attachment on the instance.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of the security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The primary private IP\n address of the network interface.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n instance.

    \n
  • \n
  • \n

    \n product-description - The product description associated with the\n instance (Linux/UNIX | Windows).

    \n
  • \n
  • \n

    \n spot-instance-request-id - The Spot Instance request ID.

    \n
  • \n
  • \n

    \n spot-price - The maximum hourly price for any Spot Instance\n launched to fulfill the request.

    \n
  • \n
  • \n

    \n state - The state of the Spot Instance request (open\n | active | closed | cancelled |\n failed). Spot request status information can help you track\n your Amazon EC2 Spot Instance requests. For more information, see Spot\n request status in the Amazon EC2 User Guide for Linux Instances.

    \n
  • \n
  • \n

    \n status-code - The short code describing the most recent\n evaluation of your Spot Instance request.

    \n
  • \n
  • \n

    \n status-message - The message explaining the status of the Spot\n Instance request.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n type - The type of Spot Instance request (one-time |\n persistent).

    \n
  • \n
  • \n

    \n valid-from - The start date of the request.

    \n
  • \n
  • \n

    \n valid-until - The end date of the request.

    \n
  • \n
", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n availability-zone-group - The Availability Zone group.

    \n
  • \n
  • \n

    \n create-time - The time stamp when the Spot Instance request was\n created.

    \n
  • \n
  • \n

    \n fault-code - The fault code related to the request.

    \n
  • \n
  • \n

    \n fault-message - The fault message related to the request.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance that fulfilled the\n request.

    \n
  • \n
  • \n

    \n launch-group - The Spot Instance launch group.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.delete-on-termination - Indicates\n whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.device-name - The device name for the\n volume in the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n launch.block-device-mapping.snapshot-id - The ID of the snapshot\n for the EBS volume.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.volume-size - The size of the EBS\n volume, in GiB.

    \n
  • \n
  • \n

    \n launch.block-device-mapping.volume-type - The type of EBS volume:\n gp2 or gp3 for General Purpose SSD, io1 \n or io2 for Provisioned IOPS SSD, st1 for Throughput\n Optimized HDD, sc1 for Cold HDD, or standard for\n Magnetic.

    \n
  • \n
  • \n

    \n launch.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n launch.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n launch.image-id - The ID of the AMI.

    \n
  • \n
  • \n

    \n launch.instance-type - The type of instance (for example,\n m3.medium).

    \n
  • \n
  • \n

    \n launch.kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n launch.key-name - The name of the key pair the instance launched\n with.

    \n
  • \n
  • \n

    \n launch.monitoring-enabled - Whether detailed monitoring is\n enabled for the Spot Instance.

    \n
  • \n
  • \n

    \n launch.ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n launched-availability-zone - The Availability Zone in which the\n request is launched.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Indicates whether the IP\n address is the primary private IP address.

    \n
  • \n
  • \n

    \n network-interface.delete-on-termination - Indicates whether the\n network interface is deleted when the instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.description - A description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.device-index - The index of the device for the\n network interface attachment on the instance.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of the security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The primary private IP\n address of the network interface.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n instance.

    \n
  • \n
  • \n

    \n product-description - The product description associated with the\n instance (Linux/UNIX | Windows).

    \n
  • \n
  • \n

    \n spot-instance-request-id - The Spot Instance request ID.

    \n
  • \n
  • \n

    \n spot-price - The maximum hourly price for any Spot Instance\n launched to fulfill the request.

    \n
  • \n
  • \n

    \n state - The state of the Spot Instance request (open\n | active | closed | cancelled |\n failed). Spot request status information can help you track\n your Amazon EC2 Spot Instance requests. For more information, see Spot\n request status in the Amazon EC2 User Guide.

    \n
  • \n
  • \n

    \n status-code - The short code describing the most recent\n evaluation of your Spot Instance request.

    \n
  • \n
  • \n

    \n status-message - The message explaining the status of the Spot\n Instance request.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n type - The type of Spot Instance request (one-time |\n persistent).

    \n
  • \n
  • \n

    \n valid-from - The start date of the request.

    \n
  • \n
  • \n

    \n valid-until - The end date of the request.

    \n
  • \n
", "smithy.api#xmlName": "Filter" } }, @@ -38120,7 +38144,7 @@ "target": "com.amazonaws.ec2#DescribeSpotPriceHistoryResult" }, "traits": { - "smithy.api#documentation": "

Describes the Spot price history. For more information, see Spot Instance pricing history in the\n Amazon EC2 User Guide for Linux Instances.

\n

When you specify a start and end time, the operation returns the prices of the\n instance types within that time range. It also returns the last price change before the\n start time, which is the effective price as of the start time.

", + "smithy.api#documentation": "

Describes the Spot price history. For more information, see Spot Instance pricing history in the\n Amazon EC2 User Guide.

\n

When you specify a start and end time, the operation returns the prices of the\n instance types within that time range. It also returns the last price change before the\n start time, which is the effective price as of the start time.

", "smithy.api#examples": [ { "title": "To describe Spot price history for Linux/UNIX (Amazon VPC)", @@ -38497,7 +38521,7 @@ "target": "com.amazonaws.ec2#DescribeSubnetsResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your subnets.

\n

For more information, see Subnets in the\n\t\t\t\tAmazon VPC User Guide.

", + "smithy.api#documentation": "

Describes your subnets. The default is to describe all your subnets. \n Alternatively, you can specify specific subnet IDs or filter the results to\n include only the subnets that match specific criteria.

\n

For more information, see Subnets in the\n\t\t\t\tAmazon VPC User Guide.

", "smithy.api#examples": [ { "title": "To describe the subnets for a VPC", @@ -38611,7 +38635,7 @@ "target": "com.amazonaws.ec2#SubnetList", "traits": { "aws.protocols#ec2QueryName": "SubnetSet", - "smithy.api#documentation": "

Information about one or more subnets.

", + "smithy.api#documentation": "

Information about the subnets.

", "smithy.api#xmlName": "subnetSet" } }, @@ -38741,6 +38765,88 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DescribeTrafficMirrorFilterRules": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DescribeTrafficMirrorFilterRulesRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DescribeTrafficMirrorFilterRulesResult" + }, + "traits": { + "smithy.api#documentation": "

Describes the traffic mirror filter rules that determine the traffic that is mirrored.

" + } + }, + "com.amazonaws.ec2#DescribeTrafficMirrorFilterRulesRequest": { + "type": "structure", + "members": { + "TrafficMirrorFilterRuleIds": { + "target": "com.amazonaws.ec2#TrafficMirrorFilterRuleIdList", + "traits": { + "smithy.api#documentation": "

Traffic filter rule IDs.

", + "smithy.api#xmlName": "TrafficMirrorFilterRuleId" + } + }, + "TrafficMirrorFilterId": { + "target": "com.amazonaws.ec2#TrafficMirrorFilterId", + "traits": { + "smithy.api#documentation": "

Traffic filter ID.

" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters. The possible values are:

\n
    \n
  • \n

    \n traffic-mirror-filter-rule-id: The ID of the Traffic Mirror rule.

    \n
  • \n
  • \n

    \n traffic-mirror-filter-id: The ID of the filter that this rule is associated with.

    \n
  • \n
  • \n

    \n rule-number: The number of the Traffic Mirror rule.

    \n
  • \n
  • \n

    \n rule-action: The action taken on the filtered traffic. Possible actions are accept and reject.

    \n
  • \n
  • \n

    \n traffic-direction: The traffic direction. Possible directions are ingress and egress.

    \n
  • \n
  • \n

    \n protocol: The protocol, for example UDP, assigned to the Traffic Mirror rule.

    \n
  • \n
  • \n

    \n source-cidr-block: The source CIDR block assigned to the Traffic Mirror rule.

    \n
  • \n
  • \n

    \n destination-cidr-block: The destination CIDR block assigned to the Traffic Mirror rule.

    \n
  • \n
  • \n

    \n description: The description of the Traffic Mirror rule.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#TrafficMirroringMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken value.

" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next page of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DescribeTrafficMirrorFilterRulesResult": { + "type": "structure", + "members": { + "TrafficMirrorFilterRules": { + "target": "com.amazonaws.ec2#TrafficMirrorFilterRuleSet", + "traits": { + "aws.protocols#ec2QueryName": "TrafficMirrorFilterRuleSet", + "smithy.api#documentation": "

Traffic mirror rules.

", + "smithy.api#xmlName": "trafficMirrorFilterRuleSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

", + "smithy.api#xmlName": "nextToken" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DescribeTrafficMirrorFilters": { "type": "operation", "input": { @@ -40535,13 +40641,13 @@ "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items,\n make another request with the token returned in the output. This value can be between 5 and 1,000;\n if the value is larger than 1,000, only 1,000 results are returned. If this parameter is not used, \n then all items are returned. You cannot specify this parameter and the volume IDs parameter in the \n same request. For more information, see Pagination.

" + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output. \n\tFor more information, see Pagination.

" } }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

" + "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

" } }, "VolumeIds": { @@ -40571,7 +40677,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", + "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } }, @@ -40764,7 +40870,7 @@ "target": "com.amazonaws.ec2#DescribeVolumesModificationsResult" }, "traits": { - "smithy.api#documentation": "

Describes the most recent volume modification request for the specified EBS volumes.

\n

If a volume has never been modified, some information in the output will be null.\n If a volume has been modified more than once, the output includes only the most \n recent modification request.

\n

You can also use CloudWatch Events to check the status of a modification to an EBS\n volume. For information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. For more information, see\n Monitor the progress of volume modifications in the Amazon EBS User Guide.

", + "smithy.api#documentation": "

Describes the most recent volume modification request for the specified EBS volumes.

\n

If a volume has never been modified, some information in the output will be null.\n If a volume has been modified more than once, the output includes only the most \n recent modification request.

\n

For more information, see \n Monitor the progress of volume modifications in the Amazon EBS User Guide.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -40799,7 +40905,7 @@ "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The token returned by a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

" + "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

" } }, "MaxResults": { @@ -40828,7 +40934,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null if there are no more items to return.

", + "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } } @@ -40866,7 +40972,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of volumes to return for this request. \n This value can be between 5 and 500; if you specify a value larger than 500, only 500 items are returned. \n If this parameter is not used, then all items are returned. You cannot specify this parameter and the\n volume IDs parameter in the same request. For more information, see Pagination.

", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output. \n\tFor more information, see Pagination.

", "smithy.api#xmlName": "maxResults" } }, @@ -40874,7 +40980,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "

The token returned from a previous paginated request. \n Pagination continues from the end of the items returned from the previous request.

", + "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

", "smithy.api#xmlName": "nextToken" } } @@ -40898,7 +41004,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", + "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } } @@ -41584,7 +41690,7 @@ "target": "com.amazonaws.ec2#DescribeVpcEndpointsResult" }, "traits": { - "smithy.api#documentation": "

Describes your VPC endpoints.

", + "smithy.api#documentation": "

Describes your VPC endpoints. The default is to describe all your VPC endpoints. \n Alternatively, you can specify specific VPC endpoint IDs or filter the results to\n include only the VPC endpoints that match specific criteria.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -41640,7 +41746,7 @@ "target": "com.amazonaws.ec2#VpcEndpointSet", "traits": { "aws.protocols#ec2QueryName": "VpcEndpointSet", - "smithy.api#documentation": "

Information about the endpoints.

", + "smithy.api#documentation": "

Information about the VPC endpoints.

", "smithy.api#xmlName": "vpcEndpointSet" } }, @@ -41666,7 +41772,7 @@ "target": "com.amazonaws.ec2#DescribeVpcPeeringConnectionsResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your VPC peering connections.

", + "smithy.api#documentation": "

Describes your VPC peering connections. The default is to describe all your VPC peering connections. \n Alternatively, you can specify specific VPC peering connection IDs or filter the results to\n include only the VPC peering connections that match specific criteria.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -41802,7 +41908,7 @@ "target": "com.amazonaws.ec2#DescribeVpcsResult" }, "traits": { - "smithy.api#documentation": "

Describes one or more of your VPCs.

", + "smithy.api#documentation": "

Describes your VPCs. The default is to describe all your VPCs. \n Alternatively, you can specify specific VPC IDs or filter the results to\n include only the VPCs that match specific criteria.

", "smithy.api#examples": [ { "title": "To describe a VPC", @@ -41899,7 +42005,7 @@ "VpcIds": { "target": "com.amazonaws.ec2#VpcIdStringList", "traits": { - "smithy.api#documentation": "

The IDs of the VPCs.

\n

Default: Describes all your VPCs.

", + "smithy.api#documentation": "

The IDs of the VPCs.

", "smithy.api#xmlName": "VpcId" } }, @@ -41935,7 +42041,7 @@ "target": "com.amazonaws.ec2#VpcList", "traits": { "aws.protocols#ec2QueryName": "VpcSet", - "smithy.api#documentation": "

Information about one or more VPCs.

", + "smithy.api#documentation": "

Information about the VPCs.

", "smithy.api#xmlName": "vpcSet" } }, @@ -42421,7 +42527,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -42781,7 +42887,7 @@ "target": "com.amazonaws.ec2#DisableAddressTransferResult" }, "traits": { - "smithy.api#documentation": "

Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

" + "smithy.api#documentation": "

Disables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#DisableAddressTransferRequest": { @@ -44636,7 +44742,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to Ensure\n Idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring\n idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -44666,7 +44772,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to Ensure\n Idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring\n idempotency.

", "smithy.api#xmlName": "clientToken" } } @@ -45283,7 +45389,7 @@ "target": "com.amazonaws.ec2#EbsOptimizedSupport", "traits": { "aws.protocols#ec2QueryName": "EbsOptimizedSupport", - "smithy.api#documentation": "

Indicates whether the instance type is Amazon EBS-optimized. For more information, see Amazon EBS-optimized\n instances in Amazon EC2 User Guide.

", + "smithy.api#documentation": "

Indicates whether the instance type is Amazon EBS-optimized. For more information, see Amazon EBS-optimized\n instances in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "ebsOptimizedSupport" } }, @@ -45442,7 +45548,7 @@ "target": "com.amazonaws.ec2#BaselineIops", "traits": { "aws.protocols#ec2QueryName": "BaselineIops", - "smithy.api#documentation": "

The baseline input/output storage operations per seconds for an EBS-optimized instance type.

", + "smithy.api#documentation": "

The baseline input/output storage operations per second for an EBS-optimized instance\n type.

", "smithy.api#xmlName": "baselineIops" } }, @@ -45466,7 +45572,7 @@ "target": "com.amazonaws.ec2#MaximumIops", "traits": { "aws.protocols#ec2QueryName": "MaximumIops", - "smithy.api#documentation": "

The maximum input/output storage operations per second for an EBS-optimized instance type.

", + "smithy.api#documentation": "

The maximum input/output storage operations per second for an EBS-optimized instance\n type.

", "smithy.api#xmlName": "maximumIops" } } @@ -45828,7 +45934,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
\n

Describes the association between an instance and an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes the association between an instance and an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticGpuAssociationList": { @@ -45853,7 +45959,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
\n

Describes the status of an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes the status of an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticGpuId": { @@ -45884,13 +45990,13 @@ "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of Elastic Graphics accelerator. For more information about the values to specify for\n Type, see Elastic Graphics Basics, specifically the Elastic Graphics accelerator column, in the \n Amazon Elastic Compute Cloud User Guide for Windows Instances.

", + "smithy.api#documentation": "

The type of Elastic Graphics accelerator.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
\n

A specification for an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

A specification for an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticGpuSpecificationList": { @@ -46025,7 +46131,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
\n

Describes an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticInferenceAccelerator": { @@ -46222,7 +46328,7 @@ "target": "com.amazonaws.ec2#EnableAddressTransferResult" }, "traits": { - "smithy.api#documentation": "

Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon Virtual Private Cloud User Guide.

" + "smithy.api#documentation": "

Enables Elastic IP address transfer. For more information, see Transfer Elastic IP addresses in the Amazon VPC User Guide.

" } }, "com.amazonaws.ec2#EnableAddressTransferRequest": { @@ -48753,7 +48859,7 @@ "target": "com.amazonaws.ec2#ExportTransitGatewayRoutesResult" }, "traits": { - "smithy.api#documentation": "

Exports routes from the specified transit gateway route table to the specified S3 bucket.\n By default, all routes are exported. Alternatively, you can filter by CIDR range.

\n

The routes are saved to the specified bucket in a JSON file. For more information, see\n Export Route Tables\n to Amazon S3 in Transit Gateways.

" + "smithy.api#documentation": "

Exports routes from the specified transit gateway route table to the specified S3 bucket.\n By default, all routes are exported. Alternatively, you can filter by CIDR range.

\n

The routes are saved to the specified bucket in a JSON file. For more information, see\n Export route tables\n to Amazon S3 in the Amazon Web Services Transit Gateways Guide.

" } }, "com.amazonaws.ec2#ExportTransitGatewayRoutesRequest": { @@ -49413,7 +49519,7 @@ "target": "com.amazonaws.ec2#Double", "traits": { "aws.protocols#ec2QueryName": "FulfilledCapacity", - "smithy.api#documentation": "

The number of capacity units fulfilled by the Capacity Reservation. For more information, see \n\t\t\t\n\t\t\t\tTotal target capacity in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The number of capacity units fulfilled by the Capacity Reservation. For more information,\n\t\t\tsee Total target\n\t\t\t\tcapacity in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "fulfilledCapacity" } }, @@ -49437,7 +49543,7 @@ "target": "com.amazonaws.ec2#DoubleWithConstraints", "traits": { "aws.protocols#ec2QueryName": "Weight", - "smithy.api#documentation": "

The weight of the instance type in the Capacity Reservation Fleet. For more information, \n\t\t\tsee \n\t\t\t\tInstance type weight in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The weight of the instance type in the Capacity Reservation Fleet. For more information, see\n\t\t\t\tInstance type\n\t\t\t\tweight in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "weight" } }, @@ -49445,7 +49551,7 @@ "target": "com.amazonaws.ec2#IntegerWithConstraints", "traits": { "aws.protocols#ec2QueryName": "Priority", - "smithy.api#documentation": "

The priority of the instance type in the Capacity Reservation Fleet. For more information, \n\t\t\tsee \n\t\t\t\tInstance type priority in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The priority of the instance type in the Capacity Reservation Fleet. For more information,\n\t\t\tsee Instance type\n\t\t\t\tpriority in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "priority" } } @@ -50316,7 +50422,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "MaxAggregationInterval", - "smithy.api#documentation": "

The maximum interval of time, in seconds, during which a flow of packets is captured and aggregated into a flow log record.

\n

When a network interface is attached to a Nitro-based\n instance, the aggregation interval is always 60 seconds (1 minute) or less,\n regardless of the specified value.

\n

Valid Values: 60 | 600\n

", + "smithy.api#documentation": "

The maximum interval of time, in seconds, during which a flow of packets is captured and aggregated into a flow log record.

\n

When a network interface is attached to a Nitro-based\n instance, the aggregation interval is always 60 seconds (1 minute) or less,\n regardless of the specified value.

\n

Valid Values: 60 | 600\n

", "smithy.api#xmlName": "maxAggregationInterval" } }, @@ -52499,7 +52605,7 @@ "target": "com.amazonaws.ec2#GetIpamPoolAllocationsResult" }, "traits": { - "smithy.api#documentation": "

Get a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

\n \n

If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

\n
", + "smithy.api#documentation": "

Get a list of all the CIDR allocations in an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations.

\n \n

If you use this action after AllocateIpamPoolCidr or ReleaseIpamPoolAllocation, note that all EC2 API actions follow an eventual consistency model.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -53220,7 +53326,7 @@ "target": "com.amazonaws.ec2#GetPasswordDataResult" }, "traits": { - "smithy.api#documentation": "

Retrieves the encrypted administrator password for a running Windows instance.

\n

The Windows password is generated at boot by the EC2Config service or\n EC2Launch scripts (Windows Server 2016 and later). This usually only\n happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the\n Amazon EC2 User Guide.

\n

For the EC2Config service, the password is not generated for rebundled\n AMIs unless Ec2SetPassword is enabled before bundling.

\n

The password is encrypted using the key pair that you specified when you launched the\n instance. You must provide the corresponding key pair file.

\n

When you launch an instance, password generation and encryption may take a few\n minutes. If you try to retrieve the password before it's available, the output returns\n an empty string. We recommend that you wait up to 15 minutes after launching an instance\n before trying to retrieve the generated password.

", + "smithy.api#documentation": "

Retrieves the encrypted administrator password for a running Windows instance.

\n

The Windows password is generated at boot by the EC2Config service or\n EC2Launch scripts (Windows Server 2016 and later). This usually only\n happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the\n Amazon EC2 User Guide.

\n

For the EC2Config service, the password is not generated for rebundled\n AMIs unless Ec2SetPassword is enabled before bundling.

\n

The password is encrypted using the key pair that you specified when you launched the\n instance. You must provide the corresponding key pair file.

\n

When you launch an instance, password generation and encryption may take a few\n minutes. If you try to retrieve the password before it's available, the output returns\n an empty string. We recommend that you wait up to 15 minutes after launching an instance\n before trying to retrieve the generated password.
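A minimal Soto sketch of this retrieve-and-retry behaviour, assuming a configured `EC2` client named `ec2`; decrypting the returned data with the key pair's private key is out of scope here.

```swift
import SotoEC2

// Sketch only: `ec2` is a configured Soto EC2 client. The returned
// passwordData is base64-encoded ciphertext; decrypt it separately with the
// key pair's private key.
func fetchEncryptedWindowsPassword(ec2: EC2, instanceId: String) async throws -> String? {
    let response = try await ec2.getPasswordData(.init(instanceId: instanceId))
    // An empty value means the password is not ready yet; wait and retry
    // (the model also defines a PasswordDataAvailable waiter for this).
    return response.passwordData
}
```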

", "smithy.waiters#waitable": { "PasswordDataAvailable": { "acceptors": [ @@ -53605,7 +53711,7 @@ "target": "com.amazonaws.ec2#GetSpotPlacementScoresResult" }, "traits": { - "smithy.api#documentation": "

Calculates the Spot placement score for a Region or Availability Zone based on the\n specified target capacity and compute requirements.

\n

You can specify your compute requirements either by using\n InstanceRequirementsWithMetadata and letting Amazon EC2 choose the optimal\n instance types to fulfill your Spot request, or you can specify the instance types by using\n InstanceTypes.

\n

For more information, see Spot placement score in\n the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

Calculates the Spot placement score for a Region or Availability Zone based on the\n specified target capacity and compute requirements.

\n

You can specify your compute requirements either by using\n InstanceRequirementsWithMetadata and letting Amazon EC2 choose the optimal\n instance types to fulfill your Spot request, or you can specify the instance types by using\n InstanceTypes.

\n

For more information, see Spot placement score in\n the Amazon EC2 User Guide.
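A minimal Soto sketch of scoring by explicit instance types, assuming a configured `EC2` client named `ec2`; the instance types and Regions below are placeholder values.

```swift
import SotoEC2

// Sketch only: `ec2` is a configured Soto EC2 client; instance types and
// Regions below are placeholders.
func printSpotPlacementScores(ec2: EC2) async throws {
    let request = EC2.GetSpotPlacementScoresRequest(
        instanceTypes: ["c7i-flex.large", "m5.large"],   // or use instanceRequirementsWithMetadata
        regionNames: ["us-east-1", "eu-west-1"],
        targetCapacity: 100
    )
    let response = try await ec2.getSpotPlacementScores(request)
    for entry in response.spotPlacementScores ?? [] {
        print(entry.region ?? "-", entry.availabilityZoneId ?? "-", entry.score ?? 0)
    }
}
```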

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -54897,7 +55003,7 @@ } }, "traits": { - "smithy.api#documentation": "

Indicates whether your instance is configured for hibernation. This parameter is valid\n only if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your instance in the\n Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Indicates whether your instance is configured for hibernation. This parameter is valid\n only if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your Amazon EC2\n instance in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#HibernationOptionsRequest": { @@ -54911,7 +55017,7 @@ } }, "traits": { - "smithy.api#documentation": "

Indicates whether your instance is configured for hibernation. This parameter is valid\n only if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your instance in the\n Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Indicates whether your instance is configured for hibernation. This parameter is valid\n only if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your Amazon EC2\n instance in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#HistoryRecord": { @@ -57813,7 +57919,7 @@ "target": "com.amazonaws.ec2#totalInferenceMemory", "traits": { "aws.protocols#ec2QueryName": "TotalInferenceMemoryInMiB", - "smithy.api#documentation": "

The total size of the memory for the inference accelerators for the instance type, in MiB.

", + "smithy.api#documentation": "

The total size of the memory for the inference accelerators for the instance type, in\n MiB.

", "smithy.api#xmlName": "totalInferenceMemoryInMiB" } } @@ -60008,7 +60114,7 @@ "target": "com.amazonaws.ec2#ConnectionTrackingSpecificationResponse", "traits": { "aws.protocols#ec2QueryName": "ConnectionTrackingConfiguration", - "smithy.api#documentation": "

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "connectionTrackingConfiguration" } } @@ -60295,7 +60401,7 @@ "ConnectionTrackingSpecification": { "target": "com.amazonaws.ec2#ConnectionTrackingSpecificationRequest", "traits": { - "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

" } } }, @@ -66027,6 +66133,150 @@ "traits": { "smithy.api#enumValue": "gr6.8xlarge" } + }, + "c7i_flex_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i-flex.large" + } + }, + "c7i_flex_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i-flex.xlarge" + } + }, + "c7i_flex_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i-flex.2xlarge" + } + }, + "c7i_flex_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i-flex.4xlarge" + } + }, + "c7i_flex_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7i-flex.8xlarge" + } + }, + "u7i_12tb_224xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7i-12tb.224xlarge" + } + }, + "u7in_16tb_224xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7in-16tb.224xlarge" + } + }, + "u7in_24tb_224xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7in-24tb.224xlarge" + } + }, + "u7in_32tb_224xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7in-32tb.224xlarge" + } + }, + "u7ib_12tb_224xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "u7ib-12tb.224xlarge" + } + }, + "c7gn_metal": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "c7gn.metal" + } + }, + "r8g_medium": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.medium" + } + }, + "r8g_large": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.large" + } + }, + "r8g_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.xlarge" + } + }, + "r8g_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.2xlarge" + } + }, + "r8g_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.4xlarge" + } + }, + "r8g_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.8xlarge" + } + }, + "r8g_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.12xlarge" + } + }, + "r8g_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.16xlarge" + } + }, + "r8g_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.24xlarge" + } + }, + "r8g_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.48xlarge" + } + }, + "r8g_metal_24xl": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.metal-24xl" + } + }, + "r8g_metal_48xl": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "r8g.metal-48xl" + } + }, + "mac2_m1ultra_metal": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "mac2-m1ultra.metal" + } } } }, @@ -66054,7 +66304,7 @@ "target": "com.amazonaws.ec2#InstanceType", "traits": { "aws.protocols#ec2QueryName": "InstanceType", - "smithy.api#documentation": "

The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The instance type. For more information, see Instance types in the Amazon EC2\n User Guide.

", "smithy.api#xmlName": "instanceType" } }, @@ -66214,7 +66464,7 @@ "target": "com.amazonaws.ec2#BurstablePerformanceFlag", "traits": { "aws.protocols#ec2QueryName": "BurstablePerformanceSupported", - "smithy.api#documentation": "

Indicates whether the instance type is a burstable performance T instance \n type. For more information, see Burstable \n performance instances.

", + "smithy.api#documentation": "

Indicates whether the instance type is a burstable performance T instance type. For more\n information, see Burstable performance\n instances.

", "smithy.api#xmlName": "burstablePerformanceSupported" } }, @@ -66238,7 +66488,7 @@ "target": "com.amazonaws.ec2#BootModeTypeList", "traits": { "aws.protocols#ec2QueryName": "SupportedBootModes", - "smithy.api#documentation": "

The supported boot modes. For more information, see Boot modes in the\n Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The supported boot modes. For more information, see Boot modes in the Amazon EC2 User\n Guide.

", "smithy.api#xmlName": "supportedBootModes" } }, @@ -66281,6 +66531,14 @@ "smithy.api#documentation": "

Describes the Neuron accelerator settings for the instance type.

", "smithy.api#xmlName": "neuronInfo" } + }, + "PhcSupport": { + "target": "com.amazonaws.ec2#PhcSupport", + "traits": { + "aws.protocols#ec2QueryName": "PhcSupport", + "smithy.api#documentation": "

Indicates whether a local Precision Time Protocol (PTP) hardware clock (PHC) is\n supported.

", + "smithy.api#xmlName": "phcSupport" + } } }, "traits": { @@ -66334,7 +66592,7 @@ "target": "com.amazonaws.ec2#InstanceType", "traits": { "aws.protocols#ec2QueryName": "InstanceType", - "smithy.api#documentation": "

The instance type. For more information, see Instance types in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

The instance type. For more information, see Instance types in the Amazon EC2\n User Guide.

", "smithy.api#xmlName": "instanceType" } }, @@ -66350,7 +66608,7 @@ "target": "com.amazonaws.ec2#Location", "traits": { "aws.protocols#ec2QueryName": "Location", - "smithy.api#documentation": "

The identifier for the location. This depends on the location type. For example, if the location type is\n region, the location is the Region code (for example, us-east-2.)

", + "smithy.api#documentation": "

The identifier for the location. This depends on the location type. For example, if the\n location type is region, the location is the Region code (for example,\n us-east-2.)

", "smithy.api#xmlName": "location" } } @@ -67747,7 +68005,7 @@ "target": "com.amazonaws.ec2#IpamPoolPublicIpSource", "traits": { "aws.protocols#ec2QueryName": "PublicIpSource", - "smithy.api#documentation": "

The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is BYOIP. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. \n By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.

", + "smithy.api#documentation": "

The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is BYOIP. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. \n By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide.

", "smithy.api#xmlName": "publicIpSource" } }, @@ -69323,7 +69581,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Ipv4Prefix", - "smithy.api#documentation": "

The IPv4 prefix. For information, see \n Assigning prefixes to Amazon EC2 network interfaces in the\n Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The IPv4 prefix. For information, see \n Assigning prefixes to network interfaces in the\n Amazon EC2 User Guide.

", "smithy.api#xmlName": "ipv4Prefix" } } @@ -69338,7 +69596,7 @@ "Ipv4Prefix": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The IPv4 prefix. For information, see \n Assigning prefixes to Amazon EC2 network interfaces in the\n Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

The IPv4 prefix. For information, see \n Assigning prefixes to network interfaces in the\n Amazon EC2 User Guide.

" } } }, @@ -70942,7 +71200,7 @@ } }, "traits": { - "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#LaunchTemplateInstanceMetadataOptionsRequest": { @@ -70980,7 +71238,7 @@ } }, "traits": { - "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#LaunchTemplateInstanceMetadataOptionsState": { @@ -71209,7 +71467,7 @@ "target": "com.amazonaws.ec2#ConnectionTrackingSpecification", "traits": { "aws.protocols#ec2QueryName": "ConnectionTrackingSpecification", - "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the timeout\n for connection tracking on an Elastic network interface. For more information, see\n Idle connection tracking timeout in the\n Amazon EC2 User Guide.

", "smithy.api#xmlName": "connectionTrackingSpecification" } } @@ -71270,7 +71528,7 @@ "InterfaceType": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The type of network interface. To create an Elastic Fabric Adapter (EFA), specify\n efa. For more information, see Elastic Fabric Adapter in the\n Amazon Elastic Compute Cloud User Guide.

\n

If you are not creating an EFA, specify interface or omit this\n parameter.

\n

Valid values: interface | efa\n

" + "smithy.api#documentation": "

The type of network interface. To create an Elastic Fabric Adapter (EFA), specify\n efa. For more information, see Elastic Fabric Adapter in the\n Amazon EC2 User Guide.

\n

If you are not creating an EFA, specify interface or omit this\n parameter.

\n

Valid values: interface | efa\n

" } }, "Ipv6AddressCount": { @@ -71362,7 +71620,7 @@ "ConnectionTrackingSpecification": { "target": "com.amazonaws.ec2#ConnectionTrackingSpecificationRequest", "traits": { - "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

A security group connection tracking specification that enables you to set the timeout\n for connection tracking on an Elastic network interface. For more information, see\n Idle connection tracking timeout in the\n Amazon EC2 User Guide.

" } } }, @@ -73719,7 +73977,7 @@ "target": "com.amazonaws.ec2#TotalMediaMemory", "traits": { "aws.protocols#ec2QueryName": "TotalMediaMemoryInMiB", - "smithy.api#documentation": "

The total size of the memory for the media accelerators for the instance type, in MiB.

", + "smithy.api#documentation": "

The total size of the memory for the media accelerators for the instance type, in\n MiB.

", "smithy.api#xmlName": "totalMediaMemoryInMiB" } } @@ -74081,7 +74339,7 @@ "target": "com.amazonaws.ec2#ModifyAvailabilityZoneGroupResult" }, "traits": { - "smithy.api#documentation": "

Changes the opt-in status of the Local Zone and Wavelength Zone group for your\n account.

\n

Use \n \t\tDescribeAvailabilityZones to view the value for GroupName.

" + "smithy.api#documentation": "

Changes the opt-in status of the specified zone group for your account.

" } }, "com.amazonaws.ec2#ModifyAvailabilityZoneGroupRequest": { @@ -74099,7 +74357,7 @@ "target": "com.amazonaws.ec2#ModifyAvailabilityZoneOptInStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates whether you are opted in to the Local Zone group or Wavelength Zone group. The\n only valid value is opted-in. You must contact Amazon Web Services Support to opt out of a Local Zone or Wavelength Zone group.

", + "smithy.api#documentation": "

Indicates whether to opt in to the zone group. The only valid value is opted-in. \n You must contact Amazon Web Services Support to opt out of a Local Zone or Wavelength Zone group.

", "smithy.api#required": {} } }, @@ -74185,7 +74443,7 @@ "TotalTargetCapacity": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This value, \n\t\t\ttogether with the instance type weights that you assign to each instance type used by the Fleet \n\t\t\tdetermine the number of instances for which the Fleet reserves capacity. Both values are based on \n\t\t\tunits that make sense for your workload. For more information, see Total target capacity \n\t\t\tin the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

The total number of capacity units to be reserved by the Capacity Reservation Fleet. This\n\t\t\tvalue, together with the instance type weights that you assign to each instance type\n\t\t\tused by the Fleet, determines the number of instances for which the Fleet reserves\n\t\t\tcapacity. Both values are based on units that make sense for your workload. For more\n\t\t\tinformation, see Total target\n\t\t\t\tcapacity in the Amazon EC2 User Guide.

" } }, "EndDate": { @@ -74495,7 +74753,7 @@ "target": "com.amazonaws.ec2#KmsKeyId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption.\n If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is\n specified, the encrypted state must be true.

\n

You can specify the KMS key using any of the following:

\n
    \n
  • \n

    Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    \n
  • \n
  • \n

    Key alias. For example, alias/ExampleAlias.

    \n
  • \n
  • \n

    Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    \n
  • \n
  • \n

    Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

    \n
  • \n
\n

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, \n the action can appear to complete, but eventually fails.

\n

Amazon EBS does not support asymmetric KMS keys.

", + "smithy.api#documentation": "

The identifier of the KMS key to use for Amazon EBS encryption.\n If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is\n specified, the encrypted state must be true.

\n

You can specify the KMS key using any of the following:

\n
    \n
  • \n

    Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.

    \n
  • \n
  • \n

    Key alias. For example, alias/ExampleAlias.

    \n
  • \n
  • \n

    Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.

    \n
  • \n
  • \n

    Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.

    \n
  • \n
\n

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, \n the action can appear to complete, but eventually fails.

\n

Amazon EBS does not support asymmetric KMS keys.

", "smithy.api#required": {} } }, @@ -74738,7 +74996,7 @@ "HostRecovery": { "target": "com.amazonaws.ec2#HostRecovery", "traits": { - "smithy.api#documentation": "

Indicates whether to enable or disable host recovery for the Dedicated Host. For more\n information, see Host recovery\n in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Indicates whether to enable or disable host recovery for the Dedicated Host. For more\n information, see Host recovery in\n the Amazon EC2 User Guide.

" } }, "InstanceType": { @@ -74756,7 +75014,7 @@ "HostMaintenance": { "target": "com.amazonaws.ec2#HostMaintenance", "traits": { - "smithy.api#documentation": "

Indicates whether to enable or disable host maintenance for the Dedicated Host. For\n more information, see Host\n maintenance in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Indicates whether to enable or disable host maintenance for the Dedicated Host. For\n more information, see Host\n maintenance in the Amazon EC2 User Guide.

" } } }, @@ -74886,14 +75144,14 @@ "smithy.api#documentation": "

Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.

\n

To specify the attribute, you can use the Attribute parameter, or one of the following parameters: \n Description, ImdsSupport, or LaunchPermission.

\n

Images with an Amazon Web Services Marketplace product code cannot be made public.

\n

To enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance \n and create an AMI from the instance.
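A minimal Soto sketch mirroring the "To grant launch permissions" example below, assuming a configured `EC2` client named `ec2`; the AMI and account IDs are the placeholder values from the model's own example.

```swift
import SotoEC2

// Sketch only: `ec2` is a configured Soto EC2 client; the IDs are the
// placeholder values used in the model's own example.
func grantLaunchPermission(ec2: EC2) async throws {
    let request = EC2.ModifyImageAttributeRequest(
        imageId: "ami-5731123e",
        launchPermission: .init(add: [.init(userId: "123456789012")])
    )
    try await ec2.modifyImageAttribute(request)
}
```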

", "smithy.api#examples": [ { - "title": "To make an AMI public", - "documentation": "This example makes the specified AMI public.", + "title": "To grant launch permissions", + "documentation": "This example grants launch permissions for the specified AMI to the specified AWS account.", "input": { "ImageId": "ami-5731123e", "LaunchPermission": { "Add": [ { - "Group": "all" + "UserId": "123456789012" } ] } @@ -74901,14 +75159,14 @@ "output": {} }, { - "title": "To grant launch permissions", - "documentation": "This example grants launch permissions for the specified AMI to the specified AWS account.", + "title": "To make an AMI public", + "documentation": "This example makes the specified AMI public.", "input": { "ImageId": "ami-5731123e", "LaunchPermission": { "Add": [ { - "UserId": "123456789012" + "Group": "all" } ] } @@ -75026,23 +75284,23 @@ "smithy.api#documentation": "

Modifies the specified attribute of the specified instance. You can specify only one\n attribute at a time.

\n

\n Note: Using this action to change the security groups\n associated with an elastic network interface (ENI) attached to an instance can\n result in an error if the instance has more than one ENI. To change the security groups\n associated with an ENI attached to an instance that has multiple ENIs, we recommend that\n you use the ModifyNetworkInterfaceAttribute action.

\n

To modify some attributes, the instance must be stopped. For more information, see\n Modify a stopped instance in the\n Amazon EC2 User Guide.

", "smithy.api#examples": [ { - "title": "To modify the instance type", - "documentation": "This example modifies the instance type of the specified stopped instance.", + "title": "To enable enhanced networking", + "documentation": "This example enables enhanced networking for the specified stopped instance.", "input": { "InstanceId": "i-1234567890abcdef0", - "InstanceType": { - "Value": "m5.large" + "EnaSupport": { + "Value": true } }, "output": {} }, { - "title": "To enable enhanced networking", - "documentation": "This example enables enhanced networking for the specified stopped instance.", + "title": "To modify the instance type", + "documentation": "This example modifies the instance type of the specified stopped instance.", "input": { "InstanceId": "i-1234567890abcdef0", - "EnaSupport": { - "Value": true + "InstanceType": { + "Value": "m5.large" } }, "output": {} @@ -75183,7 +75441,7 @@ "DisableApiStop": { "target": "com.amazonaws.ec2#AttributeBooleanValue", "traits": { - "smithy.api#documentation": "

Indicates whether an instance is enabled for stop protection. For more information,\n see Stop\n Protection.

\n

" + "smithy.api#documentation": "

Indicates whether an instance is enabled for stop protection. For more information,\n see Enable stop\n protection for your instance.

\n

" } } }, @@ -76609,7 +76867,7 @@ "target": "com.amazonaws.ec2#ModifyReservedInstancesResult" }, "traits": { - "smithy.api#documentation": "

Modifies the configuration of your Reserved Instances, such as the Availability Zone, \n instance count, or instance type. The Reserved Instances to be modified must be identical, \n except for Availability Zone, network platform, and instance type.

\n

For more information, see Modifying Reserved\n\t\t\t\tInstances in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Modifies the configuration of your Reserved Instances, such as the Availability Zone, \n instance count, or instance type. The Reserved Instances to be modified must be identical, \n except for Availability Zone, network platform, and instance type.

\n

For more information, see Modify Reserved Instances in the\n Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#ModifyReservedInstancesRequest": { @@ -76735,27 +76993,27 @@ "smithy.api#documentation": "

Adds or removes permission settings for the specified snapshot. You may add or remove\n specified Amazon Web Services account IDs from a snapshot's list of create volume permissions, but you cannot\n do both in a single operation. If you need to both add and remove account IDs for a snapshot,\n you must use multiple operations. You can make up to 500 modifications to a snapshot in a single operation.

\n

Encrypted snapshots and snapshots with Amazon Web Services Marketplace product codes cannot be made\n public. Snapshots encrypted with your default KMS key cannot be shared with other accounts.

\n

For more information about modifying snapshot permissions, see Share a snapshot in the\n Amazon EBS User Guide.

", "smithy.api#examples": [ { - "title": "To modify a snapshot attribute", - "documentation": "This example modifies snapshot ``snap-1234567890abcdef0`` to remove the create volume permission for a user with the account ID ``123456789012``. If the command succeeds, no output is returned.", + "title": "To make a snapshot public", + "documentation": "This example makes the snapshot ``snap-1234567890abcdef0`` public.", "input": { "SnapshotId": "snap-1234567890abcdef0", "Attribute": "createVolumePermission", - "OperationType": "remove", - "UserIds": [ - "123456789012" + "OperationType": "add", + "GroupNames": [ + "all" ] }, "output": {} }, { - "title": "To make a snapshot public", - "documentation": "This example makes the snapshot ``snap-1234567890abcdef0`` public.", + "title": "To modify a snapshot attribute", + "documentation": "This example modifies snapshot ``snap-1234567890abcdef0`` to remove the create volume permission for a user with the account ID ``123456789012``. If the command succeeds, no output is returned.", "input": { "SnapshotId": "snap-1234567890abcdef0", "Attribute": "createVolumePermission", - "OperationType": "add", - "GroupNames": [ - "all" + "OperationType": "remove", + "UserIds": [ + "123456789012" ] }, "output": {} @@ -77247,7 +77505,7 @@ "target": "com.amazonaws.ec2#TrafficMirrorFilterRule", "traits": { "aws.protocols#ec2QueryName": "TrafficMirrorFilterRule", - "smithy.api#documentation": "

Modifies a Traffic Mirror rule.

", + "smithy.api#documentation": "\n

Tags are not returned for ModifyTrafficMirrorFilterRule.

\n
\n

A Traffic Mirror rule.

", "smithy.api#xmlName": "trafficMirrorFilterRule" } } @@ -77752,7 +78010,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -77843,7 +78101,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -77933,7 +78191,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -78012,7 +78270,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -78095,7 +78353,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -78146,7 +78404,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -78285,7 +78543,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive token that you provide to ensure idempotency of your\n modification request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -78325,7 +78583,7 @@ "target": "com.amazonaws.ec2#ModifyVolumeResult" }, "traits": { - "smithy.api#documentation": "

You can modify several parameters of an existing EBS volume, including volume size, volume\n type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance\n type, you might be able to apply these changes without stopping the instance or detaching the\n volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes \n in the Amazon EBS User Guide.

\n

When you complete a resize operation on your volume, you need to extend the volume's\n file-system size to take advantage of the new storage capacity. For more information, see Extend the file system.

\n

You can use CloudWatch Events to check the status of a modification to an EBS volume. For\n information about CloudWatch Events, see the Amazon CloudWatch Events User Guide. You can also track the status of a\n modification using DescribeVolumesModifications. For information\n about tracking status changes using either method, see Monitor the progress of volume modifications.

\n

With previous-generation instance types, resizing an EBS volume might require detaching and\n reattaching the volume or stopping and restarting the instance.

\n

After modifying a volume, you must wait at least six hours and ensure that the volume \n is in the in-use or available state before you can modify the same \n volume. This is sometimes referred to as a cooldown period.

" + "smithy.api#documentation": "

You can modify several parameters of an existing EBS volume, including volume size, volume\n type, and IOPS capacity. If your EBS volume is attached to a current-generation EC2 instance\n type, you might be able to apply these changes without stopping the instance or detaching the\n volume from it. For more information about modifying EBS volumes, see Amazon EBS Elastic Volumes \n in the Amazon EBS User Guide.

\n

When you complete a resize operation on your volume, you need to extend the volume's\n file-system size to take advantage of the new storage capacity. For more information, see Extend the file system.

\n

For more information, see Monitor the progress of volume modifications in the Amazon EBS User Guide.

\n

With previous-generation instance types, resizing an EBS volume might require detaching and\n reattaching the volume or stopping and restarting the instance.

\n

After modifying a volume, you must wait at least six hours and ensure that the volume \n is in the in-use or available state before you can modify the same \n volume. This is sometimes referred to as a cooldown period.
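A minimal Soto sketch of the modify-then-monitor flow described above, assuming a configured `EC2` client named `ec2`; the size and IOPS values are placeholders.

```swift
import SotoEC2

// Sketch only: `ec2` is a configured Soto EC2 client; size and IOPS values
// are placeholders.
func growVolume(ec2: EC2, volumeId: String) async throws {
    // Request a larger gp3 volume with more provisioned IOPS.
    let modify = try await ec2.modifyVolume(.init(
        iops: 4000,
        size: 200,          // target size, in GiB
        volumeId: volumeId,
        volumeType: .gp3
    ))
    if let state = modify.volumeModification?.modificationState {
        print("modification state: \(state)")
    }
    // Track progress afterwards; remember the six-hour cooldown between
    // modifications of the same volume.
    let status = try await ec2.describeVolumesModifications(.init(volumeIds: [volumeId]))
    print("progress:", status.volumesModifications?.first?.progress ?? 0)
}
```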

" } }, "com.amazonaws.ec2#ModifyVolumeAttribute": { @@ -78416,7 +78674,7 @@ "Iops": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

\n

The following are the supported values for each volume type:

\n
    \n
  • \n

    \n gp3: 3,000 - 16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100 - 64,000 IOPS

    \n
  • \n
  • \n

    \n io2: 100 - 256,000 IOPS

    \n
  • \n
\n

For io2 volumes, you can achieve up to 256,000 IOPS on \ninstances \nbuilt on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

\n

Default: The existing value is retained if you keep the same volume type. If you change\n the volume type to io1, io2, or gp3, the default is 3,000.

" + "smithy.api#documentation": "

The target IOPS rate of the volume. This parameter is valid only for gp3, io1, and io2 volumes.

\n

The following are the supported values for each volume type:

\n
    \n
  • \n

    \n gp3: 3,000 - 16,000 IOPS

    \n
  • \n
  • \n

    \n io1: 100 - 64,000 IOPS

    \n
  • \n
  • \n

    \n io2: 100 - 256,000 IOPS

    \n
  • \n
\n

For io2 volumes, you can achieve up to 256,000 IOPS on \ninstances \nbuilt on the Nitro System. On other instances, you can achieve performance up to 32,000 IOPS.

\n

Default: The existing value is retained if you keep the same volume type. If you change\n the volume type to io1, io2, or gp3, the default is 3,000.

" } }, "Throughput": { @@ -78428,7 +78686,7 @@ "MultiAttachEnabled": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the \n\t\tvolume to up to 16 \n\t\t\tNitro-based instances in the same Availability Zone. This parameter is \n\t\tsupported with io1 and io2 volumes only. For more information, see \n\t \n\t\t\tAmazon EBS Multi-Attach in the Amazon EBS User Guide.

" + "smithy.api#documentation": "

Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the \n\t volume to up to 16 \n\t\t\tNitro-based instances in the same Availability Zone. This parameter is \n\t\tsupported with io1 and io2 volumes only. For more information, see \n\t \n\t\t\tAmazon EBS Multi-Attach in the Amazon EBS User Guide.

" } } }, @@ -79847,7 +80105,7 @@ "target": "com.amazonaws.ec2#ProvisionedBandwidth", "traits": { "aws.protocols#ec2QueryName": "ProvisionedBandwidth", - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through \n the Support Center.

", + "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, \n contact Amazon Web Services Support.

", "smithy.api#xmlName": "provisionedBandwidth" } }, @@ -80084,7 +80342,7 @@ "target": "com.amazonaws.ec2#NetworkAclAssociationList", "traits": { "aws.protocols#ec2QueryName": "AssociationSet", - "smithy.api#documentation": "

Any associations between the network ACL and one or more subnets

", + "smithy.api#documentation": "

Any associations between the network ACL and your subnets.

", "smithy.api#xmlName": "associationSet" } }, @@ -80486,7 +80744,7 @@ "target": "com.amazonaws.ec2#EncryptionInTransitSupported", "traits": { "aws.protocols#ec2QueryName": "EncryptionInTransitSupported", - "smithy.api#documentation": "

Indicates whether the instance type automatically encrypts in-transit traffic between instances.

", + "smithy.api#documentation": "

Indicates whether the instance type automatically encrypts in-transit traffic between\n instances.

", "smithy.api#xmlName": "encryptionInTransitSupported" } }, @@ -80494,7 +80752,7 @@ "target": "com.amazonaws.ec2#EnaSrdSupported", "traits": { "aws.protocols#ec2QueryName": "EnaSrdSupported", - "smithy.api#documentation": "

Indicates whether the instance type supports ENA Express. ENA Express uses Amazon Web Services Scalable \n Reliable Datagram (SRD) technology to increase the maximum bandwidth used per stream and \n minimize tail latency of network traffic between EC2 instances.

", + "smithy.api#documentation": "

Indicates whether the instance type supports ENA Express. ENA Express uses Amazon Web Services Scalable Reliable Datagram (SRD) technology to increase the maximum bandwidth used per stream\n and minimize tail latency of network traffic between EC2 instances.

", "smithy.api#xmlName": "enaSrdSupported" } } @@ -81062,7 +81320,7 @@ "target": "com.amazonaws.ec2#ConnectionTrackingConfiguration", "traits": { "aws.protocols#ec2QueryName": "ConnectionTrackingConfiguration", - "smithy.api#documentation": "

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

A security group connection tracking configuration that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "connectionTrackingConfiguration" } }, @@ -81564,7 +81822,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "IsPrimaryIpv6", - "smithy.api#documentation": "

Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see ModifyNetworkInterfaceAttribute.

", + "smithy.api#documentation": "

Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. \n For more information, see ModifyNetworkInterfaceAttribute.

", "smithy.api#xmlName": "isPrimaryIpv6" } } @@ -82040,7 +82298,7 @@ "target": "com.amazonaws.ec2#TotalNeuronMemory", "traits": { "aws.protocols#ec2QueryName": "TotalNeuronDeviceMemoryInMiB", - "smithy.api#documentation": "

The total size of the memory for the neuron accelerators for the instance type, in MiB.

", + "smithy.api#documentation": "

The total size of the memory for the neuron accelerators for the instance type, in\n MiB.

", "smithy.api#xmlName": "totalNeuronDeviceMemoryInMiB" } } @@ -82345,7 +82603,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "MinTargetCapacity", - "smithy.api#documentation": "

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is\n not reached, the fleet launches no instances.

\n

Supported only for fleets of type instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

", + "smithy.api#documentation": "

The minimum target capacity for On-Demand Instances in the fleet. If this minimum capacity isn't\n reached, no instances are launched.

\n

Constraints: Maximum value of 1000. Supported only for fleets of type\n instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

", "smithy.api#xmlName": "minTargetCapacity" } }, @@ -82353,7 +82611,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "MaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The maxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User\n Guide.

\n
", + "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

\n \n

If your fleet includes T instances that are configured as unlimited, and\n if their average CPU usage exceeds the baseline utilization, you will incur a charge for\n surplus credits. The maxTotalPrice does not account for surplus credits,\n and, if you use surplus credits, your final cost might be higher than what you specified\n for maxTotalPrice. For more information, see Surplus credits can incur charges in the\n Amazon EC2 User Guide.

\n
", "smithy.api#xmlName": "maxTotalPrice" } } @@ -82392,13 +82650,13 @@ "MinTargetCapacity": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is\n not reached, the fleet launches no instances.

\n

Supported only for fleets of type instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

" + "smithy.api#documentation": "

The minimum target capacity for On-Demand Instances in the fleet. If this minimum capacity isn't\n reached, no instances are launched.

\n

Constraints: Maximum value of 1000. Supported only for fleets of type\n instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

" } }, "MaxTotalPrice": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The MaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User\n Guide.

\n
" + "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The MaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the Amazon EC2 User Guide.

\n
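For orientation, a minimal Soto (Swift) sketch of how these On-Demand options might be populated for an instant-type fleet; the struct and member names (EC2.OnDemandOptionsRequest, maxTotalPrice, minTargetCapacity, singleInstanceType) are assumed to follow Soto's usual mapping of this model, and the values are placeholders.

```swift
import SotoEC2

// Sketch only: set a minimum On-Demand target capacity together with the
// required SingleInstanceType constraint, plus an hourly price ceiling.
let onDemandOptions = EC2.OnDemandOptionsRequest(
    maxTotalPrice: "5.00",      // max USD per hour for On-Demand capacity
    minTargetCapacity: 10,      // launch nothing unless 10 instances can be fulfilled
    singleInstanceType: true    // companion constraint required by minTargetCapacity
)
// This value would be passed as `onDemandOptions` on an EC2.CreateFleetRequest
// whose fleet type is `instant`.
print(onDemandOptions)
```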
" } } }, @@ -83436,6 +83694,23 @@ "smithy.api#documentation": "

Specifies the integrity algorithm for the VPN tunnel for phase 2 IKE\n negotiations.

" } }, + "com.amazonaws.ec2#PhcSupport": { + "type": "enum", + "members": { + "UNSUPPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "unsupported" + } + }, + "SUPPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "supported" + } + } + } + }, "com.amazonaws.ec2#Placement": { "type": "structure", "members": { @@ -84518,7 +84793,7 @@ "target": "com.amazonaws.ec2#SupportedAdditionalProcessorFeatureList", "traits": { "aws.protocols#ec2QueryName": "SupportedFeatures", - "smithy.api#documentation": "

Indicates whether the instance type supports AMD SEV-SNP. If the request returns \n amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. \n For more information, see \n AMD SEV-SNP.

", + "smithy.api#documentation": "

Indicates whether the instance type supports AMD SEV-SNP. If the request returns\n amd-sev-snp, AMD SEV-SNP is supported. Otherwise, it is not supported. For more\n information, see AMD\n SEV-SNP.

", "smithy.api#xmlName": "supportedFeatures" } }, @@ -84692,7 +84967,7 @@ "target": "com.amazonaws.ec2#ProvisionByoipCidrResult" }, "traits": { - "smithy.api#documentation": "

Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP \n addresses (BYOIP) and creates a corresponding address pool. After the address range is\n provisioned, it is ready to be advertised using AdvertiseByoipCidr.

\n

Amazon Web Services verifies that you own the address range and are authorized to advertise it. \n You must ensure that the address range is registered to you and that you created an \n RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. \n For more information, see Bring your own IP addresses (BYOIP) in the Amazon Elastic Compute Cloud User Guide.

\n

Provisioning an address range is an asynchronous operation, so the call returns immediately,\n but the address range is not ready to use until its status changes from pending-provision\n to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. \n To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress \n with either the specific address from the address pool or the ID of the address pool.

" + "smithy.api#documentation": "

Provisions an IPv4 or IPv6 address range for use with your Amazon Web Services resources through bring your own IP \n addresses (BYOIP) and creates a corresponding address pool. After the address range is\n provisioned, it is ready to be advertised using AdvertiseByoipCidr.

\n

Amazon Web Services verifies that you own the address range and are authorized to advertise it. \n You must ensure that the address range is registered to you and that you created an \n RPKI ROA to authorize Amazon ASNs 16509 and 14618 to advertise the address range. \n For more information, see Bring your own IP addresses (BYOIP) in the Amazon EC2 User Guide.

\n

Provisioning an address range is an asynchronous operation, so the call returns immediately,\n but the address range is not ready to use until its status changes from pending-provision\n to provisioned. To monitor the status of an address range, use DescribeByoipCidrs. \n To allocate an Elastic IP address from your IPv4 address pool, use AllocateAddress \n with either the specific address from the address pool or the ID of the address pool.

" } }, "com.amazonaws.ec2#ProvisionByoipCidrRequest": { @@ -84886,7 +85161,7 @@ "ClientToken": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } } @@ -84992,7 +85267,7 @@ "target": "com.amazonaws.ec2#DateTime", "traits": { "aws.protocols#ec2QueryName": "ProvisionTime", - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "smithy.api#documentation": "

Reserved.

", "smithy.api#xmlName": "provisionTime" } }, @@ -85000,7 +85275,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Provisioned", - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "smithy.api#documentation": "

Reserved.

", "smithy.api#xmlName": "provisioned" } }, @@ -85008,7 +85283,7 @@ "target": "com.amazonaws.ec2#DateTime", "traits": { "aws.protocols#ec2QueryName": "RequestTime", - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "smithy.api#documentation": "

Reserved.

", "smithy.api#xmlName": "requestTime" } }, @@ -85016,7 +85291,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Requested", - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "smithy.api#documentation": "

Reserved.

", "smithy.api#xmlName": "requested" } }, @@ -85024,13 +85299,13 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Status", - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

", + "smithy.api#documentation": "

Reserved.

", "smithy.api#xmlName": "status" } } }, "traits": { - "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, contact us through the Support Center.

" + "smithy.api#documentation": "

Reserved. If you need to sustain traffic greater than the documented limits, \n contact Amazon Web Services Support.

" } }, "com.amazonaws.ec2#PtrUpdateStatus": { @@ -85501,7 +85776,7 @@ "target": "com.amazonaws.ec2#PurchaseReservedInstancesOfferingResult" }, "traits": { - "smithy.api#documentation": "

Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower \n hourly rate compared to On-Demand instance pricing.

\n

Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings \n\t\t\tthat match your specifications. After you've purchased a Reserved Instance, you can check for your\n\t\t\tnew Reserved Instance with DescribeReservedInstances.

\n

To queue a purchase for a future date and time, specify a purchase time. If you do not specify a\n purchase time, the default is the current time.

\n

For more information, see Reserved Instances and \n \t Reserved Instance Marketplace \n \t in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Purchases a Reserved Instance for use with your account. With Reserved Instances, you pay a lower \n hourly rate compared to On-Demand instance pricing.

\n

Use DescribeReservedInstancesOfferings to get a list of Reserved Instance offerings \n\t\t\tthat match your specifications. After you've purchased a Reserved Instance, you can check for your\n\t\t\tnew Reserved Instance with DescribeReservedInstances.

\n

To queue a purchase for a future date and time, specify a purchase time. If you do not specify a\n purchase time, the default is the current time.

\n

For more information, see Reserved\n Instances and Sell in the Reserved Instance\n Marketplace in the Amazon EC2 User Guide.
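A hedged Soto (Swift) sketch of this purchase flow; the offering ID is a placeholder and the generated Swift names are assumed from this model rather than quoted from Soto.

```swift
import SotoEC2

// Sketch: purchase one Reserved Instance from an offering previously found with
// DescribeReservedInstancesOfferings.
func purchaseOffering(ec2: EC2) async throws -> String? {
    let request = EC2.PurchaseReservedInstancesOfferingRequest(
        instanceCount: 1,
        reservedInstancesOfferingId: "offering-id-placeholder"
    )
    let result = try await ec2.purchaseReservedInstancesOffering(request)
    // The returned ID may change later if the purchase crosses a pricing tier.
    return result.reservedInstancesId
}
```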

" } }, "com.amazonaws.ec2#PurchaseReservedInstancesOfferingRequest": { @@ -85558,7 +85833,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ReservedInstancesId", - "smithy.api#documentation": "

The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted\n pricing tier, the final Reserved Instances IDs might change. For more information, see Crossing\n pricing tiers in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The IDs of the purchased Reserved Instances. If your purchase crosses into a discounted\n pricing tier, the final Reserved Instances IDs might change. For more information, see Crossing\n pricing tiers in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "reservedInstancesId" } } @@ -86141,7 +86416,7 @@ "target": "com.amazonaws.ec2#RegisterTransitGatewayMulticastGroupMembersResult" }, "traits": { - "smithy.api#documentation": "

Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated\n with a supported EC2 instance that receives multicast traffic. For information about\n supported instances, see Multicast\n Consideration in Amazon VPC Transit Gateways.

\n

After you add the members, use SearchTransitGatewayMulticastGroups to verify that the members were added\n to the transit gateway multicast group.

" + "smithy.api#documentation": "

Registers members (network interfaces) with the transit gateway multicast group. A member is a network interface associated\n with a supported EC2 instance that receives multicast traffic. For more information, see\n Multicast\n on transit gateways in the Amazon Web Services Transit Gateways Guide.

\n

After you add the members, use SearchTransitGatewayMulticastGroups to verify that the members were added\n to the transit gateway multicast group.

" } }, "com.amazonaws.ec2#RegisterTransitGatewayMulticastGroupMembersRequest": { @@ -86205,7 +86480,7 @@ "target": "com.amazonaws.ec2#RegisterTransitGatewayMulticastGroupSourcesResult" }, "traits": { - "smithy.api#documentation": "

Registers sources (network interfaces) with the specified transit gateway multicast group.

\n

A multicast source is a network interface attached to a supported instance that sends\n multicast traffic. For information about supported instances, see Multicast\n Considerations in Amazon VPC Transit Gateways.

\n

After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast\n group.

" + "smithy.api#documentation": "

Registers sources (network interfaces) with the specified transit gateway multicast group.

\n

A multicast source is a network interface attached to a supported instance that sends\n multicast traffic. For more information about supported instances, see Multicast\n on transit gateways in the Amazon Web Services Transit Gateways Guide.

\n

After you add the source, use SearchTransitGatewayMulticastGroups to verify that the source was added to the multicast\n group.

" } }, "com.amazonaws.ec2#RegisterTransitGatewayMulticastGroupSourcesRequest": { @@ -86651,7 +86926,7 @@ "target": "com.amazonaws.ec2#ReleaseIpamPoolAllocationResult" }, "traits": { - "smithy.api#documentation": "

Release an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.\n

\n \n

All EC2 API actions follow an eventual consistency model.

\n
" + "smithy.api#documentation": "

Release an allocation within an IPAM pool. The Region you use should be the IPAM pool locale. The locale is the Amazon Web Services Region where this IPAM pool is available for allocations. You can only use this action to release manual allocations. To remove an allocation for a resource without deleting the resource, set its monitored state to false using ModifyIpamResourceCidr. For more information, see Release an allocation in the Amazon VPC IPAM User Guide.\n

\n \n

All EC2 API actions follow an eventual consistency model.
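A small Soto (Swift) sketch of releasing a manual allocation, assuming the request members mirror this model (cidr, ipamPoolAllocationId, ipamPoolId); all identifiers are placeholders.

```swift
import SotoEC2

// Sketch: release a manual IPAM pool allocation. Call this in the Region that
// matches the pool's locale, as described above.
func releaseAllocation(ec2: EC2) async throws {
    let request = EC2.ReleaseIpamPoolAllocationRequest(
        cidr: "10.0.0.0/24",
        ipamPoolAllocationId: "ipam-pool-alloc-placeholder",
        ipamPoolId: "ipam-pool-placeholder"
    )
    _ = try await ec2.releaseIpamPoolAllocation(request)
    // Eventual consistency: a follow-up describe call may briefly still list it.
}
```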

\n
" } }, "com.amazonaws.ec2#ReleaseIpamPoolAllocationRequest": { @@ -87794,7 +88069,7 @@ "KernelId": { "target": "com.amazonaws.ec2#KernelId", "traits": { - "smithy.api#documentation": "

The ID of the kernel.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon Elastic Compute Cloud User Guide.

\n
" + "smithy.api#documentation": "

The ID of the kernel.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon EC2 User Guide.

\n
" } }, "EbsOptimized": { @@ -87826,13 +88101,13 @@ "ImageId": { "target": "com.amazonaws.ec2#ImageId", "traits": { - "smithy.api#documentation": "

The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which\n will resolve to an AMI ID on launch.

\n

Valid formats:

  • ami-17characters00000
  • resolve:ssm:parameter-name
  • resolve:ssm:parameter-name:version-number
  • resolve:ssm:parameter-name:label
  • resolve:ssm:public-parameter

Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. \n If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID.

\n
\n

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which\n will resolve to an AMI ID on launch.

\n

Valid formats:

  • ami-17characters00000
  • resolve:ssm:parameter-name
  • resolve:ssm:parameter-name:version-number
  • resolve:ssm:parameter-name:label
  • resolve:ssm:public-parameter

Currently, EC2 Fleet and Spot Fleet do not support specifying a Systems Manager parameter. \n If the launch template will be used by an EC2 Fleet or Spot Fleet, you must specify the AMI ID.

\n
\n

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.
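To make the resolve:ssm: formats concrete, a hedged Soto (Swift) sketch that creates a launch template referencing an SSM parameter instead of a literal AMI ID; the parameter and template names are placeholders.

```swift
import SotoEC2

// Sketch: the AMI is resolved from the Systems Manager parameter at launch time.
func createTemplate(ec2: EC2) async throws {
    let data = EC2.RequestLaunchTemplateData(
        imageId: "resolve:ssm:my-golden-ami-parameter"   // resolve:ssm:parameter-name form
    )
    let request = EC2.CreateLaunchTemplateRequest(
        launchTemplateData: data,
        launchTemplateName: "template-with-ssm-ami"
    )
    _ = try await ec2.createLaunchTemplate(request)
    // Note: if this template is used by EC2 Fleet or Spot Fleet, a literal AMI ID is required.
}
```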

" } }, "InstanceType": { "target": "com.amazonaws.ec2#InstanceType", "traits": { - "smithy.api#documentation": "

The instance type. For more information, see Instance types in the\n Amazon Elastic Compute Cloud User Guide.

\n

If you specify InstanceType, you can't specify\n InstanceRequirements.

" + "smithy.api#documentation": "

The instance type. For more information, see Amazon EC2 instance types in\n the Amazon EC2 User Guide.

\n

If you specify InstanceType, you can't specify\n InstanceRequirements.

" } }, "KeyName": { @@ -87856,7 +88131,7 @@ "RamDiskId": { "target": "com.amazonaws.ec2#RamdiskId", "traits": { - "smithy.api#documentation": "

The ID of the RAM disk.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon Elastic Compute Cloud User Guide.

\n
" + "smithy.api#documentation": "

The ID of the RAM disk.

\n \n

We recommend that you use PV-GRUB instead of kernels and RAM disks. For more\n information, see User provided\n kernels in the Amazon EC2 User Guide.

\n
" } }, "DisableApiTermination": { @@ -87874,7 +88149,7 @@ "UserData": { "target": "com.amazonaws.ec2#SensitiveUserData", "traits": { - "smithy.api#documentation": "

The user data to make available to the instance. You must provide base64-encoded text.\n User data is limited to 16 KB. For more information, see Run commands on your Linux instance at\n launch (Linux) or Work with instance\n user data (Windows) in the Amazon Elastic Compute Cloud User Guide.

\n

If you are creating the launch template for use with Batch, the user\n data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide.

" + "smithy.api#documentation": "

The user data to make available to the instance. You must provide base64-encoded text.\n User data is limited to 16 KB. For more information, see Run commands on your Amazon EC2 instance at\n launch in the Amazon EC2 User Guide.

\n

If you are creating the launch template for use with Batch, the user\n data must be provided in the MIME multi-part archive format. For more information, see Amazon EC2 user data in launch templates in the Batch User Guide.
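Since the field expects base64-encoded text, a short Swift sketch of encoding a user data script before assigning it; the script content is a placeholder.

```swift
import Foundation
import SotoEC2

// Encode the script with Foundation, then place it in the launch template data.
let script = """
#!/bin/bash
echo "hello from user data" > /tmp/hello.txt
"""
let encoded = Data(script.utf8).base64EncodedString()   // keep the result under 16 KB
let templateData = EC2.RequestLaunchTemplateData(userData: encoded)
print(templateData.userData ?? "")
```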

" } }, "TagSpecifications": { @@ -87927,7 +88202,7 @@ "CpuOptions": { "target": "com.amazonaws.ec2#LaunchTemplateCpuOptionsRequest", "traits": { - "smithy.api#documentation": "

The CPU options for the instance. For more information, see Optimizing CPU Options in the Amazon Elastic Compute Cloud User\n Guide.

" + "smithy.api#documentation": "

The CPU options for the instance. For more information, see Optimize CPU options in the Amazon EC2 User Guide.

" } }, "CapacityReservationSpecification": { @@ -87946,19 +88221,19 @@ "HibernationOptions": { "target": "com.amazonaws.ec2#LaunchTemplateHibernationOptionsRequest", "traits": { - "smithy.api#documentation": "

Indicates whether an instance is enabled for hibernation. This parameter is valid only\n if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your instance in the\n Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Indicates whether an instance is enabled for hibernation. This parameter is valid only\n if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your Amazon EC2 instance\n in the Amazon EC2 User Guide.

" } }, "MetadataOptions": { "target": "com.amazonaws.ec2#LaunchTemplateInstanceMetadataOptionsRequest", "traits": { - "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon EC2 User Guide.

" } }, "EnclaveOptions": { "target": "com.amazonaws.ec2#LaunchTemplateEnclaveOptionsRequest", "traits": { - "smithy.api#documentation": "

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more\n information, see What is Amazon Web Services Nitro Enclaves?\n in the Amazon Web Services Nitro Enclaves User Guide.

\n

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" + "smithy.api#documentation": "

Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more\n information, see What is Amazon Web Services Nitro Enclaves?\n in the Amazon Web Services Nitro Enclaves User Guide.

\n

You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance.

" } }, "InstanceRequirements": { @@ -87982,7 +88257,7 @@ "DisableApiStop": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether to enable the instance for stop protection. For more information,\n see Stop\n protection in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Indicates whether to enable the instance for stop protection. For more information,\n see Enable stop protection for your instance in the\n Amazon EC2 User Guide.

" } } }, @@ -88185,7 +88460,7 @@ "target": "com.amazonaws.ec2#RequestSpotInstancesResult" }, "traits": { - "smithy.api#documentation": "

Creates a Spot Instance request.

\n

For more information, see Spot Instance requests in\n the Amazon EC2 User Guide for Linux Instances.

\n \n

We strongly discourage using the RequestSpotInstances API because it is a legacy\n API with no planned investment. For options for requesting Spot Instances, see\n Which\n is the best Spot request method to use? in the\n Amazon EC2 User Guide for Linux Instances.

\n
", + "smithy.api#documentation": "

Creates a Spot Instance request.

\n

For more information, see Work with Spot Instances in\n the Amazon EC2 User Guide.

\n \n

We strongly discourage using the RequestSpotInstances API because it is a legacy\n API with no planned investment. For options for requesting Spot Instances, see\n Which\n is the best Spot request method to use? in the\n Amazon EC2 User Guide.

\n
", "smithy.api#examples": [ { "title": "To create a one-time Spot Instance request", @@ -88256,7 +88531,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see How to Ensure\n Idempotency in the Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency in\n Amazon EC2 API requests in the Amazon EC2 User Guide.
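A minimal Soto (Swift) sketch of supplying such a token, assuming the generated member is clientToken; a UUID works well because the same value can be reused on retry.

```swift
import Foundation
import SotoEC2

// Sketch: an explicit client token makes a retried call idempotent instead of
// creating a second Spot Instance request. Launch details are omitted here.
func requestSpot(ec2: EC2) async throws {
    let token = UUID().uuidString          // persist and reuse this when retrying
    let request = EC2.RequestSpotInstancesRequest(
        clientToken: token,
        instanceCount: 1
    )
    _ = try await ec2.requestSpotInstances(request)
}
```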

", "smithy.api#xmlName": "clientToken" } }, @@ -88575,7 +88850,7 @@ "Weight": { "target": "com.amazonaws.ec2#DoubleWithConstraints", "traits": { - "smithy.api#documentation": "

The number of capacity units provided by the specified instance type. This value, together with the \n\t\t\ttotal target capacity that you specify for the Fleet determine the number of instances for which the \n\t\t\tFleet reserves capacity. Both values are based on units that make sense for your workload. For more \n\t\t\tinformation, see Total target capacity \n\t\t\tin the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

The number of capacity units provided by the specified instance type. This value, together\n\t\t\twith the total target capacity that you specify for the Fleet, determines the number of\n\t\t\tinstances for which the Fleet reserves capacity. Both values are based on units that\n\t\t\tmake sense for your workload. For more information, see Total target\n\t\t\t\tcapacity in the Amazon EC2 User Guide.

" } }, "AvailabilityZone": { @@ -88599,7 +88874,7 @@ "Priority": { "target": "com.amazonaws.ec2#IntegerWithConstraints", "traits": { - "smithy.api#documentation": "

The priority to assign to the instance type. This value is used to determine which of the instance types \n\t\t\tspecified for the Fleet should be prioritized for use. A lower value indicates a high priority. For more \n\t\t\tinformation, see Instance type priority \n\t\t\tin the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

The priority to assign to the instance type. This value is used to determine which of the\n\t\t\tinstance types specified for the Fleet should be prioritized for use. A lower value\n\t\t\tindicates a high priority. For more information, see Instance type\n\t\t\t\tpriority in the Amazon EC2 User Guide.

" } } }, @@ -89668,7 +89943,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Resets an attribute of an instance to its default value. To reset the\n kernel or ramdisk, the instance must be in a stopped\n state. To reset the sourceDestCheck, the instance can be either running or\n stopped.

\n

The sourceDestCheck attribute controls whether source/destination\n checking is enabled. The default value is true, which means checking is\n enabled. This value must be false for a NAT instance to perform NAT. For\n more information, see NAT Instances in the\n Amazon VPC User Guide.

", + "smithy.api#documentation": "

Resets an attribute of an instance to its default value. To reset the\n kernel or ramdisk, the instance must be in a stopped\n state. To reset the sourceDestCheck, the instance can be either running or\n stopped.

\n

The sourceDestCheck attribute controls whether source/destination\n checking is enabled. The default value is true, which means checking is\n enabled. This value must be false for a NAT instance to perform NAT. For\n more information, see NAT instances in the\n Amazon VPC User Guide.
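A hedged Soto (Swift) sketch of resetting this attribute back to its default of true; the instance ID is a placeholder and the enum case name for sourceDestCheck is assumed from Soto's usual mapping.

```swift
import SotoEC2

// Sketch: re-enable source/destination checking on a running or stopped instance.
func resetSourceDestCheck(ec2: EC2) async throws {
    let request = EC2.ResetInstanceAttributeRequest(
        attribute: .sourceDestCheck,          // assumed case name for "sourceDestCheck"
        instanceId: "i-1234567890abcdef0"     // placeholder instance ID
    )
    try await ec2.resetInstanceAttribute(request)
}
```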

", "smithy.api#examples": [ { "title": "To reset the sourceDestCheck attribute", @@ -90391,6 +90666,12 @@ "smithy.api#enumValue": "vpc-block-public-access-exclusion" } }, + "vpc_encryption_control": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "vpc-encryption-control" + } + }, "ipam_resource_discovery": { "target": "smithy.api#Unit", "traits": { @@ -90500,7 +90781,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ImageId", - "smithy.api#documentation": "

The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will\n resolve to the ID of the AMI at instance launch.

\n

The value depends on what you specified in the request. The possible values are:

  • If an AMI ID was specified in the request, then this is the AMI ID.
  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store.
  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value.

For more information, see Use a Systems \n Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will\n resolve to the ID of the AMI at instance launch.

\n

The value depends on what you specified in the request. The possible values are:

  • If an AMI ID was specified in the request, then this is the AMI ID.
  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store.
  • If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value.

For more information, see Use a Systems \n Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "imageId" } }, @@ -90628,7 +90909,7 @@ "target": "com.amazonaws.ec2#LaunchTemplateCpuOptions", "traits": { "aws.protocols#ec2QueryName": "CpuOptions", - "smithy.api#documentation": "

The CPU options for the instance. For more information, see Optimizing CPU options in the Amazon Elastic Compute Cloud User\n Guide.

", + "smithy.api#documentation": "

The CPU options for the instance. For more information, see Optimize CPU options in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "cpuOptions" } }, @@ -90652,7 +90933,7 @@ "target": "com.amazonaws.ec2#LaunchTemplateHibernationOptions", "traits": { "aws.protocols#ec2QueryName": "HibernationOptions", - "smithy.api#documentation": "

Indicates whether an instance is configured for hibernation. For more information, see\n Hibernate\n your instance in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Indicates whether an instance is configured for hibernation. For more information, see\n Hibernate\n your Amazon EC2 instance in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "hibernationOptions" } }, @@ -90660,7 +90941,7 @@ "target": "com.amazonaws.ec2#LaunchTemplateInstanceMetadataOptions", "traits": { "aws.protocols#ec2QueryName": "MetadataOptions", - "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The metadata options for the instance. For more information, see Instance metadata and user data in the\n Amazon EC2 User Guide.

", "smithy.api#xmlName": "metadataOptions" } }, @@ -90700,7 +90981,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DisableApiStop", - "smithy.api#documentation": "

Indicates whether the instance is enabled for stop protection. For more information,\n see Stop\n protection in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Indicates whether the instance is enabled for stop protection. For more information,\n see Enable stop protection for your instance in the\n Amazon EC2 User Guide.

", "smithy.api#xmlName": "disableApiStop" } } @@ -91665,7 +91946,7 @@ "target": "com.amazonaws.ec2#RouteTableAssociationList", "traits": { "aws.protocols#ec2QueryName": "AssociationSet", - "smithy.api#documentation": "

The associations between the route table and one or more subnets or a gateway.

", + "smithy.api#documentation": "

The associations between the route table and your subnets or gateways.

", "smithy.api#xmlName": "associationSet" } }, @@ -91995,7 +92276,7 @@ "target": "com.amazonaws.ec2#Reservation" }, "traits": { - "smithy.api#documentation": "

Launches the specified number of instances using an AMI for which you have\n permissions.

\n

You can specify a number of options, or leave the default options. The following rules\n apply:

  • If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request.
  • All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet.
  • Not all instance types support IPv6 addresses. For more information, see Instance types.
  • If you don't specify a security group ID, we use the default security group. For more information, see Security groups.
  • If any of the AMIs have a product code attached for which the user has not subscribed, the request fails.

You can create a launch template,\n which is a resource that contains the parameters to launch an instance. When you launch\n an instance using RunInstances, you can specify the launch template\n instead of specifying the launch parameters.

\n

To ensure faster instance launches, break up large requests into smaller batches. For\n example, create five separate launch requests for 100 instances each instead of one\n launch request for 500 instances.

\n

An instance is ready for you to use when it's in the running state. You\n can check the state of your instance using DescribeInstances. You can\n tag instances and EBS volumes during launch, after launch, or both. For more\n information, see CreateTags and Tagging your Amazon EC2\n resources.

\n

Linux instances have access to the public key of the key pair at boot. You can use\n this key to provide secure access to the instance. Amazon EC2 public images use this\n feature to provide secure access without passwords. For more information, see Key\n pairs.

\n

For troubleshooting, see What to do if\n an instance immediately terminates, and Troubleshooting connecting to your instance.

", + "smithy.api#documentation": "

Launches the specified number of instances using an AMI for which you have\n permissions.

\n

You can specify a number of options, or leave the default options. The following rules\n apply:

  • If you don't specify a subnet ID, we choose a default subnet from your default VPC for you. If you don't have a default VPC, you must specify a subnet ID in the request.
  • All instances have a network interface with a primary private IPv4 address. If you don't specify this address, we choose one from the IPv4 range of your subnet.
  • Not all instance types support IPv6 addresses. For more information, see Instance types.
  • If you don't specify a security group ID, we use the default security group for the VPC. For more information, see Security groups.
  • If any of the AMIs have a product code attached for which the user has not subscribed, the request fails.

You can create a launch template,\n which is a resource that contains the parameters to launch an instance. When you launch\n an instance using RunInstances, you can specify the launch template\n instead of specifying the launch parameters.

\n

To ensure faster instance launches, break up large requests into smaller batches. For\n example, create five separate launch requests for 100 instances each instead of one\n launch request for 500 instances.

\n

\n RunInstances is subject to both request rate limiting and resource rate\n limiting. For more information, see Request throttling.

\n

An instance is ready for you to use when it's in the running state. You\n can check the state of your instance using DescribeInstances. You can\n tag instances and EBS volumes during launch, after launch, or both. For more\n information, see CreateTags and Tagging your Amazon EC2\n resources.

\n

Linux instances have access to the public key of the key pair at boot. You can use\n this key to provide secure access to the instance. Amazon EC2 public images use this\n feature to provide secure access without passwords. For more information, see Key\n pairs.

\n

For troubleshooting, see What to do if\n an instance immediately terminates, and Troubleshooting connecting to your instance.
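A minimal Soto (Swift) sketch of a RunInstances call that follows the rules above; the AMI ID is a placeholder, and with no subnet or security group given, the defaults from the default VPC apply.

```swift
import Foundation
import SotoEC2

// Sketch: launch between 1 and 3 instances from an AMI, with a client token so a
// retried call does not start a second batch.
func launch(ec2: EC2) async throws -> [String] {
    let request = EC2.RunInstancesRequest(
        clientToken: UUID().uuidString,
        imageId: "ami-1234567890abcdef0",
        maxCount: 3,   // launch up to 3 if capacity allows
        minCount: 1    // fail only if not even 1 can be launched
    )
    let reservation = try await ec2.runInstances(request)
    return reservation.instances?.compactMap { $0.instanceId } ?? []
}
```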

", "smithy.api#examples": [ { "title": "To launch an instance", @@ -92072,7 +92353,7 @@ "InstanceType": { "target": "com.amazonaws.ec2#InstanceType", "traits": { - "smithy.api#documentation": "

The instance type. For more information, see Instance types in the\n Amazon EC2 User Guide.

" + "smithy.api#documentation": "

The instance type. For more information, see Amazon EC2 instance\n types in the Amazon EC2 User Guide.

" } }, "Ipv6AddressCount": { @@ -92104,7 +92385,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The maximum number of instances to launch. If you specify more instances than Amazon\n EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible\n number of instances above MinCount.

\n

Constraints: Between 1 and the maximum number you're allowed for the specified\n instance type. For more information about the default limits, and how to request an\n increase, see How many instances can I\n run in Amazon EC2 in the Amazon EC2 FAQ.

", + "smithy.api#documentation": "

The maximum number of instances to launch. If you specify a value that is more\n capacity than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 \n launches the largest possible number of instances above the specified minimum\n count.

\n

Constraints: Between 1 and the quota for the specified instance type for your account for this Region. \n For more information, see Amazon EC2 instance type quotas.

", "smithy.api#required": {} } }, @@ -92112,7 +92393,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The minimum number of instances to launch. If you specify a minimum that is more\n instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2\n launches no instances.

\n

Constraints: Between 1 and the maximum number you're allowed for the specified\n instance type. For more information about the default limits, and how to request an\n increase, see How many instances can I\n run in Amazon EC2 in the Amazon EC2 General FAQ.

", + "smithy.api#documentation": "

The minimum number of instances to launch. If you specify a value that is more\n capacity than Amazon EC2 can provide in the target Availability Zone, Amazon EC2 does\n not launch any instances.

\n

Constraints: Between 1 and the quota for the specified instance type for your account for this Region.\n For more information, see Amazon EC2 instance type quotas.

", "smithy.api#required": {} } }, @@ -92157,7 +92438,7 @@ "UserData": { "target": "com.amazonaws.ec2#RunInstancesUserData", "traits": { - "smithy.api#documentation": "

The user data script to make available to the instance. For more information, see\n Run\n commands on your Linux instance at launch and Run commands on your\n Windows instance at launch. If you are using a command line tool,\n base64-encoding is performed for you, and you can load the text from a file. Otherwise,\n you must provide base64-encoded text. User data is limited to 16 KB.

" + "smithy.api#documentation": "

The user data script to make available to the instance. For more information, see\n Run\n commands on your Amazon EC2 instance at launch in the Amazon EC2 User\n Guide. If you are using a command line tool, base64-encoding is performed\n for you, and you can load the text from a file. Otherwise, you must provide\n base64-encoded text. User data is limited to 16 KB.

" } }, "AdditionalInfo": { @@ -92286,7 +92567,7 @@ "HibernationOptions": { "target": "com.amazonaws.ec2#HibernationOptionsRequest", "traits": { - "smithy.api#documentation": "

Indicates whether an instance is enabled for hibernation. This parameter is valid only\n if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your instance in the\n Amazon EC2 User Guide.

\n

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same\n instance.

" + "smithy.api#documentation": "

Indicates whether an instance is enabled for hibernation. This parameter is valid only\n if the instance meets the hibernation\n prerequisites. For more information, see Hibernate your Amazon EC2\n instance in the Amazon EC2 User Guide.

\n

You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same\n instance.

" } }, "LicenseSpecifications": { @@ -92352,7 +92633,7 @@ "target": "com.amazonaws.ec2#RunScheduledInstancesResult" }, "traits": { - "smithy.api#documentation": "

Launches the specified Scheduled Instances.

\n

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

\n

You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, \n but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, \n you can launch it again after a few minutes. For more information, see Scheduled Instances\n in the Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Launches the specified Scheduled Instances.

\n

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

\n

You must launch a Scheduled Instance during its scheduled time period. You can't stop or\n reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a\n Scheduled Instance before the current scheduled time period ends, you can launch it again\n after a few minutes.

" } }, "com.amazonaws.ec2#RunScheduledInstancesRequest": { @@ -94065,7 +94346,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a\n kernel panic (on Linux instances), or a blue\n screen/stop error (on Windows instances). For\n instances based on Intel and AMD processors, the interrupt is received as a\n non-maskable interrupt (NMI).

\n

In general, the operating system crashes and reboots when a kernel panic or stop error\n is triggered. The operating system can also be configured to perform diagnostic tasks,\n such as generating a memory dump file, loading a secondary kernel, or obtaining a call\n trace.

\n

Before sending a diagnostic interrupt to your instance, ensure that its operating\n system is configured to perform the required diagnostic tasks.

\n

For more information about configuring your operating system to generate a crash dump\n when a kernel panic or stop error occurs, see Send a diagnostic interrupt\n (for advanced users) (Linux instances) or Send a diagnostic\n interrupt (for advanced users) (Windows instances).

" + "smithy.api#documentation": "

Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a\n kernel panic (on Linux instances), or a blue\n screen/stop error (on Windows instances). For\n instances based on Intel and AMD processors, the interrupt is received as a\n non-maskable interrupt (NMI).

\n

In general, the operating system crashes and reboots when a kernel panic or stop error\n is triggered. The operating system can also be configured to perform diagnostic tasks,\n such as generating a memory dump file, loading a secondary kernel, or obtaining a call\n trace.

\n

Before sending a diagnostic interrupt to your instance, ensure that its operating\n system is configured to perform the required diagnostic tasks.

\n

For more information about configuring your operating system to generate a crash dump\n when a kernel panic or stop error occurs, see Send a diagnostic interrupt\n (for advanced users) in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#SendDiagnosticInterruptRequest": { @@ -94568,7 +94849,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the\n volume encryption key for the parent volume.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that was used to protect the\n volume encryption key for the parent volume.

", "smithy.api#xmlName": "kmsKeyId" } }, @@ -94616,7 +94897,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "StatusMessage", - "smithy.api#documentation": "

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails\n (for example, if the proper Key Management Service (KMS) permissions are not obtained) this field displays error\n state details to help you diagnose why the error occurred. This parameter is only returned by\n DescribeSnapshots.

", + "smithy.api#documentation": "

Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails\n (for example, if the proper KMS permissions are not obtained) this field displays error\n state details to help you diagnose why the error occurred. This parameter is only returned by\n DescribeSnapshots.

", "smithy.api#xmlName": "statusMessage" } }, @@ -95340,7 +95621,7 @@ } }, "traits": { - "smithy.api#documentation": "

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

" + "smithy.api#documentation": "

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity\n rebalancing in the Amazon EC2 User Guide.

" } }, "com.amazonaws.ec2#SpotDatafeedSubscription": { @@ -95630,7 +95911,7 @@ "target": "com.amazonaws.ec2#AllocationStrategy", "traits": { "aws.protocols#ec2QueryName": "AllocationStrategy", - "smithy.api#documentation": "

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance\n pools specified by the Spot Fleet launch configuration. For more information, see Allocation\n strategies for Spot Instances in the Amazon EC2 User Guide.

\n
\n
priceCapacityOptimized (recommended)
\n
\n

Spot Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. Spot Fleet then requests Spot Instances from the lowest priced of these pools.

\n
\n
capacityOptimized
\n
\n

Spot Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. To give certain\n instance types a higher chance of launching first, use\n capacityOptimizedPrioritized. Set a priority for each instance type by\n using the Priority parameter for LaunchTemplateOverrides. You can\n assign the same priority to different LaunchTemplateOverrides. EC2 implements\n the priorities on a best-effort basis, but optimizes for capacity first.\n capacityOptimizedPrioritized is supported only if your Spot Fleet uses a\n launch template. Note that if the OnDemandAllocationStrategy is set to\n prioritized, the same priority is applied when fulfilling On-Demand\n capacity.

\n
\n
diversified
\n
\n

Spot Fleet requests instances from all of the Spot Instance pools that you\n specify.

\n
\n
lowestPrice
\n
\n

Spot Fleet requests instances from the lowest priced Spot Instance pool that\n has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances\n come from the next lowest priced pool that has available capacity. If a pool runs out of\n capacity before fulfilling your desired capacity, Spot Fleet will continue to fulfill your\n request by drawing from the next lowest priced pool. To ensure that your desired capacity is\n met, you might receive Spot Instances from several pools. Because this strategy only considers instance \n price and not capacity availability, it might lead to high interruption rates.

\n
\n
\n

Default: lowestPrice\n

", + "smithy.api#documentation": "

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance\n pools specified by the Spot Fleet launch configuration. For more information, see Allocation\n strategies for Spot Instances in the Amazon EC2 User Guide.

\n
\n
priceCapacityOptimized (recommended)
\n
\n

Spot Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. Spot Fleet then requests Spot Instances from the lowest priced of these pools.

\n
\n
capacityOptimized
\n
\n

Spot Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. To give certain\n instance types a higher chance of launching first, use\n capacityOptimizedPrioritized. Set a priority for each instance type by\n using the Priority parameter for LaunchTemplateOverrides. You can\n assign the same priority to different LaunchTemplateOverrides. EC2 implements\n the priorities on a best-effort basis, but optimizes for capacity first.\n capacityOptimizedPrioritized is supported only if your Spot Fleet uses a\n launch template. Note that if the OnDemandAllocationStrategy is set to\n prioritized, the same priority is applied when fulfilling On-Demand\n capacity.

\n
\n
diversified
\n
\n

Spot Fleet requests instances from all of the Spot Instance pools that you\n specify.

\n
\n
lowestPrice (not recommended)
\n
\n \n

We don't recommend the lowestPrice allocation strategy because\n it has the highest risk of interruption for your Spot Instances.

\n
\n

Spot Fleet requests instances from the lowest priced Spot Instance pool that has available\n capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances\n come from the next lowest priced pool that has available capacity. If a pool runs\n out of capacity before fulfilling your desired capacity, Spot Fleet will continue to\n fulfill your request by drawing from the next lowest priced pool. To ensure that\n your desired capacity is met, you might receive Spot Instances from several pools. Because\n this strategy only considers instance price and not capacity availability, it\n might lead to high interruption rates.

\n
\n
\n

Default: lowestPrice\n

", "smithy.api#xmlName": "allocationStrategy" } }, @@ -95687,7 +95968,7 @@ "traits": { "aws.protocols#ec2QueryName": "IamFleetRole", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that\n grants the Spot Fleet the permission to request, launch, terminate, and tag instances on\n your behalf. For more information, see Spot\n Fleet prerequisites in the Amazon EC2 User Guide. Spot Fleet\n can terminate Spot Instances on your behalf when you cancel its Spot Fleet request using\n CancelSpotFleetRequests or when the Spot Fleet request expires, if you set\n TerminateInstancesWithExpiration.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role\n that grants the Spot Fleet the permission to request, launch, terminate, and tag instances\n on your behalf. For more information, see Spot\n Fleet prerequisites in the Amazon EC2 User Guide. Spot Fleet can\n terminate Spot Instances on your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests or when the Spot Fleet request expires, if you set\n TerminateInstancesWithExpiration.

", "smithy.api#required": {}, "smithy.api#xmlName": "iamFleetRole" } @@ -95738,7 +96019,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "OnDemandMaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay. You\n can use the onDemandMaxTotalPrice parameter, the\n spotMaxTotalPrice parameter, or both parameters to ensure that your\n fleet cost does not exceed your budget. If you set a maximum price per hour for the\n On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the\n maximum amount you're willing to pay. When the maximum amount you're willing to pay is\n reached, the fleet stops launching instances even if it hasn’t met the target\n capacity.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The onDemandMaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User\n Guide.

\n
", + "smithy.api#documentation": "

The maximum amount per hour for On-Demand Instances that you're willing to pay. You\n can use the onDemandMaxTotalPrice parameter, the\n spotMaxTotalPrice parameter, or both parameters to ensure that your\n fleet cost does not exceed your budget. If you set a maximum price per hour for the\n On-Demand Instances and Spot Instances in your request, Spot Fleet will launch instances until it reaches the\n maximum amount you're willing to pay. When the maximum amount you're willing to pay is\n reached, the fleet stops launching instances even if it hasn’t met the target\n capacity.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The onDemandMaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for onDemandMaxTotalPrice. For more information, see Surplus credits can incur charges in the\n Amazon EC2 User Guide.

\n
", "smithy.api#xmlName": "onDemandMaxTotalPrice" } }, @@ -95746,7 +96027,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "SpotMaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. You can use\n the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice\n parameter, or both parameters to ensure that your fleet cost does not exceed your budget.\n If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will\n launch instances until it reaches the maximum amount you're willing to pay. When the\n maximum amount you're willing to pay is reached, the fleet stops launching instances even\n if it hasn’t met the target capacity.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The spotMaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User\n Guide.

\n
", + "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. You can use\n the spotMaxTotalPrice parameter, the onDemandMaxTotalPrice\n parameter, or both parameters to ensure that your fleet cost does not exceed your budget.\n If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, Spot Fleet will\n launch instances until it reaches the maximum amount you're willing to pay. When the\n maximum amount you're willing to pay is reached, the fleet stops launching instances even\n if it hasn’t met the target capacity.

\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The spotMaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for spotMaxTotalPrice. For more information, see Surplus credits can incur charges in the\n Amazon EC2 User Guide.
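A hedged Soto (Swift) sketch showing where these two ceilings sit on the Spot Fleet request configuration; the role ARN is a placeholder and the rest of the fleet configuration (launch specifications and so on) is omitted.

```swift
import SotoEC2

// Sketch: cap hourly spend separately for the Spot and On-Demand portions of the fleet.
let fleetConfig = EC2.SpotFleetRequestConfigData(
    iamFleetRole: "arn:aws:iam::123456789012:role/aws-ec2-spot-fleet-tagging-role",
    onDemandMaxTotalPrice: "10.00",   // USD per hour ceiling for On-Demand capacity
    spotMaxTotalPrice: "5.00",        // USD per hour ceiling for Spot capacity
    targetCapacity: 20
)
// Surplus credits from unlimited T instances are not counted against either ceiling.
print(fleetConfig)
```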

\n
", "smithy.api#xmlName": "spotMaxTotalPrice" } }, @@ -96022,7 +96303,7 @@ "target": "com.amazonaws.ec2#SpotInstanceState", "traits": { "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The state of the Spot Instance request. Spot request status information helps track your Spot\n Instance requests. For more information, see Spot request status in the\n Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

The state of the Spot Instance request. Spot request status information helps track your Spot\n Instance requests. For more information, see Spot request status in the\n Amazon EC2 User Guide.

", "smithy.api#xmlName": "state" } }, @@ -96172,7 +96453,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Code", - "smithy.api#documentation": "

The status code. For a list of status codes, see Spot request status codes in the Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

The status code. For a list of status codes, see Spot request status codes in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "code" } }, @@ -96221,7 +96502,7 @@ "target": "com.amazonaws.ec2#SpotCapacityRebalance", "traits": { "aws.protocols#ec2QueryName": "CapacityRebalance", - "smithy.api#documentation": "

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

", + "smithy.api#documentation": "

The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity\n rebalancing in the Amazon EC2 User Guide.

", "smithy.api#xmlName": "capacityRebalance" } } @@ -96275,7 +96556,7 @@ "target": "com.amazonaws.ec2#SpotAllocationStrategy", "traits": { "aws.protocols#ec2QueryName": "AllocationStrategy", - "smithy.api#documentation": "

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance\n pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the\n Amazon EC2 User Guide.

\n
\n
price-capacity-optimized (recommended)
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

\n
\n
capacity-optimized
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. To give certain\n instance types a higher chance of launching first, use\n capacity-optimized-prioritized. Set a priority for each instance type by\n using the Priority parameter for LaunchTemplateOverrides. You can\n assign the same priority to different LaunchTemplateOverrides. EC2 implements\n the priorities on a best-effort basis, but optimizes for capacity first.\n capacity-optimized-prioritized is supported only if your EC2 Fleet uses a\n launch template. Note that if the On-Demand AllocationStrategy is set to\n prioritized, the same priority is applied when fulfilling On-Demand\n capacity.

\n
\n
diversified
\n
\n

EC2 Fleet requests instances from all of the Spot Instance pools that you\n specify.

\n
\n
lowest-price
\n
\n

EC2 Fleet requests instances from the lowest priced Spot Instance pool that\n has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances\n come from the next lowest priced pool that has available capacity. If a pool runs out of\n capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your\n request by drawing from the next lowest priced pool. To ensure that your desired capacity is\n met, you might receive Spot Instances from several pools. Because this strategy only considers instance \n price and not capacity availability, it might lead to high interruption rates.

\n
\n
\n

Default: lowest-price\n

", + "smithy.api#documentation": "

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance\n pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the\n Amazon EC2 User Guide.

\n
\n
price-capacity-optimized (recommended)
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

\n
\n
capacity-optimized
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. To give certain\n instance types a higher chance of launching first, use\n capacity-optimized-prioritized. Set a priority for each instance type by\n using the Priority parameter for LaunchTemplateOverrides. You can\n assign the same priority to different LaunchTemplateOverrides. EC2 implements\n the priorities on a best-effort basis, but optimizes for capacity first.\n capacity-optimized-prioritized is supported only if your EC2 Fleet uses a\n launch template. Note that if the On-Demand AllocationStrategy is set to\n prioritized, the same priority is applied when fulfilling On-Demand\n capacity.

\n
\n
diversified
\n
\n

EC2 Fleet requests instances from all of the Spot Instance pools that you\n specify.

\n
\n
lowest-price (not recommended)
\n
\n \n

We don't recommend the lowest-price allocation strategy because\n it has the highest risk of interruption for your Spot Instances.

\n
\n

EC2 Fleet requests instances from the lowest priced Spot Instance pool that has available\n capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances\n come from the next lowest priced pool that has available capacity. If a pool runs\n out of capacity before fulfilling your desired capacity, EC2 Fleet will continue to\n fulfill your request by drawing from the next lowest priced pool. To ensure that\n your desired capacity is met, you might receive Spot Instances from several pools. Because\n this strategy only considers instance price and not capacity availability, it\n might lead to high interruption rates.

\n
\n
\n

Default: lowest-price\n

", "smithy.api#xmlName": "allocationStrategy" } }, @@ -96323,7 +96604,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "MinTargetCapacity", - "smithy.api#documentation": "

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is\n not reached, the fleet launches no instances.

\n

Supported only for fleets of type instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

", + "smithy.api#documentation": "

The minimum target capacity for Spot Instances in the fleet. If this minimum capacity isn't\n reached, no instances are launched.

\n

Constraints: Maximum value of 1000. Supported only for fleets of type\n instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

", "smithy.api#xmlName": "minTargetCapacity" } }, @@ -96331,7 +96612,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "MaxTotalPrice", - "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend\n using this parameter because it can lead to increased interruptions. If you do not specify\n this parameter, you will pay the current Spot price.

\n \n

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

\n
\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The maxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for maxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User\n Guide.

\n
", + "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend\n using this parameter because it can lead to increased interruptions. If you do not specify\n this parameter, you will pay the current Spot price.

\n \n

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

\n
\n \n

If your fleet includes T instances that are configured as unlimited, and\n if their average CPU usage exceeds the baseline utilization, you will incur a charge for\n surplus credits. The maxTotalPrice does not account for surplus credits,\n and, if you use surplus credits, your final cost might be higher than what you specified\n for maxTotalPrice. For more information, see Surplus credits can incur charges in the\n Amazon EC2 User Guide.

\n
", "smithy.api#xmlName": "maxTotalPrice" } } @@ -96346,7 +96627,7 @@ "AllocationStrategy": { "target": "com.amazonaws.ec2#SpotAllocationStrategy", "traits": { - "smithy.api#documentation": "

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance\n pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the\n Amazon EC2 User Guide.

\n
\n
price-capacity-optimized (recommended)
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

\n
\n
capacity-optimized
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. To give certain\n instance types a higher chance of launching first, use\n capacity-optimized-prioritized. Set a priority for each instance type by\n using the Priority parameter for LaunchTemplateOverrides. You can\n assign the same priority to different LaunchTemplateOverrides. EC2 implements\n the priorities on a best-effort basis, but optimizes for capacity first.\n capacity-optimized-prioritized is supported only if your EC2 Fleet uses a\n launch template. Note that if the On-Demand AllocationStrategy is set to\n prioritized, the same priority is applied when fulfilling On-Demand\n capacity.

\n
\n
diversified
\n
\n

EC2 Fleet requests instances from all of the Spot Instance pools that you\n specify.

\n
\n
lowest-price
\n
\n

EC2 Fleet requests instances from the lowest priced Spot Instance pool that\n has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances\n come from the next lowest priced pool that has available capacity. If a pool runs out of\n capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your\n request by drawing from the next lowest priced pool. To ensure that your desired capacity is\n met, you might receive Spot Instances from several pools. Because this strategy only considers instance \n price and not capacity availability, it might lead to high interruption rates.

\n
\n
\n

Default: lowest-price\n

" + "smithy.api#documentation": "

The strategy that determines how to allocate the target Spot Instance capacity across the Spot Instance\n pools specified by the EC2 Fleet launch configuration. For more information, see Allocation strategies for Spot Instances in the\n Amazon EC2 User Guide.

\n
\n
price-capacity-optimized (recommended)
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. EC2 Fleet then requests Spot Instances from the lowest priced of these pools.

\n
\n
capacity-optimized
\n
\n

EC2 Fleet identifies the pools with \n the highest capacity availability for the number of instances that are launching. This means \n that we will request Spot Instances from the pools that we believe have the lowest chance of interruption \n in the near term. To give certain\n instance types a higher chance of launching first, use\n capacity-optimized-prioritized. Set a priority for each instance type by\n using the Priority parameter for LaunchTemplateOverrides. You can\n assign the same priority to different LaunchTemplateOverrides. EC2 implements\n the priorities on a best-effort basis, but optimizes for capacity first.\n capacity-optimized-prioritized is supported only if your EC2 Fleet uses a\n launch template. Note that if the On-Demand AllocationStrategy is set to\n prioritized, the same priority is applied when fulfilling On-Demand\n capacity.

\n
\n
diversified
\n
\n

EC2 Fleet requests instances from all of the Spot Instance pools that you\n specify.

\n
\n
lowest-price (not recommended)
\n
\n \n

We don't recommend the lowest-price allocation strategy because\n it has the highest risk of interruption for your Spot Instances.

\n
\n

EC2 Fleet requests instances from the lowest priced Spot Instance pool that\n has available capacity. If the lowest priced pool doesn't have available capacity, the Spot Instances\n come from the next lowest priced pool that has available capacity. If a pool runs out of\n capacity before fulfilling your desired capacity, EC2 Fleet will continue to fulfill your\n request by drawing from the next lowest priced pool. To ensure that your desired capacity is\n met, you might receive Spot Instances from several pools. Because this strategy only considers instance \n price and not capacity availability, it might lead to high interruption rates.

\n
\n
\n

Default: lowest-price\n

" } }, "MaintenanceStrategies": { @@ -96382,13 +96663,13 @@ "MinTargetCapacity": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is\n not reached, the fleet launches no instances.

\n

Supported only for fleets of type instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

" + "smithy.api#documentation": "

The minimum target capacity for Spot Instances in the fleet. If this minimum capacity isn't\n reached, no instances are launched.

\n

Constraints: Maximum value of 1000. Supported only for fleets of type\n instant.

\n

At least one of the following must be specified: SingleAvailabilityZone |\n SingleInstanceType\n

" } }, "MaxTotalPrice": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend\n using this parameter because it can lead to increased interruptions. If you do not specify\n this parameter, you will pay the current Spot price.

\n \n

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

\n
\n \n

If your fleet includes T instances that are configured as unlimited,\n and if their average CPU usage exceeds the baseline utilization, you will incur a charge\n for surplus credits. The MaxTotalPrice does not account for surplus\n credits, and, if you use surplus credits, your final cost might be higher than what you\n specified for MaxTotalPrice. For more information, see Surplus credits can incur charges in the EC2 User\n Guide.

\n
" + "smithy.api#documentation": "

The maximum amount per hour for Spot Instances that you're willing to pay. We do not recommend\n using this parameter because it can lead to increased interruptions. If you do not specify\n this parameter, you will pay the current Spot price.

\n \n

If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter.

\n
\n \n

If your fleet includes T instances that are configured as unlimited, and\n if their average CPU usage exceeds the baseline utilization, you will incur a charge for\n surplus credits. The MaxTotalPrice does not account for surplus credits,\n and, if you use surplus credits, your final cost might be higher than what you specified\n for MaxTotalPrice. For more information, see Surplus credits can incur charges in the\n Amazon EC2 User Guide.

\n
" } } }, @@ -96700,7 +96981,7 @@ "target": "com.amazonaws.ec2#StartInstancesResult" }, "traits": { - "smithy.api#documentation": "

Starts an Amazon EBS-backed instance that you've previously stopped.

\n

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and\n started. When an instance is stopped, the compute resources are released and you are not\n billed for instance usage. However, your root partition Amazon EBS volume remains and\n continues to persist your data, and you are charged for Amazon EBS volume usage. You can\n restart your instance at any time. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.

\n

Before stopping an instance, make sure it is in a state from which it can be\n restarted. Stopping an instance does not preserve data stored in RAM.

\n

Performing this operation on an instance that uses an instance store as its root\n device returns an error.

\n

If you attempt to start a T3 instance with host tenancy and the\n unlimited CPU credit option, the request fails. The\n unlimited CPU credit option is not supported on Dedicated Hosts. Before\n you start the instance, either change its CPU credit option to standard, or\n change its tenancy to default or dedicated.

\n

For more information, see Stop and start your instance\n in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

Starts an Amazon EBS-backed instance that you've previously stopped.

\n

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and\n started. When an instance is stopped, the compute resources are released and you are not\n billed for instance usage. However, your root partition Amazon EBS volume remains and\n continues to persist your data, and you are charged for Amazon EBS volume usage. You can\n restart your instance at any time. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.

\n

Before stopping an instance, make sure it is in a state from which it can be\n restarted. Stopping an instance does not preserve data stored in RAM.

\n

Performing this operation on an instance that uses an instance store as its root\n device returns an error.

\n

If you attempt to start a T3 instance with host tenancy and the\n unlimited CPU credit option, the request fails. The\n unlimited CPU credit option is not supported on Dedicated Hosts. Before\n you start the instance, either change its CPU credit option to standard, or\n change its tenancy to default or dedicated.

\n

For more information, see Stop and start Amazon EC2\n instances in the Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To start a stopped EC2 instance", @@ -96818,7 +97099,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -96898,7 +97179,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, \n see How to ensure idempotency.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -97151,7 +97432,7 @@ "target": "com.amazonaws.ec2#StopInstancesResult" }, "traits": { - "smithy.api#documentation": "

Stops an Amazon EBS-backed instance. For more information, see Stop and start\n your instance in the Amazon EC2 User Guide.

\n

You can use the Stop action to hibernate an instance if the instance is enabled for\n hibernation and it meets the hibernation\n prerequisites. For more information, see Hibernate your instance in the\n Amazon EC2 User Guide.

\n

We don't charge usage for a stopped instance, or data transfer fees; however, your\n root partition Amazon EBS volume remains and continues to persist your data, and you are\n charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.

\n

You can't stop or hibernate instance store-backed instances. You can't use the Stop\n action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate\n Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the\n Amazon EC2 User Guide.

\n

When you stop or hibernate an instance, we shut it down. You can restart your instance\n at any time. Before stopping or hibernating an instance, make sure it is in a state from\n which it can be restarted. Stopping an instance does not preserve data stored in RAM,\n but hibernating an instance does preserve data stored in RAM. If an instance cannot\n hibernate successfully, a normal shutdown occurs.

\n

Stopping and hibernating an instance is different to rebooting or terminating it. For\n example, when you stop or hibernate an instance, the root device and any other devices\n attached to the instance persist. When you terminate an instance, the root device and\n any other devices attached during the instance launch are automatically deleted. For\n more information about the differences between rebooting, stopping, hibernating, and\n terminating instances, see Instance lifecycle\n in the Amazon EC2 User Guide.

\n

When you stop an instance, we attempt to shut it down forcibly after a short while. If\n your instance appears stuck in the stopping state after a period of time, there may be\n an issue with the underlying host computer. For more information, see Troubleshoot\n stopping your instance in the Amazon EC2 User Guide.

", + "smithy.api#documentation": "

Stops an Amazon EBS-backed instance. For more information, see Stop and start\n Amazon EC2 instances in the Amazon EC2 User\n Guide.

\n

You can use the Stop action to hibernate an instance if the instance is enabled\n for hibernation and it meets the hibernation\n prerequisites. For more information, see Hibernate your Amazon EC2\n instance in the Amazon EC2 User Guide.

\n

We don't charge usage for a stopped instance, or data transfer fees; however, your\n root partition Amazon EBS volume remains and continues to persist your data, and you are\n charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.

\n

You can't stop or hibernate instance store-backed instances. You can't use the Stop\n action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate\n Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the\n Amazon EC2 User Guide.

\n

When you stop or hibernate an instance, we shut it down. You can restart your instance\n at any time. Before stopping or hibernating an instance, make sure it is in a state from\n which it can be restarted. Stopping an instance does not preserve data stored in RAM,\n but hibernating an instance does preserve data stored in RAM. If an instance cannot\n hibernate successfully, a normal shutdown occurs.

\n

Stopping and hibernating an instance is different to rebooting or terminating it. For\n example, when you stop or hibernate an instance, the root device and any other devices\n attached to the instance persist. When you terminate an instance, the root device and\n any other devices attached during the instance launch are automatically deleted. For\n more information about the differences between rebooting, stopping, hibernating, and\n terminating instances, see Instance lifecycle\n in the Amazon EC2 User Guide.

\n

When you stop an instance, we attempt to shut it down forcibly after a short while. If\n your instance appears stuck in the stopping state after a period of time, there may be\n an issue with the underlying host computer. For more information, see Troubleshoot\n stopping your instance in the Amazon EC2 User Guide.

", "smithy.api#examples": [ { "title": "To stop a running EC2 instance", @@ -99080,6 +99361,14 @@ "smithy.api#documentation": "

The description of the Traffic Mirror rule.

", "smithy.api#xmlName": "description" } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Tags on Traffic Mirroring filter rules.

", + "smithy.api#xmlName": "tagSet" + } } }, "traits": { @@ -99121,6 +99410,15 @@ "target": "com.amazonaws.ec2#TrafficMirrorFilterRuleField" } }, + "com.amazonaws.ec2#TrafficMirrorFilterRuleIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#TrafficMirrorFilterRuleIdWithResolver", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#TrafficMirrorFilterRuleIdWithResolver": { "type": "string" }, @@ -99133,6 +99431,15 @@ } } }, + "com.amazonaws.ec2#TrafficMirrorFilterRuleSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#TrafficMirrorFilterRule", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#TrafficMirrorFilterSet": { "type": "list", "member": { @@ -105054,7 +105361,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Key Management Service (KMS) KMS key that was used to protect the\n volume encryption key for the volume.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that was used to protect the\n volume encryption key for the volume.

", "smithy.api#xmlName": "kmsKeyId" } }, diff --git a/models/ecr.json b/models/ecr.json index 79a2193878..7f10c77ad4 100644 --- a/models/ecr.json +++ b/models/ecr.json @@ -1905,7 +1905,7 @@ "upstreamRegistryUrl": { "target": "com.amazonaws.ecr#Url", "traits": { - "smithy.api#documentation": "

The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.

\n
    \n
  • \n

    Amazon ECR Public (ecr-public) - public.ecr.aws\n

    \n
  • \n
  • \n

    Docker Hub (docker-hub) -\n registry-1.docker.io\n

    \n
  • \n
  • \n

    Quay (quay) - quay.io\n

    \n
  • \n
  • \n

    Kubernetes (k8s) - registry.k8s.io\n

    \n
  • \n
  • \n

    GitHub Container Registry (github-container-registry) -\n ghcr.io\n

    \n
  • \n
  • \n

    Microsoft Azure Container Registry (azure-container-registry) -\n .azurecr.io\n

    \n
  • \n
", + "smithy.api#documentation": "

The registry URL of the upstream public registry to use as the source for the pull\n through cache rule. The following is the syntax to use for each supported upstream\n registry.

\n
    \n
  • \n

    Amazon ECR Public (ecr-public) - public.ecr.aws\n

    \n
  • \n
  • \n

    Docker Hub (docker-hub) -\n registry-1.docker.io\n

    \n
  • \n
  • \n

    Quay (quay) - quay.io\n

    \n
  • \n
  • \n

    Kubernetes (k8s) - registry.k8s.io\n

    \n
  • \n
  • \n

    GitHub Container Registry (github-container-registry) -\n ghcr.io\n

    \n
  • \n
  • \n

    Microsoft Azure Container Registry (azure-container-registry) -\n .azurecr.io\n

    \n
  • \n
  • \n

    GitLab Container Registry (gitlab-container-registry) -\n registry.gitlab.com\n

    \n
  • \n
", "smithy.api#required": {} } }, @@ -7711,6 +7711,12 @@ "traits": { "smithy.api#enumValue": "azure-container-registry" } + }, + "GitLabContainerRegistry": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "gitlab-container-registry" + } } } }, diff --git a/models/ecs.json b/models/ecs.json index c5ebc25fcc..c9ef466b67 100644 --- a/models/ecs.json +++ b/models/ecs.json @@ -1681,7 +1681,7 @@ } }, "traits": { - "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

With FARGATE_SPOT, you can run interruption\n\t\t\ttolerant tasks at a rate that's discounted compared to the FARGATE price.\n\t\t\t\tFARGATE_SPOT runs tasks on spare compute capacity. When Amazon Web Services needs the\n\t\t\tcapacity back, your tasks are interrupted with a two-minute warning.\n\t\t\t\tFARGATE_SPOT only supports Linux tasks with the X86_64 architecture on\n\t\t\tplatform version 1.3.0 or later.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" } }, "com.amazonaws.ecs#CapacityProviderStrategyItemBase": { @@ -1882,10 +1882,16 @@ "traits": { "smithy.api#documentation": "

The details of the execute command configuration.

" } + }, + "managedStorageConfiguration": { + "target": "com.amazonaws.ecs#ManagedStorageConfiguration", + "traits": { + "smithy.api#documentation": "

The details of the managed storage configuration.

" + } } }, "traits": { - "smithy.api#documentation": "

The execute command configuration for the cluster.

" + "smithy.api#documentation": "

The execute command and managed storage configuration for the cluster.

" } }, "com.amazonaws.ecs#ClusterContainsContainerInstancesException": { @@ -4163,6 +4169,12 @@ "traits": { "smithy.api#documentation": "

The details of the volume that was configuredAtLaunch. You can configure\n\t\t\tdifferent settings like the size, throughput, volumeType, and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume\n\t\t\tmust match the name from the task definition.

" } + }, + "fargateEphemeralStorage": { + "target": "com.amazonaws.ecs#DeploymentEphemeralStorage", + "traits": { + "smithy.api#documentation": "

The Fargate ephemeral storage settings for the deployment.

" + } } }, "traits": { @@ -4294,6 +4306,20 @@ } } }, + "com.amazonaws.ecs#DeploymentEphemeralStorage": { + "type": "structure", + "members": { + "kmsKeyId": { + "target": "com.amazonaws.ecs#String", + "traits": { + "smithy.api#documentation": "

Specify a Key Management Service key ID to encrypt the ephemeral storage for the deployment.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the deployment.

" + } + }, "com.amazonaws.ecs#DeploymentRolloutState": { "type": "enum", "members": { @@ -6255,7 +6281,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

\n \n

The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.

\n
\n

You can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.

\n

The health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.

\n

Amazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.

\n

The following describes the possible healthStatus values for a\n\t\t\tcontainer:

\n
    \n
  • \n

    \n HEALTHY-The container health check has passed\n\t\t\t\t\tsuccessfully.

    \n
  • \n
  • \n

    \n UNHEALTHY-The container health check has failed.

    \n
  • \n
  • \n

    \n UNKNOWN-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.

    \n
  • \n
\n

The following describes the possible healthStatus values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):

\n
    \n
  • \n

    \n UNHEALTHY-One or more essential containers have failed\n\t\t\t\t\ttheir health check.

    \n
  • \n
  • \n

    \n UNKNOWN-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY state.

    \n
  • \n
  • \n

    \n HEALTHY-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.

    \n
  • \n
\n

Consider the following task health example with 2 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tthe task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tthe task health is HEALTHY.

    \n
  • \n
\n

Consider the following task health example with 3 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is UNKNOWN, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is UNKNOWN, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tHEALTHY.

    \n
  • \n
\n

If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.

\n

The following are notes about container health check support:

\n
    \n
  • \n

    When the Amazon ECS agent cannot connect to the Amazon ECS service, the service reports\n\t\t\t\t\tthe container as UNHEALTHY.

    \n
  • \n
  • \n

    The health check statuses are the \"last heard from\" response from the Amazon ECS\n\t\t\t\t\tagent. There are no assumptions made about the status of the container health\n\t\t\t\t\tchecks.

    \n
  • \n
  • \n

    Container health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.

    \n
  • \n
  • \n

    Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0 or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.

    \n
  • \n
  • \n

    Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.

    \n
  • \n
" + "smithy.api#documentation": "

An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile). This configuration maps to the HEALTHCHECK parameter of docker run.

\n \n

The Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.

\n
\n

You can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.

\n

The health check is designed to make sure that your containers survive agent restarts,\n\t\t\tupgrades, or temporary unavailability.

\n

Amazon ECS performs health checks on containers with the default that launched the\n\t\t\tcontainer instance or the task.

\n

The following describes the possible healthStatus values for a\n\t\t\tcontainer:

\n
    \n
  • \n

    \n HEALTHY-The container health check has passed\n\t\t\t\t\tsuccessfully.

    \n
  • \n
  • \n

    \n UNHEALTHY-The container health check has failed.

    \n
  • \n
  • \n

    \n UNKNOWN-The container health check is being evaluated,\n\t\t\t\t\tthere's no container health check defined, or Amazon ECS doesn't have the health\n\t\t\t\t\tstatus of the container.

    \n
  • \n
\n

The following describes the possible healthStatus values based on the\n\t\t\tcontainer health checker status of essential containers in the task with the following\n\t\t\tpriority order (high to low):

\n
    \n
  • \n

    \n UNHEALTHY-One or more essential containers have failed\n\t\t\t\t\ttheir health check.

    \n
  • \n
  • \n

    \n UNKNOWN-Any essential container running within the task is\n\t\t\t\t\tin an UNKNOWN state and no other essential containers have an\n\t\t\t\t\t\tUNHEALTHY state.

    \n
  • \n
  • \n

    \n HEALTHY-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.

    \n
  • \n
\n

Consider the following task health example with 2 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, the task health is UNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tthe task health is UNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tthe task health is HEALTHY.

    \n
  • \n
\n

Consider the following task health example with 3 containers.

\n
    \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is UNKNOWN, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tUNKNOWN, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is UNHEALTHY and Container2 is\n\t\t\t\t\tHEALTHY, and Container3 is HEALTHY, the task health is\n\t\t\t\t\t\tUNHEALTHY.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is UNKNOWN,\n\t\t\t\t\tand Container3 is UNKNOWN, the task health is\n\t\t\t\t\tUNKNOWN.

    \n
  • \n
  • \n

    If Container1 is HEALTHY and Container2 is HEALTHY,\n\t\t\t\t\tand Container3 is HEALTHY, the task health is\n\t\t\t\t\tHEALTHY.

    \n
  • \n
\n

If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.

\n

The following are notes about container health check support:

\n
    \n
  • \n

    If the Amazon ECS container agent becomes disconnected from the Amazon ECS service, this won't\n\t\t\t\t\tcause a container to transition to an UNHEALTHY status. This is by design,\n\t\t\t\t\tto ensure that containers remain running during agent restarts or temporary\n\t\t\t\t\tunavailability. The health check status is the \"last heard from\" response from the Amazon ECS\n\t\t\t\t\tagent, so if the container was considered HEALTHY prior to the disconnect,\n\t\t\t\t\tthat status will remain until the agent reconnects and another health check occurs.\n\t\t\t\t\tThere are no assumptions made about the status of the container health checks.

    \n
  • \n
  • \n

    Container health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.

    \n
  • \n
  • \n

    Container health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0 or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.

    \n
  • \n
  • \n

    Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.

    \n
  • \n
" } }, "com.amazonaws.ecs#HealthStatus": { @@ -8002,6 +8028,26 @@ } } }, + "com.amazonaws.ecs#ManagedStorageConfiguration": { + "type": "structure", + "members": { + "kmsKeyId": { + "target": "com.amazonaws.ecs#String", + "traits": { + "smithy.api#documentation": "

Specify a Key Management Service key ID to encrypt the managed storage.

" + } + }, + "fargateEphemeralStorageKmsKeyId": { + "target": "com.amazonaws.ecs#String", + "traits": { + "smithy.api#documentation": "

Specify the Key Management Service key ID for the Fargate ephemeral storage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The managed storage configuration for the cluster.

" + } + }, "com.amazonaws.ecs#ManagedTerminationProtection": { "type": "enum", "members": { @@ -9196,7 +9242,7 @@ "runtimePlatform": { "target": "com.amazonaws.ecs#RuntimePlatform", "traits": { - "smithy.api#documentation": "

The operating system that your tasks definitions run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.

\n

When you specify a task definition in a service, this value must match the\n\t\t\t\truntimePlatform value of the service.

" + "smithy.api#documentation": "

The operating system that your task definitions run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.

" } } }, @@ -9334,14 +9380,14 @@ "value": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The value for the specified resource type.

\n

If the GPU type is used, the value is the number of physical\n\t\t\t\tGPUs the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.

\n

If the InferenceAccelerator type is used, the value matches\n\t\t\tthe deviceName for an InferenceAccelerator specified in a task definition.

", + "smithy.api#documentation": "

The value for the specified resource type.

\n

When the type is GPU, the value is the number of physical GPUs the\n\t\t\tAmazon ECS container agent reserves for the container. The number of GPUs that's reserved for\n\t\t\tall containers in a task can't exceed the number of available GPUs on the container\n\t\t\tinstance that the task is launched on.

\n

When the type is InferenceAccelerator, the value matches\n\t\t\tthe deviceName for an InferenceAccelerator specified in a task definition.

", "smithy.api#required": {} } }, "type": { "target": "com.amazonaws.ecs#ResourceType", "traits": { - "smithy.api#documentation": "

The type of resource to assign to a container. The supported values are\n\t\t\t\tGPU or InferenceAccelerator.

", + "smithy.api#documentation": "

The type of resource to assign to a container.

", "smithy.api#required": {} } } @@ -10085,7 +10131,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object that represents the Amazon Web Services Private Certificate Authority certificate.

" + "smithy.api#documentation": "

The certificate root authority that secures your service.

" } }, "com.amazonaws.ecs#ServiceConnectTlsConfiguration": { @@ -10112,7 +10158,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object that represents the configuration for Service Connect TLS.

" + "smithy.api#documentation": "

The key that encrypts and decrypts your resources for Service Connect TLS.

" } }, "com.amazonaws.ecs#ServiceEvent": { @@ -10676,7 +10722,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.

\n

When StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.

\n \n

The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

\n
" + "smithy.api#documentation": "

Stops a running task. Any tags associated with the task will be deleted.

\n

When StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL value is sent.

\n

For Windows containers, POSIX signals do not work and the runtime stops the container by sending\n\t\t\ta CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown\n\t\t\t\tof (Windows) container #25982 on GitHub.

\n \n

The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

\n
" } }, "com.amazonaws.ecs#StopTaskRequest": { @@ -11419,6 +11465,12 @@ "traits": { "smithy.api#documentation": "

The ephemeral storage settings for the task.

" } + }, + "fargateEphemeralStorage": { + "target": "com.amazonaws.ecs#TaskEphemeralStorage", + "traits": { + "smithy.api#documentation": "

The Fargate ephemeral storage settings for the task.

" + } } }, "traits": { @@ -11684,6 +11736,27 @@ } } }, + "com.amazonaws.ecs#TaskEphemeralStorage": { + "type": "structure", + "members": { + "sizeInGiB": { + "target": "com.amazonaws.ecs#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The total amount, in GiB, of the ephemeral storage to set for the task. The minimum\n\t\t\tsupported value is 20 GiB and the maximum supported value is 200\n\t\t\tGiB.

" + } + }, + "kmsKeyId": { + "target": "com.amazonaws.ecs#String", + "traits": { + "smithy.api#documentation": "

Specify a Key Management Service key ID to encrypt the ephemeral storage for the task.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task.

" + } + }, "com.amazonaws.ecs#TaskField": { "type": "enum", "members": { @@ -12013,6 +12086,12 @@ "traits": { "smithy.api#documentation": "

The metadata that you apply to the task set to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.

\n

The following basic restrictions apply to tags:

\n
    \n
  • \n

    Maximum number of tags per resource - 50

    \n
  • \n
  • \n

    For each resource, each tag key must be unique, and each tag key can have only\n one value.

    \n
  • \n
  • \n

    Maximum key length - 128 Unicode characters in UTF-8

    \n
  • \n
  • \n

    Maximum value length - 256 Unicode characters in UTF-8

    \n
  • \n
  • \n

    If your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.

    \n
  • \n
  • \n

    Tag keys and values are case-sensitive.

    \n
  • \n
  • \n

    Do not use aws:, AWS:, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.

    \n
  • \n
" } + }, + "fargateEphemeralStorage": { + "target": "com.amazonaws.ecs#DeploymentEphemeralStorage", + "traits": { + "smithy.api#documentation": "

The Fargate ephemeral storage settings for the task set.

" + } } }, "traits": { diff --git a/models/efs.json b/models/efs.json index e40d01229c..c4eb1a8c29 100644 --- a/models/efs.json +++ b/models/efs.json @@ -1407,7 +1407,20 @@ "outputToken": "NextMarker", "items": "FileSystems", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeFileSystemsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.efs#DescribeFileSystemsRequest": { @@ -2776,7 +2789,7 @@ "sdkId": "EFS", "arnNamespace": "elasticfilesystem", "cloudFormationName": "EFS", - "cloudTrailEventSource": "efs.amazonaws.com", + "cloudTrailEventSource": "elasticfilesystem.amazonaws.com", "docId": "elasticfilesystem-2015-02-01", "endpointPrefix": "elasticfilesystem" }, diff --git a/models/eks.json b/models/eks.json index 782f5ecdfb..d6916f4981 100644 --- a/models/eks.json +++ b/models/eks.json @@ -1592,6 +1592,12 @@ "traits": { "smithy.api#documentation": "

The configuration values that you provided.

" } + }, + "podIdentityAssociations": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "

An array of Pod Identity Associations owned by the Addon. Each EKS Pod Identity association maps a role to a service account in a namespace in the cluster.

\n

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" + } } }, "traits": { @@ -1732,6 +1738,18 @@ "traits": { "smithy.api#enumValue": "K8sResourceNotFound" } + }, + "ADDON_SUBSCRIPTION_NEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AddonSubscriptionNeeded" + } + }, + "ADDON_PERMISSION_FAILURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AddonPermissionFailure" + } } } }, @@ -1741,6 +1759,60 @@ "target": "com.amazonaws.eks#AddonIssue" } }, + "com.amazonaws.eks#AddonPodIdentityAssociations": { + "type": "structure", + "members": { + "serviceAccount": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The name of a Kubernetes Service Account.

", + "smithy.api#required": {} + } + }, + "roleArn": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The ARN of an IAM Role.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A type of Pod Identity Association owned by an Amazon EKS Add-on.

\n

Each EKS Pod Identity Association maps a role to a service account in a namespace in the cluster.

\n

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" + } + }, + "com.amazonaws.eks#AddonPodIdentityAssociationsList": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#AddonPodIdentityAssociations" + } + }, + "com.amazonaws.eks#AddonPodIdentityConfiguration": { + "type": "structure", + "members": { + "serviceAccount": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The Kubernetes Service Account name used by the addon.

" + } + }, + "recommendedManagedPolicies": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "

A suggested IAM Policy for the addon.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about how to configure IAM for an Addon.

" + } + }, + "com.amazonaws.eks#AddonPodIdentityConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#AddonPodIdentityConfiguration" + } + }, "com.amazonaws.eks#AddonStatus": { "type": "enum", "members": { @@ -1821,6 +1893,13 @@ "smithy.api#default": false, "smithy.api#documentation": "

Whether the add-on requires configuration.

" } + }, + "requiresIamPermissions": { + "target": "com.amazonaws.eks#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates if the Addon requires IAM Permissions to operate, such as networking permissions.

" + } } }, "traits": { @@ -2291,7 +2370,7 @@ "subscriptionId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" + "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" } }, "message": { @@ -2331,7 +2410,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details about clients using the deprecated resources.

" + "smithy.api#documentation": "

Details about clients using the deprecated resources.

" } }, "com.amazonaws.eks#ClientStats": { @@ -2454,7 +2533,7 @@ "health": { "target": "com.amazonaws.eks#ClusterHealth", "traits": { - "smithy.api#documentation": "

An object representing the health of your local Amazon EKS cluster on an\n Amazon Web Services Outpost. This object isn't available for clusters on the Amazon Web Services cloud.

" + "smithy.api#documentation": "

An object representing the health of your Amazon EKS cluster.

" } }, "outpostConfig": { @@ -2480,12 +2559,12 @@ "issues": { "target": "com.amazonaws.eks#ClusterIssueList", "traits": { - "smithy.api#documentation": "

An object representing the health issues of your local Amazon EKS cluster on\n an Amazon Web Services Outpost.

" + "smithy.api#documentation": "

An object representing the health issues of your Amazon EKS cluster.

" } } }, "traits": { - "smithy.api#documentation": "

An object representing the health of your local Amazon EKS cluster on an\n Amazon Web Services Outpost. You can't use this API with an Amazon EKS\n cluster on the Amazon Web Services cloud.

" + "smithy.api#documentation": "

An object representing the health of your Amazon EKS cluster.

" } }, "com.amazonaws.eks#ClusterIssue": { @@ -2511,7 +2590,7 @@ } }, "traits": { - "smithy.api#documentation": "

An issue with your local Amazon EKS cluster on an Amazon Web Services Outpost.\n You can't use this API with an Amazon EKS cluster on the Amazon Web Services\n cloud.

" + "smithy.api#documentation": "

An issue with your Amazon EKS cluster.

" } }, "com.amazonaws.eks#ClusterIssueCode": { @@ -2848,7 +2927,7 @@ "groupName": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The name of the placement group for the Kubernetes control plane instances. This\n setting can't be changed after cluster creation.

" + "smithy.api#documentation": "

The name of the placement group for the Kubernetes control plane instances. This setting\n can't be changed after cluster creation.

" } } }, @@ -2941,7 +3020,7 @@ "principalArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the\n same ARN in more than one access entry. This value can't be changed after access entry\n creation.

\n

The valid principals differ depending on the type of the access entry in the type field.\n The only valid ARN is IAM roles for the types of access entries for nodes: \n . You can use every IAM principal type for STANDARD access entries.\n You can't use the STS session principal type with access entries because this is a temporary\n principal for each session and not a permanent identity that can be assigned permissions.

\n

\n IAM best practices recommend using IAM roles with\n temporary credentials, rather than IAM users with long-term credentials.\n

", + "smithy.api#documentation": "

The ARN of the IAM principal for the AccessEntry. You can specify one ARN for each access entry. You can't specify the\n same ARN in more than one access entry. This value can't be changed after access entry\n creation.

\n

The valid principals differ depending on the type of the access entry in the\n type field. The only valid ARN is IAM roles for the types of access\n entries for nodes: \n . You can use every IAM principal type for STANDARD access entries.\n You can't use the STS session principal type with access entries because this is a\n temporary principal for each session and not a permanent identity that can be assigned\n permissions.

\n

\n IAM best practices recommend using IAM roles with\n temporary credentials, rather than IAM users with long-term credentials.\n

", "smithy.api#required": {} } }, @@ -2973,7 +3052,7 @@ "type": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The type of the new access entry. Valid values are Standard,\n FARGATE_LINUX, EC2_LINUX, and EC2_WINDOWS.

\n

If the principalArn is for an IAM role that's used for\n self-managed Amazon EC2 nodes, specify EC2_LINUX or\n EC2_WINDOWS. Amazon EKS grants the necessary permissions to the\n node for you. If the principalArn is for any other purpose, specify\n STANDARD. If you don't specify a value, Amazon EKS sets the\n value to STANDARD. It's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the\n aws-auth\n ConfigMap for the roles. You can't change this value once you've created\n the access entry.

\n

If you set the value to EC2_LINUX or EC2_WINDOWS, you can't\n specify values for kubernetesGroups, or associate an\n AccessPolicy to the access entry.

" + "smithy.api#documentation": "

The type of the new access entry. Valid values are Standard,\n FARGATE_LINUX, EC2_LINUX, and\n EC2_WINDOWS.

\n

If the principalArn is for an IAM role that's used for\n self-managed Amazon EC2 nodes, specify EC2_LINUX or\n EC2_WINDOWS. Amazon EKS grants the necessary permissions to the\n node for you. If the principalArn is for any other purpose, specify\n STANDARD. If you don't specify a value, Amazon EKS sets the\n value to STANDARD. It's unnecessary to create access entries for IAM roles used with Fargate profiles or managed Amazon EC2 nodes, because Amazon EKS creates entries in the\n aws-auth\n ConfigMap for the roles. You can't change this value once you've created\n the access entry.

\n

If you set the value to EC2_LINUX or EC2_WINDOWS, you can't\n specify values for kubernetesGroups, or associate an\n AccessPolicy to the access entry.

" } } }, @@ -3083,6 +3162,12 @@ "traits": { "smithy.api#documentation": "

The set of configuration values for the add-on that's created. The values that you\n provide are validated against the schema returned by\n DescribeAddonConfiguration.

" } + }, + "podIdentityAssociations": { + "target": "com.amazonaws.eks#AddonPodIdentityAssociationsList", + "traits": { + "smithy.api#documentation": "

An array of Pod Identity Associations to be created. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role.

\n

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" + } } }, "traits": { @@ -3132,7 +3217,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon EKS control plane.

\n

The Amazon EKS control plane consists of control plane instances that run the\n Kubernetes software, such as etcd and the API server. The control plane runs in\n an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is\n single tenant and unique. It runs on its own set of Amazon EC2 instances.

\n

The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing\n Network Load Balancer. Amazon EKS also provisions elastic network interfaces in\n your VPC subnets to provide connectivity from the control plane instances to the nodes\n (for example, to support kubectl exec, logs, and\n proxy data flows).

\n

Amazon EKS nodes run in your Amazon Web Services account and connect to your\n cluster's control plane over the Kubernetes API server endpoint and a certificate file that\n is created for your cluster.

\n

You can use the endpointPublicAccess and\n endpointPrivateAccess parameters to enable or disable public and\n private access to your cluster's Kubernetes API server endpoint. By default, public access is\n enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the\n \n Amazon EKS User Guide\n .

\n

You can use the logging parameter to enable or disable exporting the\n Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster\n control plane logs aren't exported to CloudWatch Logs. For more information, see\n Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .

\n \n

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.

\n
\n

In most cases, it takes several minutes to create a cluster. After you create an\n Amazon EKS cluster, you must configure your Kubernetes tooling to communicate\n with the API server and launch nodes into your cluster. For more information, see Managing Cluster\n Authentication and Launching Amazon EKS nodes in the\n Amazon EKS User Guide.

", + "smithy.api#documentation": "

Creates an Amazon EKS control plane.

\n

The Amazon EKS control plane consists of control plane instances that run the\n Kubernetes software, such as etcd and the API server. The control plane runs in\n an account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is\n single tenant and unique. It runs on its own set of Amazon EC2 instances.

\n

The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing\n Network Load Balancer. Amazon EKS also provisions elastic network interfaces in\n your VPC subnets to provide connectivity from the control plane instances to the nodes\n (for example, to support kubectl exec, logs, and\n proxy data flows).

\n

Amazon EKS nodes run in your Amazon Web Services account and connect to your\n cluster's control plane over the Kubernetes API server endpoint and a certificate file that\n is created for your cluster.

\n

You can use the endpointPublicAccess and\n endpointPrivateAccess parameters to enable or disable public and\n private access to your cluster's Kubernetes API server endpoint. By default, public access is\n enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the\n \n Amazon EKS User Guide\n .

\n

You can use the logging parameter to enable or disable exporting the\n Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster\n control plane logs aren't exported to CloudWatch Logs. For more information, see\n Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .

\n \n

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.

\n
\n

In most cases, it takes several minutes to create a cluster. After you create an\n Amazon EKS cluster, you must configure your Kubernetes tooling to communicate\n with the API server and launch nodes into your cluster. For more information, see Allowing users to\n access your cluster and Launching\n Amazon EKS nodes in the Amazon EKS User Guide.

", "smithy.api#examples": [ { "title": "To create a new cluster", @@ -3234,6 +3319,12 @@ "traits": { "smithy.api#documentation": "

The access configuration for the cluster.

" } + }, + "bootstrapSelfManagedAddons": { + "target": "com.amazonaws.eks#BoxedBoolean", + "traits": { + "smithy.api#documentation": "

If you set this value to False when creating a cluster, the default networking add-ons will not be installed.

\n

The default networking add-ons include vpc-cni, coredns, and kube-proxy.

\n

Use this option when you plan to install third-party alternative add-ons or self-manage the default networking add-ons.

" + } } }, "traits": { @@ -3495,7 +3586,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a managed node group for an Amazon EKS cluster.

\n

You can only create a node group for your cluster that is equal to the current Kubernetes\n version for the cluster. All node groups are created with the latest AMI release version\n for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI\n using a launch template. For more information about using launch templates, see Launch\n template support.

\n

An Amazon EKS managed node group is an Amazon EC2\n Auto Scaling group and associated Amazon EC2 instances that are managed by\n Amazon Web Services for an Amazon EKS cluster. For more information, see\n Managed node groups in the Amazon EKS User Guide.

\n \n

Windows AMI types are only supported for commercial Amazon Web Services Regions\n that support Windows on Amazon EKS.

\n
", + "smithy.api#documentation": "

Creates a managed node group for an Amazon EKS cluster.

\n

You can only create a node group for your cluster that is equal to the current Kubernetes\n version for the cluster. All node groups are created with the latest AMI release version\n for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI\n using a launch template. For more information about using launch templates, see Customizing managed nodes with launch templates.

\n

An Amazon EKS managed node group is an Amazon EC2\n Auto Scaling group and associated Amazon EC2 instances that are managed by\n Amazon Web Services for an Amazon EKS cluster. For more information, see\n Managed node groups in the Amazon EKS User Guide.

\n \n

Windows AMI types are only supported for commercial Amazon Web Services Regions\n that support Windows on Amazon EKS.

\n
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/node-groups", @@ -3530,38 +3621,38 @@ "diskSize": { "target": "com.amazonaws.eks#BoxedInteger", "traits": { - "smithy.api#documentation": "

The root device disk size (in GiB) for your node group instances. The default disk\n size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows.\n If you specify launchTemplate, then don't specify diskSize, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The root device disk size (in GiB) for your node group instances. The default disk\n size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows.\n If you specify launchTemplate, then don't specify diskSize, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } }, "subnets": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

The subnets to use for the Auto Scaling group that is created for your node group.\n If you specify launchTemplate, then don't specify \n SubnetId\n in your launch template, or the node group deployment\n will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

", + "smithy.api#documentation": "

The subnets to use for the Auto Scaling group that is created for your node group.\n If you specify launchTemplate, then don't specify \n SubnetId\n in your launch template, or the node group deployment\n will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

", "smithy.api#required": {} } }, "instanceTypes": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

Specify the instance types for a node group. If you specify a GPU instance type, make\n sure to also specify an applicable GPU AMI type with the amiType parameter.\n If you specify launchTemplate, then you can specify zero or one instance\n type in your launch template or you can specify 0-20 instance types\n for instanceTypes. If however, you specify an instance type in your launch\n template and specify any instanceTypes, the node group\n deployment will fail. If you don't specify an instance type in a launch template or for\n instanceTypes, then t3.medium is used, by default. If you\n specify Spot for capacityType, then we recommend specifying\n multiple values for instanceTypes. For more information, see Managed node group capacity types and Launch template support in\n the Amazon EKS User Guide.

" + "smithy.api#documentation": "

Specify the instance types for a node group. If you specify a GPU instance type, make\n sure to also specify an applicable GPU AMI type with the amiType parameter.\n If you specify launchTemplate, then you can specify zero or one instance\n type in your launch template or you can specify 0-20 instance types\n for instanceTypes. If however, you specify an instance type in your launch\n template and specify any instanceTypes, the node group\n deployment will fail. If you don't specify an instance type in a launch template or for\n instanceTypes, then t3.medium is used, by default. If you\n specify Spot for capacityType, then we recommend specifying\n multiple values for instanceTypes. For more information, see Managed node group capacity types and Customizing managed nodes with launch templates in\n the Amazon EKS User Guide.

" } }, "amiType": { "target": "com.amazonaws.eks#AMITypes", "traits": { - "smithy.api#documentation": "

The AMI type for your node group. If you specify launchTemplate, and your launch template uses a custom AMI,\n then don't specify amiType, or the node group deployment\n will fail. If your launch template uses a Windows custom AMI, then add\n eks:kube-proxy-windows to your Windows nodes rolearn in\n the aws-auth\n ConfigMap. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The AMI type for your node group. If you specify launchTemplate, and your launch template uses a custom AMI,\n then don't specify amiType, or the node group deployment\n will fail. If your launch template uses a Windows custom AMI, then add\n eks:kube-proxy-windows to your Windows nodes rolearn in\n the aws-auth\n ConfigMap. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } }, "remoteAccess": { "target": "com.amazonaws.eks#RemoteAccessConfig", "traits": { - "smithy.api#documentation": "

The remote access configuration to use with your node group. For Linux, the protocol\n is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify \n remoteAccess, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The remote access configuration to use with your node group. For Linux, the protocol\n is SSH. For Windows, the protocol is RDP. If you specify launchTemplate, then don't specify \n remoteAccess, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } }, "nodeRole": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The\n Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls\n through an IAM instance profile and associated policies. Before you can\n launch nodes and register them into a cluster, you must create an IAM\n role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the\n \n Amazon EKS User Guide\n . If you specify launchTemplate, then don't specify \n \n IamInstanceProfile\n in your launch template, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The\n Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls\n through an IAM instance profile and associated policies. Before you can\n launch nodes and register them into a cluster, you must create an IAM\n role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the\n \n Amazon EKS User Guide\n . If you specify launchTemplate, then don't specify \n \n IamInstanceProfile\n in your launch template, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

", "smithy.api#required": {} } }, @@ -3593,7 +3684,7 @@ "launchTemplate": { "target": "com.amazonaws.eks#LaunchTemplateSpecification", "traits": { - "smithy.api#documentation": "

An object representing a node group's launch template specification. If specified,\n then do not specify instanceTypes, diskSize, or\n remoteAccess and make sure that the launch template meets the\n requirements in launchTemplateSpecification.

" + "smithy.api#documentation": "

An object representing a node group's launch template specification. When using this\n object, don't directly specify instanceTypes, diskSize, or\n remoteAccess. Make sure that\n the launch template meets the requirements in launchTemplateSpecification. Also refer to\n Customizing managed nodes with launch templates in\n the Amazon EKS User Guide.

" } }, "updateConfig": { @@ -3611,13 +3702,13 @@ "version": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the\n cluster is used, and this is the only accepted specified value. If you specify launchTemplate,\n and your launch template uses a custom AMI, then don't specify version, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the\n cluster is used, and this is the only accepted specified value. If you specify launchTemplate,\n and your launch template uses a custom AMI, then don't specify version, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } }, "releaseVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes version\n is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.

\n

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes version\n is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.

\n

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } } }, @@ -4242,13 +4333,13 @@ "stopServingVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The version of the software where the deprecated resource version will stop being served.

" + "smithy.api#documentation": "

The version of the software where the deprecated resource version will stop being\n served.

" } }, "startServingReplacementVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The version of the software where the newer resource version became available to migrate to if applicable.

" + "smithy.api#documentation": "

The version of the software where the newer resource version became available to\n migrate to if applicable.

" } }, "clientStats": { @@ -4562,6 +4653,12 @@ "traits": { "smithy.api#documentation": "

A JSON schema that's used to validate the configuration values you provide when an\n add-on is created or updated.

" } + }, + "podIdentityConfiguration": { + "target": "com.amazonaws.eks#AddonPodIdentityConfigurationList", + "traits": { + "smithy.api#documentation": "

The Kubernetes service account name used by the addon, and any suggested IAM policies. Use this information to create an IAM Role for the Addon.

" + } } }, "traits": { @@ -5794,7 +5891,7 @@ "target": "com.amazonaws.eks#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The duration of the subscription term. Valid values are 12 and 36, indicating a 12 month or 36 month subscription.

" + "smithy.api#documentation": "

The duration of the subscription term. Valid values are 12 and 36, indicating a 12\n month or 36 month subscription.

" } }, "unit": { @@ -6046,12 +6143,93 @@ "traits": { "smithy.api#documentation": "

Metadata that assists with categorization and organization.\n Each tag consists of a key and an optional value. You define both. Tags don't\n propagate to any other cluster or Amazon Web Services resources.

" } + }, + "health": { + "target": "com.amazonaws.eks#FargateProfileHealth", + "traits": { + "smithy.api#documentation": "

The health status of the Fargate profile. If there are issues with\n your Fargate profile's health, they are listed here.

" + } } }, "traits": { "smithy.api#documentation": "

An object representing an Fargate profile.

" } }, + "com.amazonaws.eks#FargateProfileHealth": { + "type": "structure", + "members": { + "issues": { + "target": "com.amazonaws.eks#FargateProfileIssueList", + "traits": { + "smithy.api#documentation": "

Any issues that are associated with the Fargate profile.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The health status of the Fargate profile. If there are issues with\n your Fargate profile's health, they are listed here.

" + } + }, + "com.amazonaws.eks#FargateProfileIssue": { + "type": "structure", + "members": { + "code": { + "target": "com.amazonaws.eks#FargateProfileIssueCode", + "traits": { + "smithy.api#documentation": "

A brief description of the error.

" + } + }, + "message": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

The error message associated with the issue.

" + } + }, + "resourceIds": { + "target": "com.amazonaws.eks#StringList", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services resources that are affected by this issue.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An issue that is associated with the Fargate profile.

" + } + }, + "com.amazonaws.eks#FargateProfileIssueCode": { + "type": "enum", + "members": { + "POD_EXECUTION_ROLE_ALREADY_IN_USE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PodExecutionRoleAlreadyInUse" + } + }, + "ACCESS_DENIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AccessDenied" + } + }, + "CLUSTER_UNREACHABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ClusterUnreachable" + } + }, + "INTERNAL_FAILURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InternalFailure" + } + } + } + }, + "com.amazonaws.eks#FargateProfileIssueList": { + "type": "list", + "member": { + "target": "com.amazonaws.eks#FargateProfileIssue" + } + }, "com.amazonaws.eks#FargateProfileLabel": { "type": "map", "key": { @@ -6223,7 +6401,7 @@ "lastRefreshTime": { "target": "com.amazonaws.eks#Timestamp", "traits": { - "smithy.api#documentation": "

The time Amazon EKS last successfully completed a refresh of this insight check on the cluster.

" + "smithy.api#documentation": "

The time Amazon EKS last successfully completed a refresh of this insight check on the\n cluster.

" } }, "lastTransitionTime": { @@ -6235,7 +6413,7 @@ "description": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The description of the insight which includes alert criteria, remediation recommendation, and additional resources (contains Markdown).

" + "smithy.api#documentation": "

The description of the insight which includes alert criteria, remediation\n recommendation, and additional resources (contains Markdown).

" } }, "insightStatus": { @@ -6270,7 +6448,7 @@ } }, "traits": { - "smithy.api#documentation": "

A check that provides recommendations to remedy potential upgrade-impacting issues.

" + "smithy.api#documentation": "

A check that provides recommendations to remedy potential upgrade-impacting\n issues.

" } }, "com.amazonaws.eks#InsightCategorySpecificSummary": { @@ -6410,7 +6588,7 @@ "lastRefreshTime": { "target": "com.amazonaws.eks#Timestamp", "traits": { - "smithy.api#documentation": "

The time Amazon EKS last successfully completed a refresh of this insight check on the cluster.

" + "smithy.api#documentation": "

The time Amazon EKS last successfully completed a refresh of this insight check on the\n cluster.

" } }, "lastTransitionTime": { @@ -6422,7 +6600,7 @@ "description": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The description of the insight which includes alert criteria, remediation recommendation, and additional resources (contains Markdown).

" + "smithy.api#documentation": "

The description of the insight which includes alert criteria, remediation\n recommendation, and additional resources (contains Markdown).

" } }, "insightStatus": { @@ -6498,7 +6676,7 @@ "subscriptionId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" + "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" } }, "message": { @@ -6538,7 +6716,7 @@ "subscriptionId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" + "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" } }, "message": { @@ -6672,7 +6850,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object representing a node group launch template specification. The launch template\n can't include \n SubnetId\n , \n IamInstanceProfile\n , \n RequestSpotInstances\n , \n HibernationOptions\n , or \n TerminateInstances\n , or the node group deployment or\n update will fail. For more information about launch templates, see \n CreateLaunchTemplate\n in the Amazon EC2 API\n Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

\n

You must specify either the launch template ID or the launch template name in the\n request, but not both.

" + "smithy.api#documentation": "

An object representing a node group launch template specification. The launch template\n can't include \n SubnetId\n , \n IamInstanceProfile\n , \n RequestSpotInstances\n , \n HibernationOptions\n , or \n TerminateInstances\n , or the node group deployment or\n update will fail. For more information about launch templates, see \n CreateLaunchTemplate\n in the Amazon EC2 API\n Reference. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

\n

You must specify either the launch template ID or the launch template name in the\n request, but not both.

" } }, "com.amazonaws.eks#ListAccessEntries": { @@ -6948,7 +7126,7 @@ "nextToken": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListAddons\n request. When the results of a ListAddons request exceed\n maxResults, you can use this value to retrieve the next page of\n results. This value is null when there are no more results to\n return.

\n \n

This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" + "smithy.api#documentation": "

The nextToken value to include in a future ListAddons\n request. When the results of a ListAddons request exceed\n maxResults, you can use this value to retrieve the next page of\n results. This value is null when there are no more results to\n return.

\n \n

This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.

\n
" } } }, @@ -7226,7 +7404,7 @@ "nextToken": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a previous paginated\n ListEksAnywhereSubscriptions request where maxResults was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken value.

", + "smithy.api#documentation": "

The nextToken value returned from a previous paginated\n ListEksAnywhereSubscriptions request where maxResults was\n used and the results exceeded the value of that parameter. Pagination continues from the\n end of the previous results that returned the nextToken value.

", "smithy.api#httpQuery": "nextToken" } }, @@ -7480,7 +7658,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of all insights checked for against the specified cluster. You can filter which insights are returned by category, associated Kubernetes version, and status.

", + "smithy.api#documentation": "

Returns a list of all insights checked for against the specified cluster. You can\n filter which insights are returned by category, associated Kubernetes version, and\n status.

", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/insights", @@ -7517,7 +7695,7 @@ "filter": { "target": "com.amazonaws.eks#InsightsFilter", "traits": { - "smithy.api#documentation": "

The criteria to filter your list of insights for your cluster. You can filter which insights are returned by category, associated Kubernetes version, and status.

" + "smithy.api#documentation": "

The criteria to filter your list of insights for your cluster. You can filter which\n insights are returned by category, associated Kubernetes version, and status.

" } }, "maxResults": { @@ -7529,7 +7707,7 @@ "nextToken": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The nextToken value returned from a previous paginated ListInsights\n request. When the results of a ListInsights request exceed\n maxResults, you can use this value to retrieve the next page of\n results. This value is null when there are no more results to\n return.

" + "smithy.api#documentation": "

The nextToken value returned from a previous paginated\n ListInsights request. When the results of a ListInsights\n request exceed maxResults, you can use this value to retrieve the next page\n of results. This value is null when there are no more results to\n return.

" } } }, @@ -7549,7 +7727,7 @@ "nextToken": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The nextToken value to include in a future ListInsights\n request. When the results of a ListInsights request exceed\n maxResults, you can use this value to retrieve the next page of\n results. This value is null when there are no more results to\n return.

" + "smithy.api#documentation": "

The nextToken value to include in a future ListInsights\n request. When the results of a ListInsights request exceed\n maxResults, you can use this value to retrieve the next page of\n results. This value is null when there are no more results to\n return.

" } } }, @@ -7752,7 +7930,7 @@ "associations": { "target": "com.amazonaws.eks#PodIdentityAssociationSummaries", "traits": { - "smithy.api#documentation": "

The list of summarized descriptions of the associations that are in the cluster and match any\n filters that you provided.

\n

Each summary is simplified by removing these fields compared to the full \n PodIdentityAssociation\n :

\n
    \n
  • \n

    The IAM role: roleArn\n

    \n
  • \n
  • \n

    The timestamp that the association was created at: createdAt\n

    \n
  • \n
  • \n

    The most recent timestamp that the association was modified at:. modifiedAt\n

    \n
  • \n
  • \n

    The tags on the association: tags\n

    \n
  • \n
" + "smithy.api#documentation": "

The list of summarized descriptions of the associations that are in the cluster and match\n any filters that you provided.

\n

Each summary is simplified by removing these fields compared to the full \n PodIdentityAssociation\n :

\n
    \n
  • \n

    The IAM role: roleArn\n

    \n
  • \n
  • \n

    The timestamp that the association was created at: createdAt\n

    \n
  • \n
  • \n

    The most recent timestamp that the association was modified at: modifiedAt\n

    \n
  • \n
  • \n

    The tags on the association: tags\n

    \n
  • \n
" } }, "nextToken": { @@ -8832,6 +9010,12 @@ "traits": { "smithy.api#documentation": "

The most recent timestamp that the association was modified at

" } + }, + "ownerArn": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

" + } } }, "traits": { @@ -8862,7 +9046,7 @@ "serviceAccount": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The name of the Kubernetes service account inside the cluster to associate the IAM credentials with.

" + "smithy.api#documentation": "

The name of the Kubernetes service account inside the cluster to associate the IAM\n credentials with.

" } }, "associationArn": { @@ -8876,6 +9060,12 @@ "traits": { "smithy.api#documentation": "

The ID of the association.

" } + }, + "ownerArn": { + "target": "com.amazonaws.eks#String", + "traits": { + "smithy.api#documentation": "

If defined, the Pod Identity Association is owned by an Amazon EKS Addon.

" + } } }, "traits": { @@ -8888,7 +9078,7 @@ "keyArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be\n symmetric and created in the same Amazon Web Services Region as the cluster. If the\n KMS key was created in a different account, the IAM principal must\n have access to the KMS key. For more information, see Allowing\n users in other accounts to use a KMS key in the\n Key Management Service Developer Guide.

" + "smithy.api#documentation": "

Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be\n symmetric and created in the same Amazon Web Services Region as the cluster. If the\n KMS key was created in a different account, the IAM principal must\n have access to the KMS key. For more information, see Allowing\n users in other accounts to use a KMS key in the\n Key Management Service Developer Guide.

" } } }, @@ -9080,7 +9270,7 @@ "subscriptionId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" + "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" } }, "message": { @@ -9126,7 +9316,7 @@ "subscriptionId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" + "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" } }, "message": { @@ -9191,7 +9381,7 @@ "subscriptionId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" + "smithy.api#documentation": "

The Amazon EKS subscription ID with the exception.

" } }, "message": { @@ -9709,6 +9899,12 @@ "traits": { "smithy.api#documentation": "

The set of configuration values for the add-on that's created. The values that you\n provide are validated against the schema returned by\n DescribeAddonConfiguration.

" } + }, + "podIdentityAssociations": { + "target": "com.amazonaws.eks#AddonPodIdentityAssociationsList", + "traits": { + "smithy.api#documentation": "

An array of Pod Identity Associations to be updated. Each EKS Pod Identity association maps a Kubernetes service account to an IAM Role. If this value is left blank, no change is made. If an empty array is provided, existing Pod Identity Associations owned by the Addon are deleted.

\n

For more information, see Attach an IAM Role to an Amazon EKS add-on using Pod Identity in the EKS User Guide.

" + } } }, "traits": { @@ -10154,13 +10350,13 @@ "version": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The Kubernetes version to update to. If no version is specified, then the Kubernetes version of\n the node group does not change. You can specify the Kubernetes version of the cluster to\n update the node group to the latest AMI version of the cluster's Kubernetes version.\n If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version,\n or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The Kubernetes version to update to. If no version is specified, then the Kubernetes version of\n the node group does not change. You can specify the Kubernetes version of the cluster to\n update the node group to the latest AMI version of the cluster's Kubernetes version.\n If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify version,\n or the node group update will fail. For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } }, "releaseVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is used.\n For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.

\n

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group update will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

" + "smithy.api#documentation": "

The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is used.\n For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.

\n

If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group update will fail.\n For more information about using launch templates with Amazon EKS, see Customizing managed nodes with launch templates in the Amazon EKS User Guide.

" } }, "launchTemplate": { @@ -10383,6 +10579,12 @@ "traits": { "smithy.api#enumValue": "AuthenticationMode" } + }, + "POD_IDENTITY_ASSOCIATIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PodIdentityAssociations" + } } } }, diff --git a/models/elastic-load-balancing-v2.json b/models/elastic-load-balancing-v2.json index 8568f0edc7..acc08e24c4 100644 --- a/models/elastic-load-balancing-v2.json +++ b/models/elastic-load-balancing-v2.json @@ -1287,7 +1287,7 @@ "IpAddressType": { "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { - "smithy.api#documentation": "

The type of IP addresses used by the subnets for your load balancer. The possible values\n are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6\n addresses).

" + "smithy.api#documentation": "

Note: Internal load balancers must use the ipv4 IP address type.

\n

[Application Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses), dualstack (for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).

\n

[Network Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses). You can’t specify dualstack \n for a load balancer with a UDP or TCP_UDP listener.

\n

[Gateway Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses).

" } }, "CustomerOwnedIpv4Pool": { @@ -3825,7 +3825,7 @@ "sdkId": "Elastic Load Balancing v2", "arnNamespace": "elasticloadbalancing", "cloudFormationName": "ElasticLoadBalancingV2", - "cloudTrailEventSource": "elasticloadbalancingv2.amazonaws.com", + "cloudTrailEventSource": "elasticloadbalancing.amazonaws.com", "docId": "elasticloadbalancingv2-2015-12-01", "endpointPrefix": "elasticloadbalancing" }, @@ -5340,6 +5340,12 @@ "traits": { "smithy.api#enumValue": "dualstack" } + }, + "DUALSTACK_WITHOUT_PUBLIC_IPV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dualstack-without-public-ipv4" + } } } }, @@ -5550,7 +5556,7 @@ "IpAddressType": { "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { - "smithy.api#documentation": "

The type of IP addresses used by the subnets for your load balancer. The possible values\n are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6\n addresses).

" + "smithy.api#documentation": "

[Application Load Balancers] The type of IP addresses used for public or private \n connections by the subnets attached to your load balancer. The possible values are \n ipv4 (for only IPv4 addresses), dualstack (for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).

\n

[Network Load Balancers and Gateway Load Balancers] The type of IP addresses \n used for public or private connections by the subnets attached to your load \n balancer. The possible values are ipv4 (for only IPv4 addresses) \n and dualstack (for IPv4 and IPv6 addresses).

" } }, "CustomerOwnedIpv4Pool": { @@ -7594,7 +7600,7 @@ "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The IP address type. The possible values are ipv4 (for IPv4 addresses) and\n dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer\n with a UDP or TCP_UDP listener.

", + "smithy.api#documentation": "

Note: Internal load balancers must use the ipv4 IP address type.

\n

[Application Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses), dualstack (for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).

\n

[Network Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses). You can’t specify dualstack \n for a load balancer with a UDP or TCP_UDP listener.

\n

[Gateway Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses) and dualstack \n (for IPv4 and IPv6 addresses).

", "smithy.api#required": {} } } @@ -7880,7 +7886,7 @@ "IpAddressType": { "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { - "smithy.api#documentation": "

[Network Load Balancers] The type of IP addresses used by the subnets for your load\n balancer. The possible values are ipv4 (for IPv4 addresses) and\n dualstack (for IPv4 and IPv6 addresses). You can’t specify\n dualstack for a load balancer with a UDP or TCP_UDP listener.

\n

[Gateway Load Balancers] The type of IP addresses used by the subnets for your load\n balancer. The possible values are ipv4 (for IPv4 addresses) and\n dualstack (for IPv4 and IPv6 addresses).

" + "smithy.api#documentation": "

[Application Load Balancers] The IP address type. The possible values are \n ipv4 (for only IPv4 addresses), dualstack (for IPv4 and \n IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public \n addresses, with private IPv4 and IPv6 addresses).

\n

[Network Load Balancers] The type of IP addresses used by the subnets for your load\n balancer. The possible values are ipv4 (for IPv4 addresses) and\n dualstack (for IPv4 and IPv6 addresses). You can’t specify\n dualstack for a load balancer with a UDP or TCP_UDP listener.

\n

[Gateway Load Balancers] The type of IP addresses used by the subnets for your load\n balancer. The possible values are ipv4 (for IPv4 addresses) and\n dualstack (for IPv4 and IPv6 addresses).

" } } }, @@ -7900,7 +7906,7 @@ "IpAddressType": { "target": "com.amazonaws.elasticloadbalancingv2#IpAddressType", "traits": { - "smithy.api#documentation": "

[Network Load Balancers] The IP address type.

\n

[Gateway Load Balancers] The IP address type.

" + "smithy.api#documentation": "

[Application Load Balancers] The IP address type.

\n

[Network Load Balancers] The IP address type.

\n

[Gateway Load Balancers] The IP address type.

" } } }, diff --git a/models/elastic-transcoder.json b/models/elastic-transcoder.json index 56fb6c74c6..9efdaacd6b 100644 --- a/models/elastic-transcoder.json +++ b/models/elastic-transcoder.json @@ -1318,7 +1318,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -1361,7 +1360,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -1374,7 +1374,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -1388,7 +1387,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -1411,7 +1409,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -1446,7 +1443,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -1457,14 +1453,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -1478,14 +1476,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -1494,11 +1490,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -1509,14 +1505,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -1530,7 +1528,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -1550,7 +1547,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -1561,14 +1557,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1579,9 +1577,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -2890,7 +2890,20 @@ "inputToken": "PageToken", "outputToken": "NextPageToken", "items": "Presets" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListPresetsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.elastictranscoder#ListPresetsRequest": { diff --git a/models/elasticache.json b/models/elasticache.json index 7b2ef05d55..61f28a3ccf 100644 --- a/models/elasticache.json +++ b/models/elasticache.json @@ -9461,7 +9461,7 @@ "Marker": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

An optional marker returned from a prior request. Use this marker for pagination of\n results from this operation. If this parameter is specified, the response includes only\n records beyond the marker, up to the value specified by MaxRecords. >

" + "smithy.api#documentation": "

An optional marker returned from a prior request. Use this marker for pagination of\n results from this operation. If this parameter is specified, the response includes only\n records beyond the marker, up to the value specified by MaxRecords.

" } } }, @@ -11370,7 +11370,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    Rotate

    \n
  • \n
  • \n

    Set

    \n
  • \n
\n

For more information, see Authenticating Users with Redis AUTH\n

" + "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis AUTH\n

" } }, "LogDeliveryConfigurations": { @@ -11947,7 +11947,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    Rotate

    \n
  • \n
  • \n

    Set

    \n
  • \n
\n

For more information, see Authenticating Users with Redis AUTH\n

" + "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis AUTH\n

" } }, "UserGroupIdsToAdd": { @@ -15739,7 +15739,7 @@ } ], "traits": { - "smithy.api#documentation": "

Represents the input of a TestFailover operation which test automatic\n failover on a specified node group (called shard in the console) in a replication group\n (called cluster in the console).
This API is designed for testing the behavior of your application in case of\n ElastiCache failover. It is not designed to be an operational tool for initiating a\n failover to overcome a problem you may have with the cluster. Moreover, in certain\n conditions such as large-scale operational events, Amazon may block this API.
\n Note the following\n
  • A customer can use this operation to test automatic failover on up to 5 shards\n (called node groups in the ElastiCache API and Amazon CLI) in any rolling\n 24-hour period.
  • If calling this operation on shards in different clusters (called replication\n groups in the API and CLI), the calls can be made concurrently.
  • If calling this operation multiple times on different shards in the same Redis\n (cluster mode enabled) replication group, the first node replacement must\n complete before a subsequent call can be made.
  • To determine whether the node replacement is complete you can check Events\n using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order\n of occurrance:
    1. Replication group message: Test Failover API called for node\n group \n
    2. Cache cluster message: Failover from primary node\n to replica node \n completed\n
    3. Replication group message: Failover from primary node\n to replica node \n completed\n
    4. Cache cluster message: Recovering cache nodes\n \n
    5. Cache cluster message: Finished recovery for cache nodes\n \n
    For more information see:
Also see, Testing\n Multi-AZ in the ElastiCache User Guide.

" + "smithy.api#documentation": "

Represents the input of a TestFailover operation which tests automatic\n failover on a specified node group (called shard in the console) in a replication group\n (called cluster in the console).
This API is designed for testing the behavior of your application in case of\n ElastiCache failover. It is not designed to be an operational tool for initiating a\n failover to overcome a problem you may have with the cluster. Moreover, in certain\n conditions such as large-scale operational events, Amazon may block this API.
\n Note the following\n
  • A customer can use this operation to test automatic failover on up to 15 shards\n (called node groups in the ElastiCache API and Amazon CLI) in any rolling\n 24-hour period.
  • If calling this operation on shards in different clusters (called replication\n groups in the API and CLI), the calls can be made concurrently.
  • If calling this operation multiple times on different shards in the same Redis\n (cluster mode enabled) replication group, the first node replacement must\n complete before a subsequent call can be made.
  • To determine whether the node replacement is complete you can check Events\n using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API.\n Look for the following automatic failover related events, listed here in order\n of occurrance:
    1. Replication group message: Test Failover API called for node\n group \n
    2. Cache cluster message: Failover from primary node\n to replica node \n completed\n
    3. Replication group message: Failover from primary node\n to replica node \n completed\n
    4. Cache cluster message: Recovering cache nodes\n \n
    5. Cache cluster message: Finished recovery for cache nodes\n \n
    For more information see:
Also see, Testing\n Multi-AZ in the ElastiCache User Guide.
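A hedged Swift sketch of driving the operation through Soto's generated ElastiCache client; the shard ID "0001" and the replication group name are placeholders only.

import SotoElastiCache

// Kick off a test failover for a single shard; the response carries the replication
// group so its status can be inspected while the failover proceeds.
func testShardFailover(elastiCache: ElastiCache) async throws {
    let result = try await elastiCache.testFailover(.init(
        nodeGroupId: "0001",
        replicationGroupId: "my-replication-group"
    ))
    print(result.replicationGroup?.status ?? "unknown")
}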

" } }, "com.amazonaws.elasticache#TestFailoverMessage": { @@ -15757,7 +15757,7 @@ "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the node group (called shard in the console) in this replication group on\n which automatic failover is to be tested. You may test automatic failover on up to 5\n node groups in any rolling 24-hour period.

", + "smithy.api#documentation": "

The name of the node group (called shard in the console) in this replication group on\n which automatic failover is to be tested. You may test automatic failover on up to 15\n node groups in any rolling 24-hour period.

", "smithy.api#required": {} } } diff --git a/models/emr-serverless.json b/models/emr-serverless.json index 1b1c7af60f..c6bd0f79d1 100644 --- a/models/emr-serverless.json +++ b/models/emr-serverless.json @@ -128,6 +128,12 @@ }, "monitoringConfiguration": { "target": "com.amazonaws.emrserverless#MonitoringConfiguration" + }, + "interactiveConfiguration": { + "target": "com.amazonaws.emrserverless#InteractiveConfiguration", + "traits": { + "smithy.api#documentation": "

The interactive configuration object that enables the interactive use cases for an application.

" + } } }, "traits": { @@ -345,6 +351,14 @@ ] } }, + "com.amazonaws.emrserverless#AttemptNumber": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.emrserverless#AutoStartConfig": { "type": "structure", "members": { @@ -1474,6 +1488,12 @@ "traits": { "smithy.api#documentation": "

The configuration setting for monitoring.

" } + }, + "interactiveConfiguration": { + "target": "com.amazonaws.emrserverless#InteractiveConfiguration", + "traits": { + "smithy.api#documentation": "

The interactive configuration object that enables the interactive use cases \n to use when running an application.

" + } } } }, @@ -1727,6 +1747,13 @@ "smithy.api#required": {}, "smithy.api#resourceIdentifier": "jobRunId" } + }, + "attempt": { + "target": "com.amazonaws.emrserverless#AttemptNumber", + "traits": { + "smithy.api#documentation": "

An optional parameter that indicates the number of attempts for the job. If not specified,\n this value defaults to the attempt of the latest job.

", + "smithy.api#httpQuery": "attempt" + } } } }, @@ -1789,6 +1816,13 @@ "smithy.api#required": {}, "smithy.api#resourceIdentifier": "jobRunId" } + }, + "attempt": { + "target": "com.amazonaws.emrserverless#AttemptNumber", + "traits": { + "smithy.api#documentation": "

An optional parameter that indicates the number of attempts for the job. If not specified,\n this value defaults to the attempt of the latest job.
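For example, a Soto call that pins a specific attempt when describing a job run might look like this sketch; the IDs are placeholders, and omitting attempt falls back to the latest attempt as described above.

import SotoEMRServerless

// Describe the second attempt of a job run explicitly; without `attempt`
// the service returns the latest attempt.
func describeSecondAttempt(emrServerless: EMRServerless) async throws {
    let response = try await emrServerless.getJobRun(.init(
        applicationId: "00f1example2abc3",  // placeholder IDs
        attempt: 2,
        jobRunId: "00f1exampleriab3"
    ))
    print(response.jobRun.state, "attempt", response.jobRun.attempt ?? 1)
}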

", + "smithy.api#httpQuery": "attempt" + } } } }, @@ -1955,6 +1989,26 @@ } } }, + "com.amazonaws.emrserverless#InteractiveConfiguration": { + "type": "structure", + "members": { + "studioEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook.

" + } + }, + "livyEndpointEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Enables an Apache Livy endpoint that you can connect to and run interactive jobs.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration to use to enable the different types of interactive use cases in an application.
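Assuming Soto regenerates this shape with its usual lowerCamelCase members, enabling both interactive options on an existing application might look roughly like this; the application ID is a placeholder.

import SotoEMRServerless

// Turn on EMR Studio access and the Apache Livy endpoint for an existing application.
func enableInteractiveWorkloads(emrServerless: EMRServerless) async throws {
    _ = try await emrServerless.updateApplication(.init(
        applicationId: "00f1example2abc3",  // placeholder application ID
        interactiveConfiguration: .init(
            livyEndpointEnabled: true,
            studioEnabled: true
        )
    ))
}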

" + } + }, "com.amazonaws.emrserverless#InternalServerException": { "type": "structure", "members": { @@ -2128,12 +2182,159 @@ "traits": { "smithy.api#documentation": "

The aggregate vCPU, memory, and storage that Amazon Web Services has billed for the job\n run. The billed resources include a 1-minute minimum usage for workers, plus additional\n storage over 20 GB per worker. Note that billed resources do not include usage for idle\n pre-initialized workers.

" } + }, + "mode": { + "target": "com.amazonaws.emrserverless#JobRunMode", + "traits": { + "smithy.api#documentation": "

The mode of the job run.

" + } + }, + "retryPolicy": { + "target": "com.amazonaws.emrserverless#RetryPolicy", + "traits": { + "smithy.api#documentation": "

The retry policy of the job run.

" + } + }, + "attempt": { + "target": "com.amazonaws.emrserverless#AttemptNumber", + "traits": { + "smithy.api#documentation": "

The attempt of the job run.

" + } + }, + "attemptCreatedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time of when the job run attempt was created.

" + } + }, + "attemptUpdatedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time of when the job run attempt was last updated.

" + } } }, "traits": { "smithy.api#documentation": "

Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive\n query, or SparkSQL query, that you submit to an Amazon EMR Serverless\n application.

" } }, + "com.amazonaws.emrserverless#JobRunAttemptSummary": { + "type": "structure", + "members": { + "applicationId": { + "target": "com.amazonaws.emrserverless#ApplicationId", + "traits": { + "smithy.api#documentation": "

The ID of the application the job is running on.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "applicationId" + } + }, + "id": { + "target": "com.amazonaws.emrserverless#JobRunId", + "traits": { + "smithy.api#documentation": "

The ID of the job run attempt.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "jobRunId" + } + }, + "name": { + "target": "com.amazonaws.emrserverless#String256", + "traits": { + "smithy.api#documentation": "

The name of the job run attempt.

" + } + }, + "mode": { + "target": "com.amazonaws.emrserverless#JobRunMode", + "traits": { + "smithy.api#documentation": "

The mode of the job run attempt.

" + } + }, + "arn": { + "target": "com.amazonaws.emrserverless#JobArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the job run.

", + "smithy.api#required": {} + } + }, + "createdBy": { + "target": "com.amazonaws.emrserverless#RequestIdentityUserArn", + "traits": { + "smithy.api#documentation": "

The user who created the job run.

", + "smithy.api#required": {} + } + }, + "jobCreatedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time of when the job run was created.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time when the job run attempt was created.

", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time of when the job run attempt was last updated.

", + "smithy.api#required": {} + } + }, + "executionRole": { + "target": "com.amazonaws.emrserverless#IAMRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the execution role of the job run.

", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.emrserverless#JobRunState", + "traits": { + "smithy.api#documentation": "

The state of the job run attempt.

", + "smithy.api#required": {} + } + }, + "stateDetails": { + "target": "com.amazonaws.emrserverless#String256", + "traits": { + "smithy.api#documentation": "

The state details of the job run attempt.

", + "smithy.api#required": {} + } + }, + "releaseLabel": { + "target": "com.amazonaws.emrserverless#ReleaseLabel", + "traits": { + "smithy.api#documentation": "

The Amazon EMR release label of the job run attempt.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.emrserverless#JobRunType", + "traits": { + "smithy.api#documentation": "

The type of the job run, such as Spark or Hive.

" + } + }, + "attempt": { + "target": "com.amazonaws.emrserverless#AttemptNumber", + "traits": { + "smithy.api#documentation": "

The attempt number of the job run execution.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of attributes associated with a job run attempt.

" + } + }, + "com.amazonaws.emrserverless#JobRunAttempts": { + "type": "list", + "member": { + "target": "com.amazonaws.emrserverless#JobRunAttemptSummary" + } + }, "com.amazonaws.emrserverless#JobRunId": { "type": "string", "traits": { @@ -2144,6 +2345,21 @@ "smithy.api#pattern": "^[0-9a-z]+$" } }, + "com.amazonaws.emrserverless#JobRunMode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BATCH", + "name": "BATCH" + }, + { + "value": "STREAMING", + "name": "STREAMING" + } + ] + } + }, "com.amazonaws.emrserverless#JobRunResource": { "type": "resource", "identifiers": { @@ -2169,6 +2385,9 @@ "operations": [ { "target": "com.amazonaws.emrserverless#GetDashboardForJobRun" + }, + { + "target": "com.amazonaws.emrserverless#ListJobRunAttempts" } ] }, @@ -2249,6 +2468,12 @@ "smithy.api#documentation": "

The optional job run name. This doesn't have to be unique.

" } }, + "mode": { + "target": "com.amazonaws.emrserverless#JobRunMode", + "traits": { + "smithy.api#documentation": "

The mode of the job run.

" + } + }, "arn": { "target": "com.amazonaws.emrserverless#JobArn", "traits": { @@ -2310,6 +2535,24 @@ "traits": { "smithy.api#documentation": "

The type of job run, such as Spark or Hive.

" } + }, + "attempt": { + "target": "com.amazonaws.emrserverless#AttemptNumber", + "traits": { + "smithy.api#documentation": "

The attempt number of the job run execution.

" + } + }, + "attemptCreatedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time of when the job run attempt was created.

" + } + }, + "attemptUpdatedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time of when the job run attempt was last updated.

" + } } }, "traits": { @@ -2405,6 +2648,106 @@ } } }, + "com.amazonaws.emrserverless#ListJobRunAttempts": { + "type": "operation", + "input": { + "target": "com.amazonaws.emrserverless#ListJobRunAttemptsRequest" + }, + "output": { + "target": "com.amazonaws.emrserverless#ListJobRunAttemptsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.emrserverless#InternalServerException" + }, + { + "target": "com.amazonaws.emrserverless#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.emrserverless#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all attempts of a job run.

", + "smithy.api#http": { + "method": "GET", + "uri": "/applications/{applicationId}/jobruns/{jobRunId}/attempts" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "jobRunAttempts" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.emrserverless#ListJobRunAttemptsRequest": { + "type": "structure", + "members": { + "applicationId": { + "target": "com.amazonaws.emrserverless#ApplicationId", + "traits": { + "smithy.api#documentation": "

The ID of the application for which to list job runs.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "applicationId" + } + }, + "jobRunId": { + "target": "com.amazonaws.emrserverless#JobRunId", + "traits": { + "smithy.api#documentation": "

The ID of the job run to list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "jobRunId" + } + }, + "nextToken": { + "target": "com.amazonaws.emrserverless#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of job run attempt results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The maximum number of job run attempts to list.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.emrserverless#ListJobRunAttemptsResponse": { + "type": "structure", + "members": { + "jobRunAttempts": { + "target": "com.amazonaws.emrserverless#JobRunAttempts", + "traits": { + "smithy.api#documentation": "

The array of the listed job run attempt objects.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.emrserverless#NextToken", + "traits": { + "smithy.api#documentation": "

The output displays the token for the next set of application results. \n This is required for pagination and is available as a response of the previous request.
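A sketch of consuming the new operation through Soto, threading nextToken by hand in line with the paginated trait declared above; all identifiers are placeholders.

import SotoEMRServerless

// Walk every attempt of a job run, following nextToken until the listing is exhausted.
func printJobRunAttempts(emrServerless: EMRServerless) async throws {
    var nextToken: String?
    repeat {
        let page = try await emrServerless.listJobRunAttempts(.init(
            applicationId: "00f1example2abc3",  // placeholder IDs
            jobRunId: "00f1exampleriab3",
            maxResults: 50,
            nextToken: nextToken
        ))
        for attempt in page.jobRunAttempts {
            print("attempt \(attempt.attempt ?? 1): \(attempt.state)")
        }
        nextToken = page.nextToken
    } while nextToken != nil
}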

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.emrserverless#ListJobRuns": { "type": "operation", "input": { @@ -2487,6 +2830,13 @@ "smithy.api#documentation": "

An optional filter for job run states. Note that if this filter contains multiple\n states, the resulting list will be grouped by the state.

", "smithy.api#httpQuery": "states" } + }, + "mode": { + "target": "com.amazonaws.emrserverless#JobRunMode", + "traits": { + "smithy.api#documentation": "

The mode of the job runs to list.

", + "smithy.api#httpQuery": "mode" + } } } }, @@ -2845,6 +3195,30 @@ "smithy.api#documentation": "

The resource utilization for memory, storage, and vCPU for jobs.

" } }, + "com.amazonaws.emrserverless#RetryPolicy": { + "type": "structure", + "members": { + "maxAttempts": { + "target": "com.amazonaws.emrserverless#AttemptNumber", + "traits": { + "smithy.api#documentation": "

Maximum number of attempts for the job run. This parameter is only applicable for BATCH mode.

" + } + }, + "maxFailedAttemptsPerHour": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

Maximum number of failed attempts per hour. This parameter is only applicable for STREAMING mode.

", + "smithy.api#range": { + "min": 1 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The retry policy to use for a job run.
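Roughly, a batch job submission that exercises the new mode and retryPolicy members could look like the following Soto call; the ARN, IDs, and entry point are illustrative only, and the job driver uses the existing SparkSubmit shape from this model.

import SotoEMRServerless

// Submit a BATCH-mode Spark job that is retried at most three times.
func submitJobWithRetries(emrServerless: EMRServerless) async throws {
    let response = try await emrServerless.startJobRun(.init(
        applicationId: "00f1example2abc3",  // placeholder
        executionRoleArn: "arn:aws:iam::123456789012:role/emr-serverless-job-role",
        jobDriver: .sparkSubmit(.init(entryPoint: "s3://amzn-s3-demo-bucket/jobs/etl.py")),
        mode: .batch,
        retryPolicy: .init(maxAttempts: 3)
    ))
    print("started job run \(response.jobRunId)")
}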

" + } + }, "com.amazonaws.emrserverless#S3MonitoringConfiguration": { "type": "structure", "members": { @@ -3094,6 +3468,18 @@ "traits": { "smithy.api#documentation": "

The optional job run name. This doesn't have to be unique.

" } + }, + "mode": { + "target": "com.amazonaws.emrserverless#JobRunMode", + "traits": { + "smithy.api#documentation": "

The mode of the job run when it starts.

" + } + }, + "retryPolicy": { + "target": "com.amazonaws.emrserverless#RetryPolicy", + "traits": { + "smithy.api#documentation": "

The retry policy when job run starts.

" + } } } }, @@ -3482,6 +3868,12 @@ "smithy.api#documentation": "

The key-value pairs that specify worker type to\n WorkerTypeSpecificationInput. This parameter must contain all valid worker\n types for a Spark or Hive application. Valid worker types include Driver and\n Executor for Spark applications and HiveDriver and\n TezTask for Hive applications. You can either set image details in this\n parameter for each worker type, or in imageConfiguration for all worker\n types.

" } }, + "interactiveConfiguration": { + "target": "com.amazonaws.emrserverless#InteractiveConfiguration", + "traits": { + "smithy.api#documentation": "

The interactive configuration object that contains new interactive use cases \n when the application is updated.

" + } + }, "releaseLabel": { "target": "com.amazonaws.emrserverless#ReleaseLabel", "traits": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 18e52890cd..08b29800b8 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -112,11 +112,6 @@ } }, "services" : { - "a4b" : { - "endpoints" : { - "us-east-1" : { } - } - }, "access-analyzer" : { "endpoints" : { "af-south-1" : { }, @@ -136,7 +131,12 @@ "tags" : [ "fips" ] } ] }, - "ca-west-1" : { }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "access-analyzer-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -152,6 +152,13 @@ "deprecated" : true, "hostname" : "access-analyzer-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "access-analyzer-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -442,6 +449,8 @@ }, "airflow" : { "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -450,12 +459,15 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -532,10 +544,15 @@ "aoss" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -2519,37 +2536,6 @@ "us-west-2" : { } } }, - "backupstorage" : { - "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, "batch" : { "defaults" : { "variants" : [ { @@ -2667,6 +2653,12 @@ }, "hostname" : "bedrock.ap-southeast-2.amazonaws.com" }, + "bedrock-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock.ca-central-1.amazonaws.com" + }, "bedrock-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -2679,12 +2671,24 @@ }, "hostname" : "bedrock.eu-west-1.amazonaws.com" }, + "bedrock-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "bedrock.eu-west-2.amazonaws.com" + }, "bedrock-eu-west-3" : { "credentialScope" : { "region" : "eu-west-3" }, "hostname" : "bedrock.eu-west-3.amazonaws.com" }, + "bedrock-fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock-fips.ca-central-1.amazonaws.com" + }, "bedrock-fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2721,6 +2725,12 @@ }, "hostname" : "bedrock-runtime.ap-southeast-2.amazonaws.com" }, + 
"bedrock-runtime-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock-runtime.ca-central-1.amazonaws.com" + }, "bedrock-runtime-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -2733,12 +2743,24 @@ }, "hostname" : "bedrock-runtime.eu-west-1.amazonaws.com" }, + "bedrock-runtime-eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "bedrock-runtime.eu-west-2.amazonaws.com" + }, "bedrock-runtime-eu-west-3" : { "credentialScope" : { "region" : "eu-west-3" }, "hostname" : "bedrock-runtime.eu-west-3.amazonaws.com" }, + "bedrock-runtime-fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "bedrock-runtime-fips.ca-central-1.amazonaws.com" + }, "bedrock-runtime-fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2751,6 +2773,12 @@ }, "hostname" : "bedrock-runtime-fips.us-west-2.amazonaws.com" }, + "bedrock-runtime-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "bedrock-runtime.sa-east-1.amazonaws.com" + }, "bedrock-runtime-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2763,6 +2791,12 @@ }, "hostname" : "bedrock-runtime.us-west-2.amazonaws.com" }, + "bedrock-sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : "bedrock.sa-east-1.amazonaws.com" + }, "bedrock-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -2775,9 +2809,12 @@ }, "hostname" : "bedrock.us-west-2.amazonaws.com" }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-west-2" : { } } @@ -2796,6 +2833,7 @@ }, "braket" : { "endpoints" : { + "eu-north-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, "us-west-1" : { }, @@ -2816,6 +2854,8 @@ }, "cases" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -2935,28 +2975,116 @@ }, "cloud9" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "il-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "af-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "tags" 
: [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "il-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + } } }, "cloudcontrolapi" : { @@ -4434,6 +4562,19 @@ "deprecated" : true, "hostname" : "controltower-fips.ca-central-1.amazonaws.com" }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "controltower-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "controltower-fips.ca-west-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5220,8 +5361,18 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "directconnect-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5230,6 +5381,20 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "directconnect-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -7858,6 +8023,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "fms-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { "variants" : [ { "hostname" : "fms-fips.eu-central-1.amazonaws.com", @@ -7947,6 +8118,13 @@ "deprecated" : true, "hostname" : "fms-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "fms-fips.ca-west-1.amazonaws.com" + }, "fips-eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -8906,11 +9084,6 @@ "us-west-2" : { } } }, - "honeycode" : { - "endpoints" : { - "us-west-2" : { } - } - }, "iam" : { "endpoints" : { "aws-global" : { @@ -8983,6 +9156,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -10138,9 +10312,21 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "kendra-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-west-1" : { }, "eu-west-2" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "kendra-fips.ca-central-1.amazonaws.com" + }, "fips-us-east-1" : { 
"credentialScope" : { "region" : "us-east-1" @@ -11288,6 +11474,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -12089,6 +12276,7 @@ }, "meetings-chime" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, @@ -12518,6 +12706,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -13962,14 +14151,19 @@ }, "quicksight" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "api" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -14582,7 +14776,9 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -14621,6 +14817,8 @@ "deprecated" : true, "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com" }, + "me-central-1" : { }, + "sa-east-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com", @@ -15753,6 +15951,31 @@ "hostname" : "s3-control-fips.ca-central-1.amazonaws.com", "signatureVersions" : [ "s3v4" ] }, + "ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "hostname" : "s3-control.ca-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ], + "variants" : [ { + "hostname" : "s3-control-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "s3-control.dualstack.ca-west-1.amazonaws.com", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "s3-control-fips.ca-west-1.amazonaws.com", + "signatureVersions" : [ "s3v4" ] + }, "eu-central-1" : { "credentialScope" : { "region" : "eu-central-1" @@ -18440,6 +18663,19 @@ "deprecated" : true, "hostname" : "storagegateway-fips.ca-central-1.amazonaws.com" }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "storagegateway-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "ca-west-1-fips" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "storagegateway-fips.ca-west-1.amazonaws.com" + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -19150,7 +19386,12 @@ "tags" : [ "fips" ] } ] }, - "ca-west-1" : { }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "transfer-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -19166,6 +19407,13 @@ "deprecated" : true, "hostname" : "transfer-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "transfer-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -19489,6 +19737,8 @@ "vpc-lattice" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -19496,9 +19746,11 @@ 
"eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -20960,12 +21212,6 @@ "cn-northwest-1" : { } } }, - "backupstorage" : { - "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } - } - }, "batch" : { "endpoints" : { "cn-north-1" : { }, @@ -21814,7 +22060,8 @@ }, "redshift-serverless" : { "endpoints" : { - "cn-north-1" : { } + "cn-north-1" : { }, + "cn-northwest-1" : { } } }, "resource-groups" : { @@ -22859,12 +23106,6 @@ "us-gov-west-1" : { } } }, - "backupstorage" : { - "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } - } - }, "batch" : { "defaults" : { "variants" : [ { @@ -22903,6 +23144,18 @@ }, "bedrock" : { "endpoints" : { + "bedrock-fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "bedrock-fips.us-gov-west-1.amazonaws.com" + }, + "bedrock-runtime-fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "bedrock-runtime-fips.us-gov-west-1.amazonaws.com" + }, "bedrock-runtime-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -24099,14 +24352,62 @@ }, "emr-containers" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "emr-containers.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "emr-containers.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "emr-containers.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "emr-containers.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "emr-serverless" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "emr-serverless.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "emr-serverless.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "emr-serverless.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "emr-serverless.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "es" : { @@ -24958,6 +25259,44 @@ "us-gov-west-1" : { } } }, + "kinesisvideo" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "kinesisvideo-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "kinesisvideo-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "variants" : [ { + "hostname" : "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + "variants" : [ { + "hostname" : 
"kinesisvideo-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "kms" : { "endpoints" : { "ProdFips" : { @@ -26181,6 +26520,36 @@ } } }, + "securitylake" : { + "endpoints" : { + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "securitylake.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-east-1-fips" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "securitylake.us-gov-east-1.amazonaws.com" + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "securitylake.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1-fips" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "securitylake.us-gov-west-1.amazonaws.com" + } + } + }, "serverlessrepo" : { "defaults" : { "protocols" : [ "https" ] @@ -27718,32 +28087,8 @@ }, "ram" : { "endpoints" : { - "fips-us-iso-east-1" : { - "credentialScope" : { - "region" : "us-iso-east-1" - }, - "deprecated" : true, - "hostname" : "ram-fips.us-iso-east-1.c2s.ic.gov" - }, - "fips-us-iso-west-1" : { - "credentialScope" : { - "region" : "us-iso-west-1" - }, - "deprecated" : true, - "hostname" : "ram-fips.us-iso-west-1.c2s.ic.gov" - }, - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "ram-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "ram-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "rbin" : { @@ -27778,43 +28123,23 @@ }, "rds" : { "endpoints" : { - "rds-fips.us-iso-east-1" : { - "credentialScope" : { - "region" : "us-iso-east-1" - }, - "deprecated" : true, - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov" - }, - "rds-fips.us-iso-west-1" : { - "credentialScope" : { - "region" : "us-iso-west-1" - }, - "deprecated" : true, - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov" - }, "rds.us-iso-east-1" : { "credentialScope" : { "region" : "us-iso-east-1" }, "deprecated" : true, - "variants" : [ { - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "rds.us-iso-east-1.c2s.ic.gov" }, "rds.us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" }, "deprecated" : true, - "variants" : [ { - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "rds.us-iso-west-1.c2s.ic.gov" }, "us-iso-east-1" : { "variants" : [ { - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov", + "hostname" : "rds.us-iso-east-1.c2s.ic.gov", "tags" : [ "fips" ] } ] }, @@ -27823,11 +28148,11 @@ "region" : "us-iso-east-1" }, "deprecated" : true, - "hostname" : "rds-fips.us-iso-east-1.c2s.ic.gov" + "hostname" : "rds.us-iso-east-1.c2s.ic.gov" }, "us-iso-west-1" : { "variants" : [ { - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov", + "hostname" : "rds.us-iso-west-1.c2s.ic.gov", "tags" : [ "fips" ] } ] }, @@ -27836,7 +28161,7 @@ "region" : "us-iso-west-1" }, "deprecated" : true, - "hostname" : "rds-fips.us-iso-west-1.c2s.ic.gov" + "hostname" : "rds.us-iso-west-1.c2s.ic.gov" } } }, @@ -28484,19 +28809,7 @@ }, "ram" : { "endpoints" : { - "fips-us-isob-east-1" : { - "credentialScope" : { - "region" : "us-isob-east-1" - }, - "deprecated" : true, - "hostname" : "ram-fips.us-isob-east-1.sc2s.sgov.gov" - }, - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "ram-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] - } + "us-isob-east-1" : { } } }, "rbin" : { @@ -28518,26 
+28831,16 @@ }, "rds" : { "endpoints" : { - "rds-fips.us-isob-east-1" : { - "credentialScope" : { - "region" : "us-isob-east-1" - }, - "deprecated" : true, - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov" - }, "rds.us-isob-east-1" : { "credentialScope" : { "region" : "us-isob-east-1" }, "deprecated" : true, - "variants" : [ { - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] + "hostname" : "rds.us-isob-east-1.sc2s.sgov.gov" }, "us-isob-east-1" : { "variants" : [ { - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov", + "hostname" : "rds.us-isob-east-1.sc2s.sgov.gov", "tags" : [ "fips" ] } ] }, @@ -28546,7 +28849,7 @@ "region" : "us-isob-east-1" }, "deprecated" : true, - "hostname" : "rds-fips.us-isob-east-1.sc2s.sgov.gov" + "hostname" : "rds.us-isob-east-1.sc2s.sgov.gov" } } }, @@ -28787,7 +29090,11 @@ "partition" : "aws-iso-e", "partitionName" : "AWS ISOE (Europe)", "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { }, + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + }, "services" : { } }, { "defaults" : { diff --git a/models/eventbridge.json b/models/eventbridge.json index a351bb8475..9756587b89 100644 --- a/models/eventbridge.json +++ b/models/eventbridge.json @@ -200,6 +200,9 @@ }, { "target": "com.amazonaws.eventbridge#UpdateEndpoint" + }, + { + "target": "com.amazonaws.eventbridge#UpdateEventBus" } ], "traits": { @@ -207,14 +210,14 @@ "sdkId": "EventBridge", "arnNamespace": "events", "cloudFormationName": "Events", - "cloudTrailEventSource": "eventbridge.amazonaws.com", + "cloudTrailEventSource": "events.amazonaws.com", "endpointPrefix": "events" }, "aws.auth#sigv4": { "name": "events" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

Amazon EventBridge helps you to respond to state changes in your Amazon Web Services resources. When your\n resources change state, they automatically send events to an event stream. You can create\n rules that match selected events in the stream and route them to targets to take action. You\n can also use rules to take action on a predetermined schedule. For example, you can configure\n rules to:
  • Automatically invoke an Lambda function to update DNS entries when an event\n notifies you that Amazon EC2 instance enters the running state.
  • Direct specific API records from CloudTrail to an Amazon Kinesis data stream for\n detailed analysis of potential security or availability risks.
  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS\n volume.
For more information about the features of Amazon EventBridge, see the Amazon EventBridge User\n Guide.

", + "smithy.api#documentation": "

Amazon EventBridge helps you to respond to state changes in your Amazon Web Services\n resources. When your resources change state, they automatically send events to an event\n stream. You can create rules that match selected events in the stream and route them to\n targets to take action. You can also use rules to take action on a predetermined schedule. For\n example, you can configure rules to:
  • Automatically invoke an Lambda function to update DNS entries when an\n event notifies you that Amazon EC2 instance enters the running state.
  • Direct specific API records from CloudTrail to an Amazon Kinesis\n data stream for detailed analysis of potential security or availability risks.
  • Periodically invoke a built-in target to create a snapshot of an Amazon EBS\n volume.
For more information about the features of Amazon EventBridge, see the Amazon EventBridge User\n Guide.

", "smithy.api#title": "Amazon EventBridge", "smithy.api#xmlNamespace": { "uri": "http://events.amazonaws.com/doc/2015-10-07" @@ -1835,12 +1838,12 @@ "GraphQLOperation": { "target": "com.amazonaws.eventbridge#GraphQLOperation", "traits": { - "smithy.api#documentation": "

The GraphQL operation; that is, the query, mutation, or subscription to be parsed and executed by the GraphQL service.

\n

For more information, see Operations in the AppSync User Guide.

" + "smithy.api#documentation": "

The GraphQL operation; that is, the query, mutation, or subscription to be parsed and\n executed by the GraphQL service.

\n

For more information, see Operations in the AppSync User Guide.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the GraphQL operation to be parsed and executed, if the event target is an AppSync API.

" + "smithy.api#documentation": "

Contains the GraphQL operation to be parsed and executed, if the event target is an\n AppSync API.

" } }, "com.amazonaws.eventbridge#Archive": { @@ -2083,14 +2086,14 @@ "JobDefinition": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

The ARN or name of the job definition to use if the event target is an Batch job. This\n job definition must already exist.

", + "smithy.api#documentation": "

The ARN or name of the job definition to use if the event target is an Batch job. This job definition must already exist.

", "smithy.api#required": {} } }, "JobName": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

The name to use for this execution of the job, if the target is an Batch job.

", + "smithy.api#documentation": "

The name to use for this execution of the job, if the target is an Batch\n job.

", "smithy.api#required": {} } }, @@ -2103,7 +2106,7 @@ "RetryStrategy": { "target": "com.amazonaws.eventbridge#BatchRetryStrategy", "traits": { - "smithy.api#documentation": "

The retry strategy to use for failed jobs, if the target is an Batch job. The retry\n strategy is the number of times to retry the failed job execution. Valid values are 1–10. When\n you specify a retry strategy here, it overrides the retry strategy defined in the job\n definition.

" + "smithy.api#documentation": "

The retry strategy to use for failed jobs, if the target is an Batch job.\n The retry strategy is the number of times to retry the failed job execution. Valid values are\n 1–10. When you specify a retry strategy here, it overrides the retry strategy defined in the\n job definition.

" } } }, @@ -2123,7 +2126,7 @@ } }, "traits": { - "smithy.api#documentation": "

The retry strategy to use for failed jobs, if the target is an Batch job. If you\n specify a retry strategy here, it overrides the retry strategy defined in the job\n definition.

" + "smithy.api#documentation": "

The retry strategy to use for failed jobs, if the target is an Batch job.\n If you specify a retry strategy here, it overrides the retry strategy defined in the job\n definition.

" } }, "com.amazonaws.eventbridge#Boolean": { @@ -2307,7 +2310,7 @@ } }, "traits": { - "smithy.api#documentation": "

A JSON string which you can use to limit the event bus permissions you are granting to\n only accounts that fulfill the condition. Currently, the only supported condition is\n membership in a certain Amazon Web Services organization. The string must contain Type,\n Key, and Value fields. The Value field specifies the\n ID of the Amazon Web Services organization. Following is an example value for Condition:

\n

\n '{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\":\n \"o-1234567890\"}'\n

" + "smithy.api#documentation": "

A JSON string which you can use to limit the event bus permissions you are granting to\n only accounts that fulfill the condition. Currently, the only supported condition is\n membership in a certain Amazon Web Services organization. The string must contain\n Type, Key, and Value fields. The Value\n field specifies the ID of the Amazon Web Services organization. Following is an example value\n for Condition:

\n

\n '{\"Type\" : \"StringEquals\", \"Key\": \"aws:PrincipalOrgID\", \"Value\":\n \"o-1234567890\"}'\n

" } }, "com.amazonaws.eventbridge#Connection": { @@ -2777,7 +2780,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an API destination, which is an HTTP invocation endpoint configured as a target\n for events.

\n

API destinations do not support private destinations, such as interface VPC endpoints.

\n

For more information, see API destinations in the EventBridge User Guide.

" + "smithy.api#documentation": "

Creates an API destination, which is an HTTP invocation endpoint configured as a target\n for events.

\n

API destinations do not support private destinations, such as interface VPC\n endpoints.

\n

For more information, see API destinations in the\n EventBridge User Guide.

" } }, "com.amazonaws.eventbridge#CreateApiDestinationRequest": { @@ -2889,7 +2892,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an archive of events with the specified settings. When you create an archive,\n incoming events might not immediately start being sent to the archive. Allow a short period of\n time for changes to take effect. If you do not specify a pattern to filter events sent to the\n archive, all events are sent to the archive except replayed events. Replayed events are not\n sent to an archive.

" + "smithy.api#documentation": "

Creates an archive of events with the specified settings. When you create an archive,\n incoming events might not immediately start being sent to the archive. Allow a short period of\n time for changes to take effect. If you do not specify a pattern to filter events sent to the\n archive, all events are sent to the archive except replayed events. Replayed events are not\n sent to an archive.

\n \n

Archives and schema discovery are not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if:
  • You call \n CreateArchive\n on an event bus set to use a customer managed key for encryption.
  • You call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
  • You call \n UpdatedEventBus\n to set a customer managed key on an event bus with an archives or schema discovery enabled.
To enable archives or schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.

\n
" } }, "com.amazonaws.eventbridge#CreateArchiveRequest": { @@ -3207,7 +3210,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a global endpoint. Global endpoints improve your application's availability by making it regional-fault tolerant. To do this, you define a primary and secondary Region \n with event buses in each Region. You also create a Amazon Route 53 health check that will tell EventBridge to route events to the secondary Region when an \"unhealthy\" state \n is encountered and events will be routed back to the primary Region when the health check reports a \"healthy\" state.

" + "smithy.api#documentation": "

Creates a global endpoint. Global endpoints improve your application's availability by\n making it regional-fault tolerant. To do this, you define a primary and secondary Region with\n event buses in each Region. You also create a Amazon Route 53 health check that will\n tell EventBridge to route events to the secondary Region when an \"unhealthy\" state is\n encountered and events will be routed back to the primary Region when the health check reports\n a \"healthy\" state.

" } }, "com.amazonaws.eventbridge#CreateEndpointRequest": { @@ -3216,7 +3219,7 @@ "Name": { "target": "com.amazonaws.eventbridge#EndpointName", "traits": { - "smithy.api#documentation": "

The name of the global endpoint. For example, \"Name\":\"us-east-2-custom_bus_A-endpoint\".

", + "smithy.api#documentation": "

The name of the global endpoint. For example,\n \"Name\":\"us-east-2-custom_bus_A-endpoint\".

", "smithy.api#required": {} } }, @@ -3236,7 +3239,7 @@ "ReplicationConfig": { "target": "com.amazonaws.eventbridge#ReplicationConfig", "traits": { - "smithy.api#documentation": "

Enable or disable event replication. The default state is ENABLED which means you must supply a RoleArn. If you don't have a \n RoleArn or you don't want event replication enabled, set the state to DISABLED.

" + "smithy.api#documentation": "

Enable or disable event replication. The default state is ENABLED which means\n you must supply a RoleArn. If you don't have a RoleArn or you don't\n want event replication enabled, set the state to DISABLED.

" } }, "EventBuses": { @@ -3348,7 +3351,7 @@ "Name": { "target": "com.amazonaws.eventbridge#EventBusName", "traits": { - "smithy.api#documentation": "

The name of the new event bus.

\n

Custom event bus names can't contain the / character, but you can use the / character in partner event bus names. In addition, for partner event buses, the name must exactly match the name of the partner event\n source that this event bus is matched to.

\n

You can't use the name default for a custom event bus, as this name is already used for your account's\n default event bus.

", + "smithy.api#documentation": "

The name of the new event bus.

\n

Custom event bus names can't contain the / character, but you can use the\n / character in partner event bus names. In addition, for partner event buses,\n the name must exactly match the name of the partner event source that this event bus is\n matched to.

\n

You can't use the name default for a custom event bus, as this name is\n already used for your account's default event bus.

", "smithy.api#required": {} } }, @@ -3358,6 +3361,21 @@ "smithy.api#documentation": "

If you are creating a partner event bus, this specifies the partner event source that the\n new event bus will be matched with.

" } }, + "Description": { + "target": "com.amazonaws.eventbridge#EventBusDescription", + "traits": { + "smithy.api#documentation": "

The event bus description.

" + } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.
If you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt events on the event bus.
For more information, see Managing keys in the Key Management Service\n Developer Guide.
Archives and schema discovery are not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if:
  • You call \n CreateArchive\n on an event bus set to use a customer managed key for encryption.
  • You call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.
  • You call \n UpdatedEventBus\n to set a customer managed key on an event bus with an archives or schema discovery enabled.
To enable archives or schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.
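A tentative Soto sketch of creating a bus with the new members once this model is regenerated; the bus name and key alias are placeholders and, per the note above, archives and schema discovery must stay disabled on such a bus.

import SotoEventBridge

// Create a custom event bus whose events are encrypted with a customer managed KMS key.
func createEncryptedBus(eventBridge: EventBridge) async throws {
    let response = try await eventBridge.createEventBus(.init(
        description: "Orders bus encrypted with a customer managed key",
        kmsKeyIdentifier: "alias/my-eventbridge-key",  // placeholder key alias
        name: "orders-bus"
    ))
    print(response.eventBusArn ?? "")
}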

\n
" + } + }, + "DeadLetterConfig": { + "target": "com.amazonaws.eventbridge#DeadLetterConfig" + }, "Tags": { "target": "com.amazonaws.eventbridge#TagList", "traits": { @@ -3377,6 +3395,21 @@ "traits": { "smithy.api#documentation": "

The ARN of the new event bus.

" } + }, + "Description": { + "target": "com.amazonaws.eventbridge#EventBusDescription", + "traits": { + "smithy.api#documentation": "

The event bus description.

" + } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the KMS\n customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified.

\n

For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.

" + } + }, + "DeadLetterConfig": { + "target": "com.amazonaws.eventbridge#DeadLetterConfig" } }, "traits": { @@ -3409,7 +3442,7 @@ } ], "traits": { - "smithy.api#documentation": "

Called by an SaaS partner to create a partner event source. This operation is not used by\n Amazon Web Services customers.
Each partner event source can be used by one Amazon Web Services account to create a matching partner\n event bus in that Amazon Web Services account. A SaaS partner must create one partner event source for each\n Amazon Web Services account that wants to receive those event types.
A partner event source creates events based on resources within the SaaS partner's service\n or application.
An Amazon Web Services account that creates a partner event bus that matches the partner event source can\n use that event bus to receive events from the partner, and then process them using Amazon Web Services Events\n rules and targets.
Partner event source names follow this format:
\n \n partner_name/event_namespace/event_name\n \n
  • \n partner_name is determined during partner registration, and\n identifies the partner to Amazon Web Services customers.
  • \n event_namespace is determined by the partner, and is a way for\n the partner to categorize their events.
  • \n event_name is determined by the partner, and should uniquely identify\n an event-generating resource within the partner system.
    The event_name must be unique across all Amazon Web Services customers. This is because the event source is a shared resource\n between the partner and customer accounts, and each partner event source unique in the partner account.
The combination of\n event_namespace and event_name should help Amazon Web Services\n customers decide whether to create an event bus to receive these events.

" + "smithy.api#documentation": "

Called by an SaaS partner to create a partner event source. This operation is not used by\n Amazon Web Services customers.

\n

Each partner event source can be used by one Amazon Web Services account to create a\n matching partner event bus in that Amazon Web Services account. A SaaS partner must create one\n partner event source for each Amazon Web Services account that wants to receive those event\n types.

\n

A partner event source creates events based on resources within the SaaS partner's service\n or application.

\n

An Amazon Web Services account that creates a partner event bus that matches the partner\n event source can use that event bus to receive events from the partner, and then process them\n using Amazon Web Services Events rules and targets.

\n

Partner event source names follow this format:

\n

\n \n partner_name/event_namespace/event_name\n \n

\n
    \n
  • \n

    \n partner_name is determined during partner registration, and\n identifies the partner to Amazon Web Services customers.

    \n
  • \n
  • \n

    \n event_namespace is determined by the partner, and is a way for\n the partner to categorize their events.

    \n
  • \n
  • \n

    \n event_name is determined by the partner, and should uniquely\n identify an event-generating resource within the partner system.

    \n

    The event_name must be unique across all Amazon Web Services\n customers. This is because the event source is a shared resource between the partner and\n customer accounts, and each partner event source is unique in the partner account.

    \n
  • \n
\n

The combination of event_namespace and\n event_name should help Amazon Web Services customers decide whether to\n create an event bus to receive these events.
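
A minimal Soto sketch of the partner-side call described above, assuming the generated CreatePartnerEventSourceRequest keeps these member names; the source name below follows the partner_name/event_namespace/event_name format and the account ID is a placeholder.

```swift
import SotoEventBridge

// Sketch of the partner-side call; the source name follows the
// partner_name/event_namespace/event_name format and the account ID is a placeholder.
func sharePartnerEventSource(using eventBridge: EventBridge) async throws -> String? {
    let request = EventBridge.CreatePartnerEventSourceRequest(
        account: "111122223333",  // customer account allowed to create the matching event bus
        name: "examplepartner.com/example-namespace/example-event"
    )
    let response = try await eventBridge.createPartnerEventSource(request)
    return response.eventSourceArn
}
```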

" } }, "com.amazonaws.eventbridge#CreatePartnerEventSourceRequest": { @@ -3418,14 +3451,14 @@ "Name": { "target": "com.amazonaws.eventbridge#EventSourceName", "traits": { - "smithy.api#documentation": "

The name of the partner event source. This name must be unique and must be in the format\n \n partner_name/event_namespace/event_name\n .\n The Amazon Web Services account that wants to use this partner event source must create a partner event bus\n with a name that matches the name of the partner event source.

", + "smithy.api#documentation": "

The name of the partner event source. This name must be unique and must be in the format\n \n partner_name/event_namespace/event_name\n .\n The Amazon Web Services account that wants to use this partner event source must create a\n partner event bus with a name that matches the name of the partner event source.

", "smithy.api#required": {} } }, "Account": { "target": "com.amazonaws.eventbridge#AccountId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID that is permitted to create a matching partner event bus for this\n partner event source.

", + "smithy.api#documentation": "

The Amazon Web Services account ID that is permitted to create a matching partner event bus\n for this partner event source.

", "smithy.api#required": {} } } @@ -3532,7 +3565,7 @@ } }, "traits": { - "smithy.api#documentation": "

A DeadLetterConfig object that contains information about a dead-letter queue\n configuration.

" + "smithy.api#documentation": "

Configuration details of the Amazon SQS queue for EventBridge to use as a\n dead-letter queue (DLQ).

\n

For more information, see Event\n retry policy and using dead-letter queues in the EventBridge User\n Guide.

" } }, "com.amazonaws.eventbridge#DeauthorizeConnection": { @@ -3797,7 +3830,7 @@ } ], "traits": { - "smithy.api#documentation": "

Delete an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Delete an existing global endpoint. For more information about global endpoints, see\n Making applications Regional-fault tolerant with global endpoints and event\n replication in the \n Amazon EventBridge User Guide\n .

" } }, "com.amazonaws.eventbridge#DeleteEndpointRequest": { @@ -3806,7 +3839,7 @@ "Name": { "target": "com.amazonaws.eventbridge#EndpointName", "traits": { - "smithy.api#documentation": "

The name of the endpoint you want to delete. For example, \"Name\":\"us-east-2-custom_bus_A-endpoint\"..

", + "smithy.api#documentation": "

The name of the endpoint you want to delete. For example,\n \"Name\":\"us-east-2-custom_bus_A-endpoint\".

", "smithy.api#required": {} } } @@ -3877,7 +3910,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation is used by SaaS partners to delete a partner event source. This operation\n is not used by Amazon Web Services customers.

\n

When you delete an event source, the status of the corresponding partner event bus in the\n Amazon Web Services customer account becomes DELETED.

\n

" + "smithy.api#documentation": "

This operation is used by SaaS partners to delete a partner event source. This operation\n is not used by Amazon Web Services customers.

\n

When you delete an event source, the status of the corresponding partner event bus in the\n Amazon Web Services customer account becomes DELETED.

\n

" } }, "com.amazonaws.eventbridge#DeletePartnerEventSourceRequest": { @@ -3893,7 +3926,7 @@ "Account": { "target": "com.amazonaws.eventbridge#AccountId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID of the Amazon Web Services customer that the event source was created for.

", + "smithy.api#documentation": "

The Amazon Web Services account ID of the Amazon Web Services customer that the event source\n was created for.

", "smithy.api#required": {} } } @@ -3925,7 +3958,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified rule.

\n

Before you can delete the rule, you must remove all targets, using RemoveTargets.

\n

When you delete a rule, incoming events might continue to match to the deleted rule. Allow\n a short period of time for changes to take effect.

\n

If you call delete rule multiple times for the same rule, all calls will succeed. When you\n call delete rule for a non-existent custom eventbus, ResourceNotFoundException is\n returned.

\n

Managed rules are rules created and managed by another Amazon Web Services service on your behalf. These\n rules are created by those other Amazon Web Services services to support functionality in those services. You\n can delete these rules using the Force option, but you should do so only if you\n are sure the other service is not still using that rule.

" + "smithy.api#documentation": "

Deletes the specified rule.

\n

Before you can delete the rule, you must remove all targets, using RemoveTargets.

\n

When you delete a rule, incoming events might continue to match to the deleted rule. Allow\n a short period of time for changes to take effect.

\n

If you call delete rule multiple times for the same rule, all calls will succeed. When you\n call delete rule for a non-existent custom event bus, ResourceNotFoundException is\n returned.

\n

Managed rules are rules created and managed by another Amazon Web Services service on your\n behalf. These rules are created by those other Amazon Web Services services to support\n functionality in those services. You can delete these rules using the Force\n option, but you should do so only if you are sure the other service is not still using that\n rule.

" } }, "com.amazonaws.eventbridge#DeleteRuleRequest": { @@ -3948,7 +3981,7 @@ "target": "com.amazonaws.eventbridge#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

If this is a managed rule, created by an Amazon Web Services service on your behalf, you must specify\n Force as True to delete the rule. This parameter is ignored for\n rules that are not managed rules. You can check whether a rule is a managed rule by using\n DescribeRule or ListRules and checking the ManagedBy\n field of the response.

" + "smithy.api#documentation": "

If this is a managed rule, created by an Amazon Web Services service on your behalf, you\n must specify Force as True to delete the rule. This parameter is\n ignored for rules that are not managed rules. You can check whether a rule is a managed rule\n by using DescribeRule or ListRules and checking the\n ManagedBy field of the response.
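
A short Soto sketch of force-deleting a managed rule, under the assumption that the generated request shapes keep the force member; the targets are removed first, as required above, and the rule and target IDs are placeholders.

```swift
import SotoEventBridge

// Sketch: clear the targets, then force-delete a managed rule. Rule and target IDs
// are placeholders; member labels assume the generated Swift shapes.
func forceDeleteManagedRule(using eventBridge: EventBridge) async throws {
    // A rule cannot be deleted while it still has targets attached.
    _ = try await eventBridge.removeTargets(
        .init(force: true, ids: ["example-target-id"], rule: "example-managed-rule")
    )
    try await eventBridge.deleteRule(.init(force: true, name: "example-managed-rule"))
}
```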

" } } }, @@ -4299,7 +4332,7 @@ } ], "traits": { - "smithy.api#documentation": "

Get the information about an existing global endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Get the information about an existing global endpoint. For more information about global\n endpoints, see Making applications\n Regional-fault tolerant with global endpoints and event replication in the\n \n Amazon EventBridge User Guide\n .

" } }, "com.amazonaws.eventbridge#DescribeEndpointRequest": { @@ -4308,14 +4341,14 @@ "Name": { "target": "com.amazonaws.eventbridge#EndpointName", "traits": { - "smithy.api#documentation": "

The name of the endpoint you want to get information about. For example, \"Name\":\"us-east-2-custom_bus_A-endpoint\".

", + "smithy.api#documentation": "

The name of the endpoint you want to get information about. For example,\n \"Name\":\"us-east-2-custom_bus_A-endpoint\".

", "smithy.api#required": {} } }, "HomeRegion": { "target": "com.amazonaws.eventbridge#HomeRegion", "traits": { - "smithy.api#documentation": "

The primary Region of the endpoint you want to get information about. For example \"HomeRegion\": \"us-east-1\".

" + "smithy.api#documentation": "

The primary Region of the endpoint you want to get information about. For example\n \"HomeRegion\": \"us-east-1\".

" } } }, @@ -4353,7 +4386,7 @@ "ReplicationConfig": { "target": "com.amazonaws.eventbridge#ReplicationConfig", "traits": { - "smithy.api#documentation": "

Whether replication is enabled or disabled for the endpoint you asked for information about.

" + "smithy.api#documentation": "

Whether replication is enabled or disabled for the endpoint you asked for information\n about.

" } }, "EventBuses": { @@ -4426,7 +4459,7 @@ } ], "traits": { - "smithy.api#documentation": "

Displays details about an event bus in your account. This can include the external Amazon Web Services\n accounts that are permitted to write events to your default event bus, and the associated\n policy. For custom event buses and partner event buses, it displays the name, ARN, policy,\n state, and creation time.

\n

To enable your account to receive events from other accounts on its default event bus,\n use PutPermission.

\n

For more information about partner event buses, see CreateEventBus.

" + "smithy.api#documentation": "

Displays details about an event bus in your account. This can include the external Amazon Web Services accounts that are permitted to write events to your default event bus, and the\n associated policy. For custom event buses and partner event buses, it displays the name, ARN,\n policy, state, and creation time.

\n

To enable your account to receive events from other accounts on its default event bus,\n use PutPermission.

\n

For more information about partner event buses, see CreateEventBus.

" } }, "com.amazonaws.eventbridge#DescribeEventBusRequest": { @@ -4455,14 +4488,41 @@ "Arn": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the account permitted to write events to the current\n account.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the account permitted to write events to the current account.

" + } + }, + "Description": { + "target": "com.amazonaws.eventbridge#EventBusDescription", + "traits": { + "smithy.api#documentation": "

The event bus description.

" + } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the KMS\n customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified.

\n

For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.

" } }, + "DeadLetterConfig": { + "target": "com.amazonaws.eventbridge#DeadLetterConfig" + }, "Policy": { "target": "com.amazonaws.eventbridge#String", "traits": { "smithy.api#documentation": "

The policy that enables the external account to send events to your account.

" } + }, + "CreationTime": { + "target": "com.amazonaws.eventbridge#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the event bus was created.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.eventbridge#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the event bus was last modified.
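
For orientation, a hedged Soto sketch that reads the members this update adds to the DescribeEventBus response (description, KMS key identifier, dead-letter config, creation and last-modified times); the property names assume the regenerated Swift shapes.

```swift
import SotoEventBridge

// Sketch: reads the members this update adds to the DescribeEventBus response.
// Property names assume the regenerated Swift shapes mirror the model members.
func printEventBusDetails(using eventBridge: EventBridge, name: String) async throws {
    let bus = try await eventBridge.describeEventBus(.init(name: name))
    print("ARN:           \(bus.arn ?? "-")")
    print("Description:   \(bus.description ?? "-")")
    print("KMS key:       \(bus.kmsKeyIdentifier ?? "AWS owned key")")
    print("Dead-letter Q: \(bus.deadLetterConfig?.arn ?? "none")")
    print("Created:       \(bus.creationTime.map { "\($0)" } ?? "-")")
    print("Last modified: \(bus.lastModifiedTime.map { "\($0)" } ?? "-")")
}
```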

" + } } }, "traits": { @@ -4571,7 +4631,7 @@ } ], "traits": { - "smithy.api#documentation": "

An SaaS partner can use this operation to list details about a partner event source that\n they have created. Amazon Web Services customers do not use this operation. Instead, Amazon Web Services customers can use DescribeEventSource\n to see details about a partner event source that is\n shared with them.

" + "smithy.api#documentation": "

An SaaS partner can use this operation to list details about a partner event source that\n they have created. Amazon Web Services customers do not use this operation. Instead, Amazon Web Services customers can use DescribeEventSource to see details about a partner event source that is shared with\n them.

" } }, "com.amazonaws.eventbridge#DescribePartnerEventSourceRequest": { @@ -4783,7 +4843,7 @@ "EventPattern": { "target": "com.amazonaws.eventbridge#EventPattern", "traits": { - "smithy.api#documentation": "

The event pattern. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

The event pattern. For more information, see Events and Event\n Patterns in the \n Amazon EventBridge User Guide\n .

" } }, "ScheduleExpression": { @@ -4813,7 +4873,7 @@ "ManagedBy": { "target": "com.amazonaws.eventbridge#ManagedBy", "traits": { - "smithy.api#documentation": "

If this is a managed rule, created by an Amazon Web Services service on your behalf, this field displays\n the principal name of the Amazon Web Services service that created the rule.

" + "smithy.api#documentation": "

If this is a managed rule, created by an Amazon Web Services service on your behalf, this\n field displays the principal name of the Amazon Web Services service that created the\n rule.

" } }, "EventBusName": { @@ -4899,19 +4959,19 @@ "LaunchType": { "target": "com.amazonaws.eventbridge#LaunchType", "traits": { - "smithy.api#documentation": "

Specifies the launch type on which your task is running. The launch type that you specify\n here must match one of the launch type (compatibilities) of the target task. The\n FARGATE value is supported only in the Regions where Fargate with Amazon ECS\n is supported. For more information, see Fargate on Amazon ECS in\n the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Specifies the launch type on which your task is running. The launch type that you specify\n here must match one of the launch type (compatibilities) of the target task. The\n FARGATE value is supported only in the Regions where Fargate\n with Amazon ECS is supported. For more information, see Fargate on Amazon ECS in the Amazon Elastic Container Service Developer\n Guide.

" } }, "NetworkConfiguration": { "target": "com.amazonaws.eventbridge#NetworkConfiguration", "traits": { - "smithy.api#documentation": "

Use this structure if the Amazon ECS task uses the awsvpc network mode. This\n structure specifies the VPC subnets and security groups associated with the task, and whether\n a public IP address is to be used. This structure is required if LaunchType is\n FARGATE because the awsvpc mode is required for Fargate\n tasks.

\n

If you specify NetworkConfiguration when the target ECS task does not use the\n awsvpc network mode, the task fails.

" + "smithy.api#documentation": "

Use this structure if the Amazon ECS task uses the awsvpc network\n mode. This structure specifies the VPC subnets and security groups associated with the task,\n and whether a public IP address is to be used. This structure is required if\n LaunchType is FARGATE because the awsvpc mode is\n required for Fargate tasks.

\n

If you specify NetworkConfiguration when the target ECS task does not use the\n awsvpc network mode, the task fails.
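
A sketch of the awsvpc/Fargate combination described above, expressed as a rule target with Soto; the shape and member names are assumed to follow the generated EventBridge types, and every ARN and ID is a placeholder. The returned target would then be registered on a rule with PutTargets.

```swift
import SotoEventBridge

// Sketch of the awsvpc/Fargate combination as a rule target. Every ARN and ID is a
// placeholder, and the member labels assume the generated EventBridge shapes.
func fargateTarget() -> EventBridge.Target {
    let ecsParameters = EventBridge.EcsParameters(
        launchType: .fargate,  // FARGATE requires the awsvpc network mode below
        networkConfiguration: .init(awsvpcConfiguration: .init(
            assignPublicIp: .disabled,
            securityGroups: ["sg-0123456789abcdef0"],
            subnets: ["subnet-0123456789abcdef0"]
        )),
        platformVersion: "1.4.0",
        taskDefinitionArn: "arn:aws:ecs:us-east-1:111122223333:task-definition/example-task:1"
    )
    return EventBridge.Target(
        arn: "arn:aws:ecs:us-east-1:111122223333:cluster/example-cluster",
        ecsParameters: ecsParameters,
        id: "example-ecs-target",
        roleArn: "arn:aws:iam::111122223333:role/example-events-invoke-role"
    )
}
```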

" } }, "PlatformVersion": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

Specifies the platform version for the task. Specify only the numeric portion of the\n platform version, such as 1.1.0.

\n

This structure is used only if LaunchType is FARGATE. For more\n information about valid platform versions, see Fargate Platform\n Versions in the Amazon Elastic Container Service Developer\n Guide.

" + "smithy.api#documentation": "

Specifies the platform version for the task. Specify only the numeric portion of the\n platform version, such as 1.1.0.

\n

This structure is used only if LaunchType is FARGATE. For more\n information about valid platform versions, see Fargate\n Platform Versions in the Amazon Elastic Container Service Developer\n Guide.

" } }, "Group": { @@ -4930,7 +4990,7 @@ "target": "com.amazonaws.eventbridge#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether to enable Amazon ECS managed tags for the task. For more information,\n see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer\n Guide.

" + "smithy.api#documentation": "

Specifies whether to enable Amazon ECS managed tags for the task. For more\n information, see Tagging Your Amazon ECS\n Resources in the Amazon Elastic Container Service Developer Guide.

" } }, "EnableExecuteCommand": { @@ -5052,7 +5112,7 @@ "ReplicationConfig": { "target": "com.amazonaws.eventbridge#ReplicationConfig", "traits": { - "smithy.api#documentation": "

Whether event replication was enabled or disabled for this endpoint. The default state is ENABLED which means you must supply a RoleArn. \n If you don't have a RoleArn or you don't want event replication enabled, set the state to DISABLED.

" + "smithy.api#documentation": "

Whether event replication was enabled or disabled for this endpoint. The default state is\n ENABLED which means you must supply a RoleArn. If you don't have a\n RoleArn or you don't want event replication enabled, set the state to\n DISABLED.

" } }, "EventBuses": { @@ -5070,7 +5130,7 @@ "EndpointId": { "target": "com.amazonaws.eventbridge#EndpointId", "traits": { - "smithy.api#documentation": "

The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.

" + "smithy.api#documentation": "

The URL subdomain of the endpoint. For example, if the URL for Endpoint is\n https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is\n abcde.veo.

" } }, "EndpointUrl": { @@ -5105,7 +5165,7 @@ } }, "traits": { - "smithy.api#documentation": "

A global endpoint used to improve your application's availability by making it regional-fault tolerant. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

A global endpoint used to improve your application's availability by making it\n regional-fault tolerant. For more information about global endpoints, see Making\n applications Regional-fault tolerant with global endpoints and event replication in\n the \n Amazon EventBridge User Guide\n .

" } }, "com.amazonaws.eventbridge#EndpointArn": { @@ -5269,15 +5329,42 @@ "smithy.api#documentation": "

The ARN of the event bus.

" } }, + "Description": { + "target": "com.amazonaws.eventbridge#EventBusDescription", + "traits": { + "smithy.api#documentation": "

The event bus description.

" + } + }, "Policy": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

The permissions policy of the event bus, describing which other Amazon Web Services accounts can write\n events to this event bus.

" + "smithy.api#documentation": "

The permissions policy of the event bus, describing which other Amazon Web Services\n accounts can write events to this event bus.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.eventbridge#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the event bus was created.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.eventbridge#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the event bus was last modified.

" } } }, "traits": { - "smithy.api#documentation": "

An event bus receives events from a source, uses rules to evaluate them, applies any configured input transformation, and routes them to the appropriate target(s).\n Your account's default event bus receives events from Amazon Web Services services. A custom event\n bus can receive events from your custom applications and services. A partner event bus\n receives events from an event source created by an SaaS partner. These events come from the\n partners services or applications.

" + "smithy.api#documentation": "

An event bus receives events from a source, uses rules to evaluate them, applies any\n configured input transformation, and routes them to the appropriate target(s). Your account's\n default event bus receives events from Amazon Web Services services. A custom event bus can\n receive events from your custom applications and services. A partner event bus receives events\n from an event source created by an SaaS partner. These events come from the partner's services\n or applications.

" + } + }, + "com.amazonaws.eventbridge#EventBusDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + } } }, "com.amazonaws.eventbridge#EventBusList": { @@ -5363,7 +5450,7 @@ "ExpirationTime": { "target": "com.amazonaws.eventbridge#Timestamp", "traits": { - "smithy.api#documentation": "

The date and time that the event source will expire, if the Amazon Web Services account doesn't create a\n matching event bus for it.

" + "smithy.api#documentation": "

The date and time that the event source will expire, if the Amazon Web Services account\n doesn't create a matching event bus for it.

" } }, "Name": { @@ -5380,7 +5467,7 @@ } }, "traits": { - "smithy.api#documentation": "

A partner event source is created by an SaaS partner. If a customer creates a partner\n event bus that matches this event source, that Amazon Web Services account can receive events from the\n partner's applications or services.

" + "smithy.api#documentation": "

A partner event source is created by an SaaS partner. If a customer creates a partner\n event bus that matches this event source, that Amazon Web Services account can receive events\n from the partner's applications or services.

" } }, "com.amazonaws.eventbridge#EventSourceList": { @@ -5448,13 +5535,13 @@ "Secondary": { "target": "com.amazonaws.eventbridge#Secondary", "traits": { - "smithy.api#documentation": "

The Region that events are routed to when failover is triggered or event replication is enabled.

", + "smithy.api#documentation": "

The Region that events are routed to when failover is triggered or event replication is\n enabled.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered.

" + "smithy.api#documentation": "

The failover configuration for an endpoint. This includes what triggers failover and what\n happens when it's triggered.

" } }, "com.amazonaws.eventbridge#GraphQLOperation": { @@ -5533,24 +5620,24 @@ "PathParameterValues": { "target": "com.amazonaws.eventbridge#PathParameterList", "traits": { - "smithy.api#documentation": "

The path parameter values to be used to populate API Gateway API or EventBridge\n ApiDestination path wildcards (\"*\").

" + "smithy.api#documentation": "

The path parameter values to be used to populate API Gateway API or EventBridge ApiDestination path wildcards (\"*\").

" } }, "HeaderParameters": { "target": "com.amazonaws.eventbridge#HeaderParametersMap", "traits": { - "smithy.api#documentation": "

The headers that need to be sent as part of request invoking the API Gateway API or\n EventBridge ApiDestination.

" + "smithy.api#documentation": "

The headers that need to be sent as part of request invoking the API Gateway API or\n EventBridge ApiDestination.

" } }, "QueryStringParameters": { "target": "com.amazonaws.eventbridge#QueryStringParametersMap", "traits": { - "smithy.api#documentation": "

The query string keys/values that need to be sent as part of request invoking the API Gateway \n API or EventBridge ApiDestination.

" + "smithy.api#documentation": "

The query string keys/values that need to be sent as part of request invoking the API Gateway API or EventBridge ApiDestination.

" } } }, "traits": { - "smithy.api#documentation": "

These are custom parameter to be used when the target is an API Gateway APIs or\n EventBridge ApiDestinations. In the latter case, these are merged with any\n InvocationParameters specified on the Connection, with any values from the Connection taking\n precedence.

" + "smithy.api#documentation": "

These are custom parameters to be used when the target is an API Gateway API or\n EventBridge ApiDestination. In the latter case, these are merged with any\n InvocationParameters specified on the Connection, with any values from the Connection taking\n precedence.

" } }, "com.amazonaws.eventbridge#HttpsEndpoint": { @@ -5597,7 +5684,7 @@ "InputTemplate": { "target": "com.amazonaws.eventbridge#TransformerInput", "traits": { - "smithy.api#documentation": "

Input template where you specify placeholders that will be filled with the values of the\n keys from InputPathsMap to customize the data sent to the target. Enclose each\n InputPathsMaps value in brackets: <value>\n

\n

If InputTemplate is a JSON object (surrounded by curly braces), the following\n restrictions apply:

\n
    \n
  • \n

    The placeholder cannot be used as an object key.

    \n
  • \n
\n

The following example shows the syntax for using InputPathsMap and\n InputTemplate.

\n

\n \"InputTransformer\":\n

\n

\n {\n

\n

\n \"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\":\n \"$.detail.status\"},\n

\n

\n \"InputTemplate\": \" is in state \"\n

\n

\n }\n

\n

To have the InputTemplate include quote marks within a JSON string, escape\n each quote marks with a slash, as in the following example:

\n

\n \"InputTransformer\":\n

\n

\n {\n

\n

\n \"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\":\n \"$.detail.status\"},\n

\n

\n \"InputTemplate\": \" is in state \\\"\\\"\"\n

\n

\n }\n

\n

The InputTemplate can also be valid JSON with varibles in quotes or out, as\n in the following example:

\n

\n \"InputTransformer\":\n

\n

\n {\n

\n

\n \"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\":\n \"$.detail.status\"},\n

\n

\n \"InputTemplate\": '{\"myInstance\": ,\"myStatus\": \" is\n in state \\\"\\\"\"}'\n

\n

\n }\n

", + "smithy.api#documentation": "

Input template where you specify placeholders that will be filled with the values of the\n keys from InputPathsMap to customize the data sent to the target. Enclose each\n InputPathsMaps value in brackets: <value>

\n

If InputTemplate is a JSON object (surrounded by curly braces), the following\n restrictions apply:

\n
    \n
  • \n

    The placeholder cannot be used as an object key.

    \n
  • \n
\n

The following example shows the syntax for using InputPathsMap and\n InputTemplate.

\n

\n \"InputTransformer\":\n

\n

\n {\n

\n

\n \"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\":\n \"$.detail.status\"},\n

\n

\n \"InputTemplate\": \" is in state \"\n

\n

\n }\n

\n

To have the InputTemplate include quote marks within a JSON string, escape\n each quote marks with a slash, as in the following example:

\n

\n \"InputTransformer\":\n

\n

\n {\n

\n

\n \"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\":\n \"$.detail.status\"},\n

\n

\n \"InputTemplate\": \" is in state \\\"\\\"\"\n

\n

\n }\n

\n

The InputTemplate can also be valid JSON with variables in quotes or out, as\n in the following example:

\n

\n \"InputTransformer\":\n

\n

\n {\n

\n

\n \"InputPathsMap\": {\"instance\": \"$.detail.instance\",\"status\":\n \"$.detail.status\"},\n

\n

\n \"InputTemplate\": '{\"myInstance\": ,\"myStatus\": \" is\n in state \\\"\\\"\"}'\n

\n

\n }\n
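
The same InputPathsMap / InputTemplate pairing, sketched with the generated Soto shape (member labels assumed); angle-bracket placeholders such as <instance> and <status> in the template refer to keys defined in InputPathsMap.

```swift
import SotoEventBridge

// Sketch of the InputPathsMap / InputTemplate pairing; member labels assume the
// generated Soto shape. Angle-bracket placeholders in the template refer to keys
// defined in inputPathsMap.
func instanceStateTransformer() -> EventBridge.InputTransformer {
    EventBridge.InputTransformer(
        inputPathsMap: [
            "instance": "$.detail.instance",
            "status": "$.detail.status"
        ],
        inputTemplate: "<instance> is in state <status>"
    )
}
```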

", "smithy.api#required": {} } } @@ -5673,6 +5760,15 @@ "smithy.api#documentation": "

This object enables you to specify a JSON path to extract from the event and use as the\n partition key for the Amazon Kinesis data stream, so that you can control the shard to which\n the event goes. If you do not include this parameter, the default is to use the\n eventId as the partition key.

" } }, + "com.amazonaws.eventbridge#KmsKeyIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, "com.amazonaws.eventbridge#LaunchType": { "type": "enum", "members": { @@ -5955,7 +6051,7 @@ } ], "traits": { - "smithy.api#documentation": "

List the global endpoints associated with this account. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

List the global endpoints associated with this account. For more information about global\n endpoints, see Making applications\n Regional-fault tolerant with global endpoints and event replication in the\n \n Amazon EventBridge User Guide\n .

" } }, "com.amazonaws.eventbridge#ListEndpointsRequest": { @@ -5964,19 +6060,19 @@ "NamePrefix": { "target": "com.amazonaws.eventbridge#EndpointName", "traits": { - "smithy.api#documentation": "

A value that will return a subset of the endpoints associated with this account. For example, \"NamePrefix\": \"ABC\" will return all endpoints with \"ABC\" in the name.

" + "smithy.api#documentation": "

A value that will return a subset of the endpoints associated with this account. For\n example, \"NamePrefix\": \"ABC\" will return all endpoints with \"ABC\" in the\n name.

" } }, "HomeRegion": { "target": "com.amazonaws.eventbridge#HomeRegion", "traits": { - "smithy.api#documentation": "

The primary Region of the endpoints associated with this account. For example \"HomeRegion\": \"us-east-1\".

" + "smithy.api#documentation": "

The primary Region of the endpoints associated with this account. For example\n \"HomeRegion\": \"us-east-1\".

" } }, "NextToken": { "target": "com.amazonaws.eventbridge#NextToken", "traits": { - "smithy.api#documentation": "

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. \n Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination \n token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + "smithy.api#documentation": "

If nextToken is returned, there are more results available. The value of\n nextToken is a unique pagination token for each page. Make the call again using\n the returned token to retrieve the next page. Keep all other arguments unchanged. Each\n pagination token expires after 24 hours. Using an expired pagination token will return an HTTP\n 400 InvalidToken error.

" } }, "MaxResults": { @@ -6002,7 +6098,7 @@ "NextToken": { "target": "com.amazonaws.eventbridge#NextToken", "traits": { - "smithy.api#documentation": "

If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. \n Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination \n token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + "smithy.api#documentation": "

If nextToken is returned, there are more results available. The value of\n nextToken is a unique pagination token for each page. Make the call again using\n the returned token to retrieve the next page. Keep all other arguments unchanged. Each\n pagination token expires after 24 hours. Using an expired pagination token will return an HTTP\n 400 InvalidToken error.

" } } }, @@ -6090,7 +6186,7 @@ } ], "traits": { - "smithy.api#documentation": "

You can use this to see all the partner event sources that have been shared with your Amazon Web Services\n account. For more information about partner event sources, see CreateEventBus.

" + "smithy.api#documentation": "

You can use this to see all the partner event sources that have been shared with your\n Amazon Web Services account. For more information about partner event sources, see CreateEventBus.

" } }, "com.amazonaws.eventbridge#ListEventSourcesRequest": { @@ -6159,7 +6255,7 @@ } ], "traits": { - "smithy.api#documentation": "

An SaaS partner can use this operation to display the Amazon Web Services account ID that a particular\n partner event source name is associated with. This operation is not used by Amazon Web Services\n customers.

" + "smithy.api#documentation": "

An SaaS partner can use this operation to display the Amazon Web Services account ID that a\n particular partner event source name is associated with. This operation is not used by Amazon Web Services customers.

" } }, "com.amazonaws.eventbridge#ListPartnerEventSourceAccountsRequest": { @@ -6441,7 +6537,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists your Amazon EventBridge rules. You can either list all the rules or you can provide\n a prefix to match to the rule names.

\n

The maximum number of results per page for requests is 100.

\n

ListRules does not list the targets of a rule. To see the targets associated with a rule,\n use ListTargetsByRule.

" + "smithy.api#documentation": "

Lists your Amazon EventBridge rules. You can either list all the rules or you can\n provide a prefix to match to the rule names.

\n

The maximum number of results per page for requests is 100.

\n

ListRules does not list the targets of a rule. To see the targets associated with a rule,\n use ListTargetsByRule.

" } }, "com.amazonaws.eventbridge#ListRulesRequest": { @@ -6641,7 +6737,7 @@ } }, "traits": { - "smithy.api#documentation": "

This rule was created by an Amazon Web Services service on behalf of your account. It is managed by that\n service. If you see this error in response to DeleteRule or\n RemoveTargets, you can use the Force parameter in those calls to\n delete the rule or remove targets from the rule. You cannot modify these managed rules by\n using DisableRule, EnableRule, PutTargets,\n PutRule, TagResource, or UntagResource.

", + "smithy.api#documentation": "

This rule was created by an Amazon Web Services service on behalf of your account. It is\n managed by that service. If you see this error in response to DeleteRule or\n RemoveTargets, you can use the Force parameter in those calls to\n delete the rule or remove targets from the rule. You cannot modify these managed rules by\n using DisableRule, EnableRule, PutTargets,\n PutRule, TagResource, or UntagResource.

", "smithy.api#error": "client" } }, @@ -6754,7 +6850,7 @@ } }, "traits": { - "smithy.api#documentation": "

A partner event source is created by an SaaS partner. If a customer creates a partner\n event bus that matches this event source, that Amazon Web Services account can receive events from the\n partner's applications or services.

" + "smithy.api#documentation": "

A partner event source is created by an SaaS partner. If a customer creates a partner\n event bus that matches this event source, that Amazon Web Services account can receive events\n from the partner's applications or services.

" } }, "com.amazonaws.eventbridge#PartnerEventSourceAccount": { @@ -6775,7 +6871,7 @@ "ExpirationTime": { "target": "com.amazonaws.eventbridge#Timestamp", "traits": { - "smithy.api#documentation": "

The date and time that the event source will expire, if the Amazon Web Services account doesn't create a\n matching event bus for it.

" + "smithy.api#documentation": "

The date and time that the event source will expire, if the Amazon Web Services account\n doesn't create a matching event bus for it.

" } }, "State": { @@ -6835,12 +6931,12 @@ "expression": { "target": "com.amazonaws.eventbridge#PlacementConstraintExpression", "traits": { - "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. You cannot specify an\n expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.\n

" + "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. You cannot specify an\n expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

" } } }, "traits": { - "smithy.api#documentation": "

An object representing a constraint on task placement. To learn more, see Task Placement Constraints in the Amazon Elastic Container Service Developer\n Guide.

" + "smithy.api#documentation": "

An object representing a constraint on task placement. To learn more, see Task Placement Constraints in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.eventbridge#PlacementConstraintExpression": { @@ -6963,7 +7059,7 @@ "HealthCheck": { "target": "com.amazonaws.eventbridge#HealthCheck", "traits": { - "smithy.api#documentation": "

The ARN of the health check used by the endpoint to determine whether failover is triggered.

", + "smithy.api#documentation": "

The ARN of the health check used by the endpoint to determine whether failover is\n triggered.

", "smithy.api#required": {} } } @@ -7007,7 +7103,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends custom events to Amazon EventBridge so that they can be matched to rules.

\n

The maximum size for a PutEvents event entry is 256 KB. Entry size is calculated including the event and any necessary characters and keys of the JSON representation of the event. \n To learn more, see\n Calculating PutEvents event entry size in the Amazon EventBridge User Guide\n

\n

PutEvents accepts the data in JSON format. For the JSON number\n (integer) data type, the constraints are: a minimum value of\n -9,223,372,036,854,775,808 and a maximum value of 9,223,372,036,854,775,807.

\n \n

PutEvents will only process nested JSON up to 1100 levels deep.

\n
" + "smithy.api#documentation": "

Sends custom events to Amazon EventBridge so that they can be matched to rules.

\n

The maximum size for a PutEvents event entry is 256 KB. Entry size is calculated including\n the event and any necessary characters and keys of the JSON representation of the event. To\n learn more, see Calculating PutEvents event entry\n size in the \n Amazon EventBridge User Guide\n \n

\n

PutEvents accepts the data in JSON format. For the JSON number (integer) data type, the\n constraints are: a minimum value of -9,223,372,036,854,775,808 and a maximum value of\n 9,223,372,036,854,775,807.

\n \n

PutEvents will only process nested JSON up to 1100 levels deep.

\n
" } }, "com.amazonaws.eventbridge#PutEventsRequest": { @@ -7023,7 +7119,7 @@ "EndpointId": { "target": "com.amazonaws.eventbridge#EndpointId", "traits": { - "smithy.api#documentation": "

The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.

\n \n

When using Java, you must include auth-crt on the class path.

\n
", + "smithy.api#documentation": "

The URL subdomain of the endpoint. For example, if the URL for Endpoint is\n https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is\n abcde.veo.

\n \n

When using Java, you must include auth-crt on the class path.

\n
", "smithy.rules#contextParam": { "name": "EndpointId" } @@ -7052,13 +7148,13 @@ "Resources": { "target": "com.amazonaws.eventbridge#EventResourceList", "traits": { - "smithy.api#documentation": "

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily\n concerns. Any number, including zero, may be present.

" + "smithy.api#documentation": "

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns.\n Any number, including zero, may be present.

" } }, "DetailType": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

Free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.

\n \n

\n Detail, DetailType, and Source are required for EventBridge to successfully send an event to an event bus. \n If you include event entries in a request that do not include each of those properties, EventBridge fails that entry. \n If you submit a request in which none of the entries have each of these properties, EventBridge fails the entire request.\n

\n
" + "smithy.api#documentation": "

Free-form string, with a maximum of 128 characters, used to decide what fields to expect\n in the event detail.

\n \n

\n Detail, DetailType, and Source are required for EventBridge to successfully send an event to an event bus. \n If you include event entries in a request that do not include each of those properties, EventBridge fails that entry. \n If you submit a request in which none of the entries have each of these properties, EventBridge fails the entire request.\n

\n
" } }, "Detail": { @@ -7070,13 +7166,13 @@ "EventBusName": { "target": "com.amazonaws.eventbridge#NonPartnerEventBusNameOrArn", "traits": { - "smithy.api#documentation": "

The name or ARN of the event bus to receive the event. Only the rules that are associated\n with this event bus are used to match the event. If you omit this, the default event bus is\n used.

\n \n

If you're using a global endpoint with a custom bus, you can enter either the name or Amazon Resource Name (ARN) of the event bus \n in either the primary or secondary Region here. EventBridge then determines the corresponding event bus in the \n other Region based on the endpoint referenced by the EndpointId. Specifying the event bus ARN is preferred.

\n
" + "smithy.api#documentation": "

The name or ARN of the event bus to receive the event. Only the rules that are associated\n with this event bus are used to match the event. If you omit this, the default event bus is\n used.

\n \n

If you're using a global endpoint with a custom bus, you can enter either the name or\n Amazon Resource Name (ARN) of the event bus in either the primary or secondary Region here. EventBridge then\n determines the corresponding event bus in the other Region based on the endpoint referenced\n by the EndpointId. Specifying the event bus ARN is preferred.

\n
" } }, "TraceHeader": { "target": "com.amazonaws.eventbridge#TraceHeader", "traits": { - "smithy.api#documentation": "

An X-Ray trace header, which is an http header (X-Amzn-Trace-Id) that contains the\n trace-id associated with the event.

\n

To learn more about X-Ray trace headers, see Tracing header in the X-Ray Developer Guide.

" + "smithy.api#documentation": "

An X-Ray trace header, which is an http header (X-Amzn-Trace-Id) that\n contains the trace-id associated with the event.

\n

To learn more about X-Ray trace headers, see Tracing\n header in the X-Ray Developer Guide.
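
A minimal Soto sketch of a single PutEvents entry that satisfies the Source/DetailType/Detail requirement above; the bus name, source, and detail payload are placeholders, and member labels assume the generated Swift shapes.

```swift
import SotoEventBridge

// Sketch of a single PutEvents entry that carries Source, DetailType, and a JSON
// Detail string; bus name and values are placeholders.
func sendExampleEvent(using eventBridge: EventBridge) async throws {
    let entry = EventBridge.PutEventsRequestEntry(
        detail: #"{"orderId": "1234", "state": "SHIPPED"}"#,
        detailType: "Order State Change",
        eventBusName: "example-bus",
        source: "com.example.orders"
    )
    let response = try await eventBridge.putEvents(.init(entries: [entry]))
    // Each result lines up with the entry at the same index in the request.
    for result in response.entries ?? [] {
        print(result.eventId ?? "\(result.errorCode ?? "error"): \(result.errorMessage ?? "")")
    }
}
```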

" } } }, @@ -7109,7 +7205,7 @@ "Entries": { "target": "com.amazonaws.eventbridge#PutEventsResultEntryList", "traits": { - "smithy.api#documentation": "

The successfully and unsuccessfully ingested events results. If the ingestion was\n successful, the entry has the event ID in it. Otherwise, you can use the error code and error\n message to identify the problem with the entry.

\n

For each record, the index of the response element is the same as the index in the request array.

" + "smithy.api#documentation": "

The successfully and unsuccessfully ingested events results. If the ingestion was\n successful, the entry has the event ID in it. Otherwise, you can use the error code and error\n message to identify the problem with the entry.

\n

For each record, the index of the response element is the same as the index in the request\n array.

" } } }, @@ -7129,7 +7225,7 @@ "ErrorCode": { "target": "com.amazonaws.eventbridge#ErrorCode", "traits": { - "smithy.api#documentation": "

The error code that indicates why the event submission failed.

\n

Retryable errors include:

\n
    \n
  • \n

    \n \n InternalFailure\n \n

    \n

    The request processing has failed because of an unknown error, exception or failure.

    \n
  • \n
  • \n

    \n \n ThrottlingException\n \n

    \n

    The request was denied due to request throttling.

    \n
  • \n
\n

Non-retryable errors include:

\n
    \n
  • \n

    \n \n AccessDeniedException\n \n

    \n

    You do not have sufficient access to perform this action.

    \n
  • \n
  • \n

    \n InvalidAccountIdException\n

    \n

    The account ID provided is not valid.

    \n
  • \n
  • \n

    \n InvalidArgument\n

    \n

    A specified parameter is not valid.

    \n
  • \n
  • \n

    \n MalformedDetail\n

    \n

    The JSON provided is not valid.

    \n
  • \n
  • \n

    \n RedactionFailure\n

    \n

    Redacting the CloudTrail event failed.

    \n
  • \n
  • \n

    \n NotAuthorizedForSourceException\n

    \n

    You do not have permissions to publish events with this source onto this event bus.

    \n
  • \n
  • \n

    \n NotAuthorizedForDetailTypeException\n

    \n

    You do not have permissions to publish events with this detail type onto this event bus.

    \n
  • \n
" + "smithy.api#documentation": "

The error code that indicates why the event submission failed.

\n

Retryable errors include:

\n
    \n
  • \n

    \n \n InternalFailure\n \n

    \n

    The request processing has failed because of an unknown error, exception or\n failure.

    \n
  • \n
  • \n

    \n \n ThrottlingException\n \n

    \n

    The request was denied due to request throttling.

    \n
  • \n
\n

Non-retryable errors include:

\n
    \n
  • \n

    \n \n AccessDeniedException\n \n

    \n

    You do not have sufficient access to perform this action.

    \n
  • \n
  • \n

    \n InvalidAccountIdException\n

    \n

    The account ID provided is not valid.

    \n
  • \n
  • \n

    \n InvalidArgument\n

    \n

    A specified parameter is not valid.

    \n
  • \n
  • \n

    \n MalformedDetail\n

    \n

    The JSON provided is not valid.

    \n
  • \n
  • \n

    \n RedactionFailure\n

    \n

    Redacting the CloudTrail event failed.

    \n
  • \n
  • \n

    \n NotAuthorizedForSourceException\n

    \n

    You do not have permissions to publish events with this source onto this event\n bus.

    \n
  • \n
  • \n

    \n NotAuthorizedForDetailTypeException\n

    \n

    You do not have permissions to publish events with this detail type onto this event\n bus.

    \n
  • \n
" } }, "ErrorMessage": { @@ -7140,7 +7236,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents the results of an event submitted to an event bus.

\n

If the submission was successful, the entry has the event ID in it. \n Otherwise, you can use the error code and error message to identify the problem with the entry.

\n

For information about the errors that are common to all actions, see \n Common Errors.

" + "smithy.api#documentation": "

Represents the results of an event submitted to an event bus.

\n

If the submission was successful, the entry has the event ID in it. Otherwise, you can use\n the error code and error message to identify the problem with the entry.

\n

For information about the errors that are common to all actions, see Common\n Errors.

" } }, "com.amazonaws.eventbridge#PutEventsResultEntryList": { @@ -7166,7 +7262,7 @@ } ], "traits": { - "smithy.api#documentation": "

This is used by SaaS partners to write events to a customer's partner event bus. Amazon Web Services\n customers do not use this operation.

\n

For information on calculating event batch size, see \n Calculating EventBridge PutEvents event entry size\n in the EventBridge User Guide.

" + "smithy.api#documentation": "

This is used by SaaS partners to write events to a customer's partner event bus. Amazon Web Services customers do not use this operation.

\n

For information on calculating event batch size, see Calculating EventBridge PutEvents event\n entry size in the EventBridge User Guide.

" } }, "com.amazonaws.eventbridge#PutPartnerEventsRequest": { @@ -7202,13 +7298,13 @@ "Resources": { "target": "com.amazonaws.eventbridge#EventResourceList", "traits": { - "smithy.api#documentation": "

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily\n concerns. Any number, including zero, may be present.

" + "smithy.api#documentation": "

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily concerns.\n Any number, including zero, may be present.

" } }, "DetailType": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.

\n \n

\n Detail, DetailType, and Source are required for EventBridge to successfully send an event to an event bus. \n If you include event entries in a request that do not include each of those properties, EventBridge fails that entry. \n If you submit a request in which none of the entries have each of these properties, EventBridge fails the entire request.\n

\n
" + "smithy.api#documentation": "

A free-form string, with a maximum of 128 characters, used to decide what fields to expect\n in the event detail.

\n \n

\n Detail, DetailType, and Source are required for EventBridge to successfully send an event to an event bus. \n If you include event entries in a request that do not include each of those properties, EventBridge fails that entry. \n If you submit a request in which none of the entries have each of these properties, EventBridge fails the entire request.\n

\n
" } }, "Detail": { @@ -7247,7 +7343,7 @@ "Entries": { "target": "com.amazonaws.eventbridge#PutPartnerEventsResultEntryList", "traits": { - "smithy.api#documentation": "

The results for each event entry the partner submitted in this request. \n If the event was successfully submitted, the entry has the event ID in it. \n Otherwise, you can use the error code and error message to identify the problem with the entry.

\n

For each record, the index of the response element is the same as the index in the request array.

" + "smithy.api#documentation": "

The results for each event entry the partner submitted in this request. If the event was\n successfully submitted, the entry has the event ID in it. Otherwise, you can use the error\n code and error message to identify the problem with the entry.

\n

For each record, the index of the response element is the same as the index in the request\n array.

" } } }, @@ -7278,7 +7374,7 @@ } }, "traits": { - "smithy.api#documentation": "

The result of an event entry the partner submitted in this request. \n If the event was successfully submitted, the entry has the event ID in it. \n Otherwise, you can use the error code and error message to identify the problem with the entry.

" + "smithy.api#documentation": "

The result of an event entry the partner submitted in this request. If the event was\n successfully submitted, the entry has the event ID in it. Otherwise, you can use the error\n code and error message to identify the problem with the entry.

" } }, "com.amazonaws.eventbridge#PutPartnerEventsResultEntryList": { @@ -7313,7 +7409,7 @@ } ], "traits": { - "smithy.api#documentation": "

Running PutPermission permits the specified Amazon Web Services account or Amazon Web Services organization\n to put events to the specified event bus. Amazon EventBridge (CloudWatch\n Events) rules in your account are triggered by these events arriving to an event bus in your\n account.

\n

For another account to send events to your account, that external account must have an\n EventBridge rule with your account's event bus as a target.

\n

To enable multiple Amazon Web Services accounts to put events to your event bus, run\n PutPermission once for each of these accounts. Or, if all the accounts are\n members of the same Amazon Web Services organization, you can run PutPermission once specifying\n Principal as \"*\" and specifying the Amazon Web Services organization ID in\n Condition, to grant permissions to all accounts in that organization.

\n

If you grant permissions using an organization, then accounts in that organization must\n specify a RoleArn with proper permissions when they use PutTarget to\n add your account's event bus as a target. For more information, see Sending and\n Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User\n Guide.

\n

The permission policy on the event bus cannot exceed 10 KB in size.

" + "smithy.api#documentation": "

Running PutPermission permits the specified Amazon Web Services account or\n Amazon Web Services organization to put events to the specified event\n bus. Amazon EventBridge (CloudWatch Events) rules in your account are\n triggered by these events arriving to an event bus in your account.

\n

For another account to send events to your account, that external account must have an\n EventBridge rule with your account's event bus as a target.

\n

To enable multiple Amazon Web Services accounts to put events to your event bus, run\n PutPermission once for each of these accounts. Or, if all the accounts are\n members of the same Amazon Web Services organization, you can run PutPermission\n once specifying Principal as \"*\" and specifying the Amazon Web Services\n organization ID in Condition, to grant permissions to all accounts in that\n organization.

\n

If you grant permissions using an organization, then accounts in that organization must\n specify a RoleArn with proper permissions when they use PutTarget to\n add your account's event bus as a target. For more information, see Sending and\n Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

\n

The permission policy on the event bus cannot exceed 10 KB in size.

" } }, "com.amazonaws.eventbridge#PutPermissionRequest": { @@ -7334,19 +7430,19 @@ "Principal": { "target": "com.amazonaws.eventbridge#Principal", "traits": { - "smithy.api#documentation": "

The 12-digit Amazon Web Services account ID that you are permitting to put events to your default event\n bus. Specify \"*\" to permit any account to put events to your default event bus.

\n

If you specify \"*\" without specifying Condition, avoid creating rules that\n may match undesirable events. To create more secure rules, make sure that the event pattern\n for each rule contains an account field with a specific account ID from which to\n receive events. Rules with an account field do not match any events sent from other\n accounts.

" + "smithy.api#documentation": "

The 12-digit Amazon Web Services account ID that you are permitting to put events to your\n default event bus. Specify \"*\" to permit any account to put events to your default event\n bus.

\n

If you specify \"*\" without specifying Condition, avoid creating rules that\n may match undesirable events. To create more secure rules, make sure that the event pattern\n for each rule contains an account field with a specific account ID from which to\n receive events. Rules with an account field do not match any events sent from other\n accounts.

" } }, "StatementId": { "target": "com.amazonaws.eventbridge#StatementId", "traits": { - "smithy.api#documentation": "

An identifier string for the external account that you are granting permissions to. If you\n later want to revoke the permission for this external account, specify this\n StatementId when you run RemovePermission.

\n \n

Each StatementId must be unique.

\n
" + "smithy.api#documentation": "

An identifier string for the external account that you are granting permissions to. If you\n later want to revoke the permission for this external account, specify this\n StatementId when you run RemovePermission.

\n \n

Each StatementId must be unique.

\n
" } }, "Condition": { "target": "com.amazonaws.eventbridge#Condition", "traits": { - "smithy.api#documentation": "

This parameter enables you to limit the permission to accounts that fulfill a certain\n condition, such as being a member of a certain Amazon Web Services organization. For more information about\n Amazon Web Services Organizations, see What Is Amazon Web Services \n Organizations in the Amazon Web Services Organizations User Guide.

\n

If you specify Condition with an Amazon Web Services organization ID, and specify \"*\" as the\n value for Principal, you grant permission to all the accounts in the named\n organization.

\n

The Condition is a JSON string which must contain Type,\n Key, and Value fields.

" + "smithy.api#documentation": "

This parameter enables you to limit the permission to accounts that fulfill a certain\n condition, such as being a member of a certain Amazon Web Services organization. For more\n information about Amazon Web Services Organizations, see What Is Amazon Web Services\n Organizations in the Amazon Web Services Organizations User\n Guide.

\n

If you specify Condition with an Amazon Web Services organization ID, and\n specify \"*\" as the value for Principal, you grant permission to all the accounts\n in the named organization.

\n

The Condition is a JSON string which must contain Type,\n Key, and Value fields.

" } }, "Policy": { @@ -7389,7 +7485,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the specified rule. Rules are enabled by default, or based on value of\n the state. You can disable a rule using DisableRule.

\n

A single rule watches for events from a single event bus. Events generated by Amazon Web Services services\n go to your account's default event bus. Events generated by SaaS partner services or\n applications go to the matching partner event bus. If you have custom applications or\n services, you can specify whether their events go to your default event bus or a custom event\n bus that you have created. For more information, see CreateEventBus.

\n

If you are updating an existing rule, the rule is replaced with what you specify in this\n PutRule command. If you omit arguments in PutRule, the old values\n for those arguments are not kept. Instead, they are replaced with null values.

\n

When you create or update a rule, incoming events might not immediately start matching to\n new or updated rules. Allow a short period of time for changes to take effect.

\n

A rule must contain at least an EventPattern or ScheduleExpression. Rules with\n EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions\n self-trigger based on the given schedule. A rule can have both an EventPattern and a\n ScheduleExpression, in which case the rule triggers on matching events as well as on a\n schedule.

\n

When you initially create a rule, you can optionally assign one or more tags to the rule.\n Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions, by granting a user permission to access or change only rules with certain tag\n values. To use the PutRule operation and assign tags, you must have both the\n events:PutRule and events:TagResource permissions.

\n

If you are updating an existing rule, any tags you specify in the PutRule\n operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

\n

Most services in Amazon Web Services treat : or / as the same character in Amazon Resource Names (ARNs).\n However, EventBridge uses an exact match in event patterns and rules. Be sure to use the\n correct ARN characters when creating event patterns so that they match the ARN syntax in the\n event you want to match.

\n

In EventBridge, it is possible to create rules that lead to infinite loops, where a rule\n is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket,\n and trigger software to change them to the desired state. If the rule is not written\n carefully, the subsequent change to the ACLs fires the rule again, creating an infinite\n loop.

\n

To prevent this, write the rules so that the triggered actions do not re-fire the same\n rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead\n of after any change.

\n

An infinite loop can quickly cause higher than expected charges. We recommend that you use\n budgeting, which alerts you when charges exceed your specified limit. For more information,\n see Managing Your Costs with\n Budgets.

" + "smithy.api#documentation": "

Creates or updates the specified rule. Rules are enabled by default, or based on value of\n the state. You can disable a rule using DisableRule.

\n

A single rule watches for events from a single event bus. Events generated by Amazon Web Services services go to your account's default event bus. Events generated by SaaS partner\n services or applications go to the matching partner event bus. If you have custom applications\n or services, you can specify whether their events go to your default event bus or a custom\n event bus that you have created. For more information, see CreateEventBus.

\n

If you are updating an existing rule, the rule is replaced with what you specify in this\n PutRule command. If you omit arguments in PutRule, the old values\n for those arguments are not kept. Instead, they are replaced with null values.

\n

When you create or update a rule, incoming events might not immediately start matching to\n new or updated rules. Allow a short period of time for changes to take effect.

\n

A rule must contain at least an EventPattern or ScheduleExpression. Rules with\n EventPatterns are triggered when a matching event is observed. Rules with ScheduleExpressions\n self-trigger based on the given schedule. A rule can have both an EventPattern and a\n ScheduleExpression, in which case the rule triggers on matching events as well as on a\n schedule.

\n

When you initially create a rule, you can optionally assign one or more tags to the rule.\n Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions, by granting a user permission to access or change only rules with certain tag\n values. To use the PutRule operation and assign tags, you must have both the\n events:PutRule and events:TagResource permissions.

\n

If you are updating an existing rule, any tags you specify in the PutRule\n operation are ignored. To update the tags of an existing rule, use TagResource and UntagResource.

\n

Most services in Amazon Web Services treat : or / as the same character in Amazon Resource\n Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be sure to\n use the correct ARN characters when creating event patterns so that they match the ARN syntax\n in the event you want to match.

\n

In EventBridge, it is possible to create rules that lead to infinite loops, where a rule\n is fired repeatedly. For example, a rule might detect that ACLs have changed on an S3 bucket,\n and trigger software to change them to the desired state. If the rule is not written\n carefully, the subsequent change to the ACLs fires the rule again, creating an infinite\n loop.

\n

To prevent this, write the rules so that the triggered actions do not re-fire the same\n rule. For example, your rule could fire only if ACLs are found to be in a bad state, instead\n of after any change.

\n

An infinite loop can quickly cause higher than expected charges. We recommend that you use\n budgeting, which alerts you when charges exceed your specified limit. For more information,\n see Managing Your Costs with\n Budgets.

" } }, "com.amazonaws.eventbridge#PutRuleRequest": { @@ -7411,13 +7507,13 @@ "EventPattern": { "target": "com.amazonaws.eventbridge#EventPattern", "traits": { - "smithy.api#documentation": "

The event pattern. For more information, see Amazon EventBridge event\n patterns in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

The event pattern. For more information, see Amazon EventBridge event\n patterns in the \n Amazon EventBridge User Guide\n .

" } }, "State": { "target": "com.amazonaws.eventbridge#RuleState", "traits": { - "smithy.api#documentation": "

Indicates whether the rule is enabled or disabled.

" + "smithy.api#documentation": "

The state of the rule.

\n

Valid values include:

\n
    \n
  • \n

    \n DISABLED: The rule is disabled. EventBridge does not match any events against the rule.

    \n
  • \n
  • \n

    \n ENABLED: The rule is enabled. \n EventBridge matches events against the rule, except for Amazon Web Services management events delivered through CloudTrail.

    \n
  • \n
  • \n

    \n ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS: The rule is enabled for all\n events, including Amazon Web Services management events delivered through CloudTrail.

    \n

    Management events provide visibility into management operations that are performed on\n resources in your Amazon Web Services account. These are also known as control plane\n operations. For more information, see Logging management events in the CloudTrail User\n Guide, and Filtering management events from Amazon Web Services services in the\n \n Amazon EventBridge User Guide\n .

    \n

    This value is only valid for rules on the default event bus \n or custom event buses. \n It does not apply to partner event buses.

    \n
  • \n
" } }, "Description": { @@ -7489,7 +7585,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds the specified targets to the specified rule, or updates the targets if they are\n already associated with the rule.

\n

Targets are the resources that are invoked when a rule is triggered.

\n

The maximum number of entries per request is 10.

\n \n

Each rule can have up to five (5) targets associated with it at one time.

\n
\n

For a list of services you can configure as targets for events, see EventBridge targets\n in the Amazon EventBridge User Guide.

\n

Creating rules with built-in targets is supported only in the Amazon Web Services Management Console. The\n built-in targets are:

\n
    \n
  • \n

    \n Amazon EBS CreateSnapshot API call\n

    \n
  • \n
  • \n

    \n Amazon EC2 RebootInstances API call\n

    \n
  • \n
  • \n

    \n Amazon EC2 StopInstances API call\n

    \n
  • \n
  • \n

    \n Amazon EC2 TerminateInstances API\n call\n

    \n
  • \n
\n

For some target types, PutTargets provides target-specific parameters. If the\n target is a Kinesis data stream, you can optionally specify which shard the event goes to by\n using the KinesisParameters argument. To invoke a command on multiple EC2\n instances with one rule, you can use the RunCommandParameters field.

\n

To be able to make API calls against the resources that you own, Amazon EventBridge\n needs the appropriate permissions: \n

\n
    \n
  • \n

    For Lambda and Amazon SNS\n resources, EventBridge relies on resource-based policies.

    \n
  • \n
  • \n

    For EC2 instances, Kinesis Data Streams, \n Step Functions state machines and API Gateway APIs, EventBridge relies on\n IAM roles that you specify in the RoleARN argument in PutTargets.

    \n
  • \n
\n

For more information, see Authentication\n and Access Control in the Amazon EventBridge User Guide.

\n

If another Amazon Web Services account is in the same region and has granted you permission (using\n PutPermission), you can send events to that account. Set that account's event\n bus as a target of the rules in your account. To send the matched events to the other account,\n specify that account's event bus as the Arn value when you run\n PutTargets. If your account sends events to another account, your account is\n charged for each sent event. Each event sent to another account is charged as a custom event.\n The account receiving the event is not charged. For more information, see Amazon EventBridge\n Pricing.

\n \n

\n Input, InputPath, and InputTransformer are not\n available with PutTarget if the target is an event bus of a different Amazon Web Services \n account.

\n
\n

If you are setting the event bus of another account as the target, and that account\n granted permission to your account through an organization instead of directly by the account\n ID, then you must specify a RoleArn with proper permissions in the\n Target structure. For more information, see Sending and\n Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User\n Guide.

\n \n

If you have an IAM role on a cross-account event bus target, \n a PutTargets call without a role on the same target (same Id and Arn) will not remove the role.

\n
\n

For more information about enabling cross-account events, see PutPermission.

\n

\n Input, InputPath, and\n InputTransformer are mutually exclusive and optional\n parameters of a target. When a rule is triggered due to a matched event:

\n
    \n
  • \n

    If none of the following arguments are specified for a target, then the entire event\n is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or\n Amazon ECS task, in which case nothing from the event is passed to the target).

    \n
  • \n
  • \n

    If Input is specified in the form of valid JSON, then\n the matched event is overridden with this constant.

    \n
  • \n
  • \n

    If InputPath is specified in the form of JSONPath\n (for example, $.detail), then only the part of the event specified in the\n path is passed to the target (for example, only the detail part of the event is\n passed).

    \n
  • \n
  • \n

    If InputTransformer is specified, then one or more\n specified JSONPaths are extracted from the event and used as values in a template that you\n specify as the input to the target.

    \n
  • \n
\n

When you specify InputPath or InputTransformer, you must use\n JSON dot notation, not bracket notation.

\n

When you add targets to a rule and the associated rule triggers soon after, new or updated\n targets might not be immediately invoked. Allow a short period of time for changes to take\n effect.

\n

This action can partially fail if too many requests are made at the same time. If that\n happens, FailedEntryCount is non-zero in the response and each entry in\n FailedEntries provides the ID of the failed target and the error code.

" + "smithy.api#documentation": "

Adds the specified targets to the specified rule, or updates the targets if they are\n already associated with the rule.

\n

Targets are the resources that are invoked when a rule is triggered.

\n

The maximum number of entries per request is 10.

\n \n

Each rule can have up to five (5) targets associated with it at one time.

\n
\n

For a list of services you can configure as targets for events, see EventBridge targets\n in the \n Amazon EventBridge User Guide\n .

\n

Creating rules with built-in targets is supported only in the Amazon Web Services Management Console. The\n built-in targets are:

\n
    \n
  • \n

    \n Amazon EBS CreateSnapshot API call\n

    \n
  • \n
  • \n

    \n Amazon EC2 RebootInstances API call\n

    \n
  • \n
  • \n

    \n Amazon EC2 StopInstances API call\n

    \n
  • \n
  • \n

    \n Amazon EC2 TerminateInstances API call\n

    \n
  • \n
\n

For some target types, PutTargets provides target-specific parameters. If the\n target is a Kinesis data stream, you can optionally specify which shard the event\n goes to by using the KinesisParameters argument. To invoke a command on multiple\n EC2 instances with one rule, you can use the RunCommandParameters field.

\n

To be able to make API calls against the resources that you own, Amazon EventBridge\n needs the appropriate permissions:

\n
    \n
  • \n

    For Lambda and Amazon SNS resources, EventBridge relies\n on resource-based policies.

    \n
  • \n
  • \n

    For EC2 instances, Kinesis Data Streams, Step Functions state machines and\n API Gateway APIs, EventBridge relies on IAM roles that you specify in the\n RoleARN argument in PutTargets.

    \n
  • \n
\n

For more information, see Authentication\n and Access Control in the \n Amazon EventBridge User Guide\n .

\n

If another Amazon Web Services account is in the same region and has granted you permission\n (using PutPermission), you can send events to that account. Set that account's\n event bus as a target of the rules in your account. To send the matched events to the other\n account, specify that account's event bus as the Arn value when you run\n PutTargets. If your account sends events to another account, your account is\n charged for each sent event. Each event sent to another account is charged as a custom event.\n The account receiving the event is not charged. For more information, see Amazon EventBridge Pricing.

\n \n

\n Input, InputPath, and InputTransformer are not\n available with PutTarget if the target is an event bus of a different Amazon Web Services account.

\n
\n

If you are setting the event bus of another account as the target, and that account\n granted permission to your account through an organization instead of directly by the account\n ID, then you must specify a RoleArn with proper permissions in the\n Target structure. For more information, see Sending and\n Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

\n \n

If you have an IAM role on a cross-account event bus target, a PutTargets\n call without a role on the same target (same Id and Arn) will not\n remove the role.

\n
\n

For more information about enabling cross-account events, see PutPermission.

\n

\n Input, InputPath, and\n InputTransformer are mutually exclusive and optional\n parameters of a target. When a rule is triggered due to a matched event:

\n
    \n
  • \n

    If none of the following arguments are specified for a target, then the entire event\n is passed to the target in JSON format (unless the target is Amazon EC2 Run Command or\n Amazon ECS task, in which case nothing from the event is passed to the target).

    \n
  • \n
  • \n

    If Input is specified in the form of valid JSON, then\n the matched event is overridden with this constant.

    \n
  • \n
  • \n

    If InputPath is specified in the form of JSONPath\n (for example, $.detail), then only the part of the event specified in the\n path is passed to the target (for example, only the detail part of the event is\n passed).

    \n
  • \n
  • \n

    If InputTransformer is specified, then one or more\n specified JSONPaths are extracted from the event and used as values in a template that you\n specify as the input to the target.

    \n
  • \n
\n

When you specify InputPath or InputTransformer, you must use\n JSON dot notation, not bracket notation.

\n

When you add targets to a rule and the associated rule triggers soon after, new or updated\n targets might not be immediately invoked. Allow a short period of time for changes to take\n effect.

\n

This action can partially fail if too many requests are made at the same time. If that\n happens, FailedEntryCount is non-zero in the response and each entry in\n FailedEntries provides the ID of the failed target and the error code.

" } }, "com.amazonaws.eventbridge#PutTargetsRequest": { @@ -7657,12 +7753,12 @@ "Sqls": { "target": "com.amazonaws.eventbridge#Sqls", "traits": { - "smithy.api#documentation": "

One or more SQL statements to run. The SQL statements are run as a single transaction. They run serially in the order of the array. \n Subsequent SQL statements don't start until the previous statement in the array completes. \n If any SQL statement fails, then because they are run as one transaction, all work is rolled back.

" + "smithy.api#documentation": "

One or more SQL statements to run. The SQL statements are run as a single transaction.\n They run serially in the order of the array. Subsequent SQL statements don't start until the\n previous statement in the array completes. If any SQL statement fails, then because they are\n run as one transaction, all work is rolled back.

" } } }, "traits": { - "smithy.api#documentation": "

These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the\n Amazon Redshift Data API ExecuteStatement based on EventBridge events.

" + "smithy.api#documentation": "

These are custom parameters to be used when the target is an Amazon Redshift cluster\n to invoke the Amazon Redshift Data API\n ExecuteStatement based on EventBridge events.

" } }, "com.amazonaws.eventbridge#RedshiftSecretManagerArn": { @@ -7708,7 +7804,7 @@ } ], "traits": { - "smithy.api#documentation": "

Revokes the permission of another Amazon Web Services account to be able to put events to the specified\n event bus. Specify the account to revoke by the StatementId value that you\n associated with the account when you granted it permission with PutPermission.\n You can find the StatementId by using DescribeEventBus.

" + "smithy.api#documentation": "

Revokes the permission of another Amazon Web Services account to be able to put events to\n the specified event bus. Specify the account to revoke by the StatementId value\n that you associated with the account when you granted it permission with\n PutPermission. You can find the StatementId by using DescribeEventBus.

" } }, "com.amazonaws.eventbridge#RemovePermissionRequest": { @@ -7761,7 +7857,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes the specified targets from the specified rule. When the rule is triggered, those\n targets are no longer be invoked.

\n \n

A successful execution of RemoveTargets doesn't guarantee all targets are removed from the rule, it means that the target(s) listed in the request are removed.

\n
\n

When you remove a target, when the associated rule triggers, removed targets might\n continue to be invoked. Allow a short period of time for changes to take effect.

\n

This action can partially fail if too many requests are made at the same time. If that\n happens, FailedEntryCount is non-zero in the response and each entry in\n FailedEntries provides the ID of the failed target and the error code.

\n

The maximum number of entries per request is 10.

" + "smithy.api#documentation": "

Removes the specified targets from the specified rule. When the rule is triggered, those\n targets are no longer invoked.

\n \n

A successful execution of RemoveTargets doesn't guarantee all targets are\n removed from the rule; it means that the target(s) listed in the request are removed.

\n
\n

After you remove a target, it might continue to be invoked when the associated rule\n triggers. Allow a short period of time for changes to take effect.

\n

This action can partially fail if too many requests are made at the same time. If that\n happens, FailedEntryCount is non-zero in the response and each entry in\n FailedEntries provides the ID of the failed target and the error code.

\n

The maximum number of entries per request is 10.

" } }, "com.amazonaws.eventbridge#RemoveTargetsRequest": { @@ -7791,7 +7887,7 @@ "target": "com.amazonaws.eventbridge#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

If this is a managed rule, created by an Amazon Web Services service on your behalf, you must specify\n Force as True to remove targets. This parameter is ignored for\n rules that are not managed rules. You can check whether a rule is a managed rule by using\n DescribeRule or ListRules and checking the ManagedBy\n field of the response.

" + "smithy.api#documentation": "

If this is a managed rule, created by an Amazon Web Services service on your behalf, you\n must specify Force as True to remove targets. This parameter is\n ignored for rules that are not managed rules. You can check whether a rule is a managed rule\n by using DescribeRule or ListRules and checking the\n ManagedBy field of the response.

" } } }, @@ -8145,7 +8241,7 @@ "FailoverConfig": { "target": "com.amazonaws.eventbridge#FailoverConfig", "traits": { - "smithy.api#documentation": "

The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered.

", + "smithy.api#documentation": "

The failover configuration for an endpoint. This includes what triggers failover and what\n happens when it's triggered.

", "smithy.api#required": {} } } @@ -8172,13 +8268,13 @@ "EventPattern": { "target": "com.amazonaws.eventbridge#EventPattern", "traits": { - "smithy.api#documentation": "

The event pattern of the rule. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

The event pattern of the rule. For more information, see Events and Event\n Patterns in the \n Amazon EventBridge User Guide\n .

" } }, "State": { "target": "com.amazonaws.eventbridge#RuleState", "traits": { - "smithy.api#documentation": "

The state of the rule.

" + "smithy.api#documentation": "

The state of the rule.

\n

Valid values include:

\n
    \n
  • \n

    \n DISABLED: The rule is disabled. EventBridge does not match any events against the rule.

    \n
  • \n
  • \n

    \n ENABLED: The rule is enabled. \n EventBridge matches events against the rule, except for Amazon Web Services management events delivered through CloudTrail.

    \n
  • \n
  • \n

    \n ENABLED_WITH_ALL_CLOUDTRAIL_MANAGEMENT_EVENTS: The rule is enabled for all\n events, including Amazon Web Services management events delivered through CloudTrail.

    \n

    Management events provide visibility into management operations that are performed on\n resources in your Amazon Web Services account. These are also known as control plane\n operations. For more information, see Logging management events in the CloudTrail User\n Guide, and Filtering management events from Amazon Web Services services in the\n \n Amazon EventBridge User Guide\n .

    \n

    This value is only valid for rules on the default event bus \n or custom event buses. \n It does not apply to partner event buses.

    \n
  • \n
" } }, "Description": { @@ -8190,7 +8286,7 @@ "ScheduleExpression": { "target": "com.amazonaws.eventbridge#ScheduleExpression", "traits": { - "smithy.api#documentation": "

The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\". For more information, see Creating an Amazon EventBridge rule that runs on a schedule.

" + "smithy.api#documentation": "

The scheduling expression. For example, \"cron(0 20 * * ? *)\", \"rate(5 minutes)\". For more\n information, see Creating an Amazon EventBridge rule\n that runs on a schedule.

" } }, "RoleArn": { @@ -8202,7 +8298,7 @@ "ManagedBy": { "target": "com.amazonaws.eventbridge#ManagedBy", "traits": { - "smithy.api#documentation": "

If the rule was created on behalf of your account by an Amazon Web Services service, this field displays\n the principal name of the service that created the rule.

" + "smithy.api#documentation": "

If the rule was created on behalf of your account by an Amazon Web Services service, this\n field displays the principal name of the service that created the rule.

" } }, "EventBusName": { @@ -8365,14 +8461,14 @@ "Name": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameterName", "traits": { - "smithy.api#documentation": "

Name of parameter to start execution of a SageMaker Model Building Pipeline.

", + "smithy.api#documentation": "

Name of parameter to start execution of a SageMaker Model Building\n Pipeline.

", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameterValue", "traits": { - "smithy.api#documentation": "

Value of parameter to start execution of a SageMaker Model Building Pipeline.

", + "smithy.api#documentation": "

Value of parameter to start execution of a SageMaker Model Building\n Pipeline.

", "smithy.api#required": {} } } @@ -8418,12 +8514,12 @@ "PipelineParameterList": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameterList", "traits": { - "smithy.api#documentation": "

List of Parameter names and values for SageMaker Model Building Pipeline execution.

" + "smithy.api#documentation": "

List of Parameter names and values for SageMaker Model Building Pipeline\n execution.

" } } }, "traits": { - "smithy.api#documentation": "

These are custom parameters to use when the target is a SageMaker Model Building Pipeline\n that starts based on EventBridge events.

" + "smithy.api#documentation": "

These are custom parameters to use when the target is a SageMaker Model Building\n Pipeline that starts based on EventBridge events.

" } }, "com.amazonaws.eventbridge#ScheduleExpression": { @@ -8447,7 +8543,7 @@ } }, "traits": { - "smithy.api#documentation": "

The secondary Region that processes events when failover is triggered or replication is enabled.

" + "smithy.api#documentation": "

The secondary Region that processes events when failover is triggered or replication is\n enabled.

" } }, "com.amazonaws.eventbridge#SecretsManagerSecretArn": { @@ -8663,7 +8759,7 @@ } }, "traits": { - "smithy.api#documentation": "

A key-value pair associated with an Amazon Web Services resource. In EventBridge, rules and event buses\n support tagging.

" + "smithy.api#documentation": "

A key-value pair associated with an Amazon Web Services resource. In EventBridge,\n rules and event buses support tagging.

" } }, "com.amazonaws.eventbridge#TagKey": { @@ -8710,7 +8806,7 @@ } ], "traits": { - "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can\n help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user permission to access or change only resources with certain tag\n values. In EventBridge, rules and event buses can be tagged.

\n

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of\n characters.

\n

You can use the TagResource action with a resource that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n resource. If you specify a tag key that is already associated with the resource, the new tag\n value that you specify replaces the previous value for that tag.

\n

You can associate as many as 50 tags with a resource.

" + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified EventBridge resource. Tags can\n help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user permission to access or change only resources with certain tag\n values. In EventBridge, rules and event buses can be tagged.

\n

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as\n strings of characters.

\n

You can use the TagResource action with a resource that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n resource. If you specify a tag key that is already associated with the resource, the new tag\n value that you specify replaces the previous value for that tag.

\n

You can associate as many as 50 tags with a resource.

" } }, "com.amazonaws.eventbridge#TagResourceRequest": { @@ -8757,7 +8853,7 @@ "Id": { "target": "com.amazonaws.eventbridge#TargetId", "traits": { - "smithy.api#documentation": "

The ID of the target within the specified rule. Use this ID to reference the target when updating the rule. We recommend using a memorable and unique string.

", + "smithy.api#documentation": "

The ID of the target within the specified rule. Use this ID to reference the target when\n updating the rule. We recommend using a memorable and unique string.

", "smithy.api#required": {} } }, @@ -8771,7 +8867,7 @@ "RoleArn": { "target": "com.amazonaws.eventbridge#RoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is\n triggered. If one rule triggers multiple targets, you can use a different IAM role for each\n target.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered. If\n one rule triggers multiple targets, you can use a different IAM role for each target.

" } }, "Input": { @@ -8813,7 +8909,7 @@ "BatchParameters": { "target": "com.amazonaws.eventbridge#BatchParameters", "traits": { - "smithy.api#documentation": "

If the event target is an Batch job, this contains the job definition, job name, and\n other parameters. For more information, see Jobs in the Batch User\n Guide.

" + "smithy.api#documentation": "

If the event target is a Batch job, this contains the job definition, job\n name, and other parameters. For more information, see Jobs in the Batch\n User Guide.

" } }, "SqsParameters": { @@ -8825,19 +8921,19 @@ "HttpParameters": { "target": "com.amazonaws.eventbridge#HttpParameters", "traits": { - "smithy.api#documentation": "

Contains the HTTP parameters to use when the target is a API Gateway endpoint or\n EventBridge ApiDestination.

\n

If you specify an API Gateway API or EventBridge ApiDestination as a target, you can\n use this parameter to specify headers, path parameters, and query string keys/values as part\n of your target invoking request. If you're using ApiDestinations, the corresponding Connection\n can also have these values configured. In case of any conflicting keys, values from the\n Connection take precedence.

" + "smithy.api#documentation": "

Contains the HTTP parameters to use when the target is an API Gateway endpoint or\n EventBridge ApiDestination.

\n

If you specify an API Gateway API or EventBridge ApiDestination as a target,\n you can use this parameter to specify headers, path parameters, and query string keys/values\n as part of your target invoking request. If you're using ApiDestinations, the corresponding\n Connection can also have these values configured. In case of any conflicting keys, values from\n the Connection take precedence.

" } }, "RedshiftDataParameters": { "target": "com.amazonaws.eventbridge#RedshiftDataParameters", "traits": { - "smithy.api#documentation": "

Contains the Amazon Redshift Data API parameters to use when the target is a Amazon Redshift\n cluster.

\n

If you specify a Amazon Redshift Cluster as a Target, you can use this to specify parameters to\n invoke the Amazon Redshift Data API ExecuteStatement based on EventBridge events.

" + "smithy.api#documentation": "

Contains the Amazon Redshift Data API parameters to use when the target is an Amazon Redshift cluster.

\n

If you specify an Amazon Redshift Cluster as a Target, you can use this to specify\n parameters to invoke the Amazon Redshift Data API ExecuteStatement based on EventBridge events.

" } }, "SageMakerPipelineParameters": { "target": "com.amazonaws.eventbridge#SageMakerPipelineParameters", "traits": { - "smithy.api#documentation": "

Contains the SageMaker Model Building Pipeline parameters to start execution of a\n SageMaker Model Building Pipeline.

\n

If you specify a SageMaker Model Building Pipeline as a target, you can use this to\n specify parameters to start a pipeline execution based on EventBridge events.

" + "smithy.api#documentation": "

Contains the SageMaker Model Building Pipeline parameters to start execution of a\n SageMaker Model Building Pipeline.

\n

If you specify a SageMaker Model Building Pipeline as a target, you can use this\n to specify parameters to start a pipeline execution based on EventBridge events.

" } }, "DeadLetterConfig": { @@ -8855,12 +8951,12 @@ "AppSyncParameters": { "target": "com.amazonaws.eventbridge#AppSyncParameters", "traits": { - "smithy.api#documentation": "

Contains the GraphQL operation to be parsed and executed, if the event target is an AppSync API.

" + "smithy.api#documentation": "

Contains the GraphQL operation to be parsed and executed, if the event target is an\n AppSync API.

" } } }, "traits": { - "smithy.api#documentation": "

Targets are the resources to be invoked when a rule is triggered. For a complete list of\n services and resources that can be set as a target, see PutTargets.

\n

If you are setting the event bus of another account as the target, and that account\n granted permission to your account through an organization instead of directly by the account\n ID, then you must specify a RoleArn with proper permissions in the\n Target structure. For more information, see Sending and\n Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User\n Guide.

" + "smithy.api#documentation": "

Targets are the resources to be invoked when a rule is triggered. For a complete list of\n services and resources that can be set as a target, see PutTargets.

\n

If you are setting the event bus of another account as the target, and that account\n granted permission to your account through an organization instead of directly by the account\n ID, then you must specify a RoleArn with proper permissions in the\n Target structure. For more information, see Sending and\n Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

" } }, "com.amazonaws.eventbridge#TargetArn": { @@ -8950,7 +9046,7 @@ } ], "traits": { - "smithy.api#documentation": "

Tests whether the specified event pattern matches the provided event.

\n

Most services in Amazon Web Services treat : or / as the same character in Amazon Resource Names (ARNs).\n However, EventBridge uses an exact match in event patterns and rules. Be sure to use the\n correct ARN characters when creating event patterns so that they match the ARN syntax in the\n event you want to match.

" + "smithy.api#documentation": "

Tests whether the specified event pattern matches the provided event.

\n

Most services in Amazon Web Services treat : or / as the same character in Amazon Resource\n Names (ARNs). However, EventBridge uses an exact match in event patterns and rules. Be\n sure to use the correct ARN characters when creating event patterns so that they match the ARN\n syntax in the event you want to match.

" } }, "com.amazonaws.eventbridge#TestEventPatternRequest": { @@ -8959,14 +9055,14 @@ "EventPattern": { "target": "com.amazonaws.eventbridge#EventPattern", "traits": { - "smithy.api#documentation": "

The event pattern. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The event pattern. For more information, see Events and Event\n Patterns in the \n Amazon EventBridge User Guide\n .

", "smithy.api#required": {} } }, "Event": { "target": "com.amazonaws.eventbridge#String", "traits": { - "smithy.api#documentation": "

The event, in JSON format, to test against the event pattern. The JSON must follow the\n format specified in Amazon Web Services Events, and the following\n fields are mandatory:

\n
    \n
  • \n

    \n id\n

    \n
  • \n
  • \n

    \n account\n

    \n
  • \n
  • \n

    \n source\n

    \n
  • \n
  • \n

    \n time\n

    \n
  • \n
  • \n

    \n region\n

    \n
  • \n
  • \n

    \n resources\n

    \n
  • \n
  • \n

    \n detail-type\n

    \n
  • \n
", + "smithy.api#documentation": "

The event, in JSON format, to test against the event pattern. The JSON must follow the\n format specified in Amazon Web Services Events, and\n the following fields are mandatory:

\n
    \n
  • \n

    \n id\n

    \n
  • \n
  • \n

    \n account\n

    \n
  • \n
  • \n

    \n source\n

    \n
  • \n
  • \n

    \n time\n

    \n
  • \n
  • \n

    \n region\n

    \n
  • \n
  • \n

    \n resources\n

    \n
  • \n
  • \n

    \n detail-type\n

    \n
  • \n
", "smithy.api#required": {} } } @@ -9519,7 +9615,7 @@ } ], "traits": { - "smithy.api#documentation": "

Update an existing endpoint. For more information about global endpoints, see Making applications Regional-fault tolerant with global endpoints and event replication in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Update an existing endpoint. For more information about global endpoints, see Making\n applications Regional-fault tolerant with global endpoints and event replication in\n the \n Amazon EventBridge User Guide\n .

" } }, "com.amazonaws.eventbridge#UpdateEndpointRequest": { @@ -9591,7 +9687,7 @@ "ReplicationConfig": { "target": "com.amazonaws.eventbridge#ReplicationConfig", "traits": { - "smithy.api#documentation": "

Whether event replication was enabled or disabled for the endpoint you updated in this request.

" + "smithy.api#documentation": "

Whether event replication was enabled or disabled for the endpoint you updated in this\n request.

" } }, "EventBuses": { @@ -9603,7 +9699,7 @@ "RoleArn": { "target": "com.amazonaws.eventbridge#IamRoleArn", "traits": { - "smithy.api#documentation": "

The ARN of the role used by event replication for the endpoint you updated in this request.

" + "smithy.api#documentation": "

The ARN of the role used by event replication for the endpoint you updated in this\n request.

" } }, "EndpointId": { @@ -9628,6 +9724,96 @@ "traits": { "smithy.api#output": {} } + }, + "com.amazonaws.eventbridge#UpdateEventBus": { + "type": "operation", + "input": { + "target": "com.amazonaws.eventbridge#UpdateEventBusRequest" + }, + "output": { + "target": "com.amazonaws.eventbridge#UpdateEventBusResponse" + }, + "errors": [ + { + "target": "com.amazonaws.eventbridge#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.eventbridge#InternalException" + }, + { + "target": "com.amazonaws.eventbridge#OperationDisabledException" + }, + { + "target": "com.amazonaws.eventbridge#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified event bus.

" + } + }, + "com.amazonaws.eventbridge#UpdateEventBusRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.eventbridge#EventBusName", + "traits": { + "smithy.api#documentation": "

The name of the event bus.

" + } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the KMS\n customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt events on this event bus. The identifier can be the key \n Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.

\n

If you do not specify a customer managed key identifier, EventBridge uses an\n Amazon Web Services owned key to encrypt events on the event bus.

\n

For more information, see Managing keys in the Key Management Service\n Developer Guide.

\n \n

Archives and schema discovery are not supported for event buses encrypted using a\n customer managed key. EventBridge returns an error if:

\n
    \n
  • \n

    You call \n CreateArchive\n on an event bus set to use a customer managed key for encryption.

    \n
  • \n
  • \n

    You call \n CreateDiscoverer\n on an event bus set to use a customer managed key for encryption.

    \n
  • \n
  • \n

    You call \n UpdateEventBus\n to set a customer managed key on an event bus with archives or schema discovery enabled.

    \n
  • \n
\n

To enable archives or schema discovery on an event bus, choose to\n use an Amazon Web Services owned key. For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.

\n
" + } + }, + "Description": { + "target": "com.amazonaws.eventbridge#EventBusDescription", + "traits": { + "smithy.api#documentation": "

The event bus description.

" + } + }, + "DeadLetterConfig": { + "target": "com.amazonaws.eventbridge#DeadLetterConfig" + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.eventbridge#UpdateEventBusResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.eventbridge#String", + "traits": { + "smithy.api#documentation": "

The event bus Amazon Resource Name (ARN).

" + } + }, + "Name": { + "target": "com.amazonaws.eventbridge#EventBusName", + "traits": { + "smithy.api#documentation": "

The event bus name.

" + } + }, + "KmsKeyIdentifier": { + "target": "com.amazonaws.eventbridge#KmsKeyIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the KMS\n customer managed key for EventBridge to use to encrypt events on this event bus, if one has been specified.

\n

For more information, see Data encryption in EventBridge in the Amazon EventBridge User Guide.

" + } + }, + "Description": { + "target": "com.amazonaws.eventbridge#EventBusDescription", + "traits": { + "smithy.api#documentation": "

The event bus description.

" + } + }, + "DeadLetterConfig": { + "target": "com.amazonaws.eventbridge#DeadLetterConfig" + } + }, + "traits": { + "smithy.api#output": {} + } } } } diff --git a/models/firehose.json b/models/firehose.json index 09628529c9..b31afc12c0 100644 --- a/models/firehose.json +++ b/models/firehose.json @@ -2614,7 +2614,7 @@ "name": "firehose" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Data Firehose\n \n

Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose.

\n
\n

Amazon Data Firehose is a fully managed service that delivers real-time\n streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon\n OpenSearch Service, Amazon Redshift, Splunk, and various other supportd\n destinations.

", + "smithy.api#documentation": "Amazon Data Firehose\n \n

Amazon Data Firehose was previously known as Amazon Kinesis Data Firehose.

\n
\n

Amazon Data Firehose is a fully managed service that delivers real-time streaming\n data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch\n Service, Amazon Redshift, Splunk, and various other supported destinations.

", "smithy.api#title": "Amazon Kinesis Firehose", "smithy.api#xmlNamespace": { "uri": "http://firehose.amazonaws.com/doc/2015-08-04" @@ -3834,7 +3834,7 @@ "RequestConfiguration": { "target": "com.amazonaws.firehose#HttpEndpointRequestConfiguration", "traits": { - "smithy.api#documentation": "

The configuration of the requeste sent to the HTTP endpoint specified as the\n destination.

" + "smithy.api#documentation": "

The configuration of the request sent to the HTTP endpoint that is specified as the\n destination.

" } }, "ProcessingConfiguration": { @@ -3863,6 +3863,12 @@ "traits": { "smithy.api#required": {} } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for HTTP Endpoint destination.\n

" + } } }, "traits": { @@ -3916,6 +3922,12 @@ }, "S3DestinationDescription": { "target": "com.amazonaws.firehose#S3DestinationDescription" + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for HTTP Endpoint destination.\n

" + } } }, "traits": { @@ -3969,6 +3981,12 @@ }, "S3Update": { "target": "com.amazonaws.firehose#S3DestinationUpdate" + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for HTTP Endpoint destination.\n

" + } } }, "traits": { @@ -4769,7 +4787,7 @@ } }, "traits": { - "smithy.api#documentation": "

A serializer to use for converting data to the Parquet format before storing it in\n Amazon S3. For more information, see Apache Parquet.

" + "smithy.api#documentation": "

A serializer to use for converting data to the Parquet format before storing it in\n Amazon S3. For more information, see Apache Parquet.

" } }, "com.amazonaws.firehose#ParquetWriterVersion": { @@ -5267,15 +5285,13 @@ "Username": { "target": "com.amazonaws.firehose#Username", "traits": { - "smithy.api#documentation": "

The name of the user.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The name of the user.

" } }, "Password": { "target": "com.amazonaws.firehose#Password", "traits": { - "smithy.api#documentation": "

The user password.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The user password.

" } }, "RetryOptions": { @@ -5314,6 +5330,12 @@ "traits": { "smithy.api#documentation": "

The CloudWatch logging options for your delivery stream.

" } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Amazon Redshift.\n

" + } } }, "traits": { @@ -5347,8 +5369,7 @@ "Username": { "target": "com.amazonaws.firehose#Username", "traits": { - "smithy.api#documentation": "

The name of the user.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The name of the user.

" } }, "RetryOptions": { @@ -5387,6 +5408,12 @@ "traits": { "smithy.api#documentation": "

The Amazon CloudWatch logging options for your delivery stream.

" } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Amazon Redshift.\n

" + } } }, "traits": { @@ -5461,6 +5488,12 @@ "traits": { "smithy.api#documentation": "

The Amazon CloudWatch logging options for your delivery stream.

" } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Amazon Redshift.\n

" + } } }, "traits": { @@ -5806,6 +5839,43 @@ "smithy.api#documentation": "

Specifies the schema to which you want Firehose to configure your data\n before it writes it to Amazon S3. This parameter is required if Enabled is set\n to true.

" } }, + "com.amazonaws.firehose#SecretARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:" + } + }, + "com.amazonaws.firehose#SecretsManagerConfiguration": { + "type": "structure", + "members": { + "SecretARN": { + "target": "com.amazonaws.firehose#SecretARN", + "traits": { + "smithy.api#documentation": "

The ARN of the secret that stores your credentials. It must be in the same region as the\n Firehose stream and the role. The secret ARN can reside in a different account than the delivery stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True.

" + } + }, + "RoleARN": { + "target": "com.amazonaws.firehose#RoleARN", + "traits": { + "smithy.api#documentation": "

\n Specifies the role that Firehose assumes when calling the Secrets Manager API operation. When you provide the role, it overrides any destination specific role defined in the destination configuration. If you do not provide the role, then we use the destination specific role. This parameter is required for Splunk.\n

" + } + }, + "Enabled": { + "target": "com.amazonaws.firehose#BooleanObject", + "traits": { + "smithy.api#documentation": "

Specifies whether you want to use the Secrets Manager feature. When set to\n True, the Secrets Manager configuration overwrites the existing secrets in\n the destination configuration. When it's set to False, Firehose falls back to\n the credentials in the destination configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure that defines how Firehose accesses the secret.

" + } + }, "com.amazonaws.firehose#SecurityGroupIdList": { "type": "list", "member": { @@ -5930,8 +6000,7 @@ "PrivateKey": { "target": "com.amazonaws.firehose#SnowflakePrivateKey", "traits": { - "smithy.api#documentation": "

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

" } }, "KeyPassphrase": { @@ -5943,8 +6012,7 @@ "User": { "target": "com.amazonaws.firehose#SnowflakeUser", "traits": { - "smithy.api#documentation": "

User login name for the Snowflake account.

", - "smithy.api#required": {} + "smithy.api#documentation": "

User login name for the Snowflake account.

" } }, "Database": { @@ -6028,6 +6096,12 @@ "traits": { "smithy.api#required": {} } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Snowflake.\n

" + } } }, "traits": { @@ -6123,6 +6197,12 @@ }, "S3DestinationDescription": { "target": "com.amazonaws.firehose#S3DestinationDescription" + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Snowflake.\n

" + } } }, "traits": { @@ -6224,6 +6304,12 @@ }, "S3Update": { "target": "com.amazonaws.firehose#S3DestinationUpdate" + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n Describes the Secrets Manager configuration in Snowflake.\n

" + } } }, "traits": { @@ -6465,8 +6551,7 @@ "HECToken": { "target": "com.amazonaws.firehose#HECToken", "traits": { - "smithy.api#documentation": "

This is a GUID that you obtain from your Splunk cluster when you create a new HEC\n endpoint.

", - "smithy.api#required": {} + "smithy.api#documentation": "

This is a GUID that you obtain from your Splunk cluster when you create a new HEC\n endpoint.

" } }, "HECAcknowledgmentTimeoutInSeconds": { @@ -6511,6 +6596,12 @@ "traits": { "smithy.api#documentation": "

The buffering options. If no value is specified, the default values for Splunk are used.

" } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Splunk.\n

" + } } }, "traits": { @@ -6579,6 +6670,12 @@ "traits": { "smithy.api#documentation": "

The buffering options. If no value is specified, the default values for Splunk are used.

" } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Splunk.\n

" + } } }, "traits": { @@ -6647,6 +6744,12 @@ "traits": { "smithy.api#documentation": "

The buffering options. If no value is specified, the default values for Splunk are used.

" } + }, + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#documentation": "

\n The configuration that defines how you access secrets for Splunk.\n

" + } } }, "traits": { @@ -6719,7 +6822,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables server-side encryption (SSE) for the delivery stream.

\n

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then\n to ENABLED. The encryption status of a delivery stream is the\n Status property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.

\n

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

\n

Even if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK,\n Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.

\n

For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

\n

If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.

\n

If the encryption status of your delivery stream is ENABLING_FAILED, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Firehose to invoke KMS\n encrypt and decrypt operations.

\n

You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut as its source.

\n

The StartDeliveryStreamEncryption and\n StopDeliveryStreamEncryption operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption 13 times and\n StopDeliveryStreamEncryption 12 times for the same delivery stream in a\n 24-hour period.

" + "smithy.api#documentation": "

Enables server-side encryption (SSE) for the delivery stream.

\n

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then\n to ENABLED. The encryption status of a delivery stream is the\n Status property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.

\n

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

\n

Even if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK,\n Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.

\n

For the KMS grant creation to be successful, the Firehose API operations\n StartDeliveryStreamEncryption and CreateDeliveryStream should\n not be called with session credentials that are more than 6 hours old.

\n

If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.

\n

If the encryption status of your delivery stream is ENABLING_FAILED, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Firehose to invoke KMS\n encrypt and decrypt operations.

\n

You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut as its source.

\n

The StartDeliveryStreamEncryption and\n StopDeliveryStreamEncryption operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption 13 times and\n StopDeliveryStreamEncryption 12 times for the same delivery stream in a\n 24-hour period.
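
Editorial note: the documentation above encodes two concrete constraints, the ENABLING / ENABLED / ENABLING_FAILED status flow and the combined limit of 25 Start/Stop encryption calls per delivery stream per 24 hours. A minimal sketch in plain Swift, assuming nothing beyond those documented values; the types below are illustrative, not the generated Soto Firehose shapes.

```swift
import Foundation

// Illustrative only: mirrors the encryption statuses and the combined call budget
// described in the documentation above.
enum DeliveryStreamEncryptionStatus: String {
    case enabling = "ENABLING"
    case enabled = "ENABLED"
    case enablingFailed = "ENABLING_FAILED"
}

struct EncryptionToggleBudget {
    // StartDeliveryStreamEncryption and StopDeliveryStreamEncryption share a
    // combined limit of 25 calls per delivery stream per 24 hours.
    static let combinedLimit = 25
    private var callTimes: [Date] = []

    mutating func mayCall(now: Date = Date()) -> Bool {
        callTimes.removeAll { now.timeIntervalSince($0) > 24 * 60 * 60 }
        return callTimes.count < Self.combinedLimit
    }

    mutating func recordCall(now: Date = Date()) {
        callTimes.append(now)
    }
}
```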

" } }, "com.amazonaws.firehose#StartDeliveryStreamEncryptionInput": { @@ -7100,7 +7203,7 @@ "SnowflakeDestinationUpdate": { "target": "com.amazonaws.firehose#SnowflakeDestinationUpdate", "traits": { - "smithy.api#documentation": "

Update to the Snowflake destination condiguration settings

" + "smithy.api#documentation": "

Update to the Snowflake destination configuration settings.

" } } }, diff --git a/models/fms.json b/models/fms.json index 3d53d7ff4c..b6d87a8a35 100644 --- a/models/fms.json +++ b/models/fms.json @@ -5275,7 +5275,7 @@ "ForceRemediateForFirstEntries": { "target": "com.amazonaws.fms#BooleanObject", "traits": { - "smithy.api#documentation": "

Applies only when remediation is enabled for the policy as a whole. Firewall Manager uses this setting when it finds policy \n violations that involve conflicts between the custom entries and the policy entries.

\n

If forced remediation is disabled, Firewall Manager marks the network ACL as noncompliant and does not try to \n remediate. For more information about the remediation behavior, see \nNetwork access control list (ACL) policies \n in the Firewall Manager Developer Guide.

", + "smithy.api#documentation": "

Applies only when remediation is enabled for the policy as a whole. Firewall Manager uses this setting when it finds policy \n violations that involve conflicts between the custom entries and the policy entries.

\n

If forced remediation is disabled, Firewall Manager marks the network ACL as noncompliant and does not try to \n remediate. For more information about the remediation behavior, see \nRemediation for managed network ACLs \n in the Firewall Manager Developer Guide.

", "smithy.api#required": {} } }, @@ -5288,7 +5288,7 @@ "ForceRemediateForLastEntries": { "target": "com.amazonaws.fms#BooleanObject", "traits": { - "smithy.api#documentation": "

Applies only when remediation is enabled for the policy as a whole. Firewall Manager uses this setting when it finds policy \n violations that involve conflicts between the custom entries and the policy entries.

\n

If forced remediation is disabled, Firewall Manager marks the network ACL as noncompliant and does not try to \n remediate. For more information about the remediation behavior, see \nNetwork access control list (ACL) policies \n in the Firewall Manager Developer Guide.

", + "smithy.api#documentation": "

Applies only when remediation is enabled for the policy as a whole. Firewall Manager uses this setting when it finds policy \n violations that involve conflicts between the custom entries and the policy entries.

\n

If forced remediation is disabled, Firewall Manager marks the network ACL as noncompliant and does not try to \n remediate. For more information about the remediation behavior, see \nRemediation for managed network ACLs \n in the Firewall Manager Developer Guide.

", "smithy.api#required": {} } } @@ -6145,7 +6145,7 @@ "PolicyDescription": { "target": "com.amazonaws.fms#ResourceDescription", "traits": { - "smithy.api#documentation": "

The definition of the Network Firewall firewall policy.

" + "smithy.api#documentation": "

Your description of the Firewall Manager policy.

" } }, "PolicyStatus": { @@ -6799,7 +6799,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Firewall Manager policy.

\n

A Firewall Manager policy is specific to the individual policy type. If you want to enforce multiple\n\t\tpolicy types across accounts, you can create multiple policies. You can create more than one\n\t\tpolicy for each type.

\n

If you add a new account to an organization that you created with Organizations, Firewall Manager\n\t\tautomatically applies the policy to the resources in that account that are within scope of\n\t\tthe policy.

\n

Firewall Manager provides the following types of policies:

\n
    \n
  • \n

    \n WAF policy - This policy applies WAF web ACL\n\t\t\t\tprotections to specified accounts and resources.

    \n
  • \n
  • \n

    \n Shield Advanced policy - This policy applies Shield Advanced\n\t\t\t\tprotection to specified accounts and resources.

    \n
  • \n
  • \n

    \n Security Groups policy - This type of policy gives you\n\t\t\t\tcontrol over security groups that are in use throughout your organization in\n\t\t\t\tOrganizations and lets you enforce a baseline set of rules across your organization.

    \n
  • \n
  • \n

    \n Network ACL policy - This type of policy gives you\n\t\t\t\tcontrol over the network ACLs that are in use throughout your organization in\n\t\t\t\tOrganizations and lets you enforce a baseline set of first and last network ACL rules across your organization.

    \n
  • \n
  • \n

    \n Network Firewall policy - This policy applies\n\t\t\t\tNetwork Firewall protection to your organization's VPCs.

    \n
  • \n
  • \n

    \n DNS Firewall policy - This policy applies\n\t\t\t\tAmazon Route 53 Resolver DNS Firewall protections to your organization's VPCs.

    \n
  • \n
  • \n

    \n Third-party firewall policy - This policy applies third-party firewall protections. Third-party firewalls are available by subscription through the Amazon Web Services Marketplace console at Amazon Web Services Marketplace.

    \n
      \n
    • \n

      \n Palo Alto Networks Cloud NGFW policy - This policy applies Palo Alto Networks Cloud Next Generation Firewall (NGFW) protections and Palo Alto Networks Cloud NGFW rulestacks to your organization's VPCs.

      \n
    • \n
    • \n

      \n Fortigate CNF policy - This policy applies\n\t\t\t\t\t\tFortigate Cloud Native Firewall (CNF) protections. Fortigate CNF is a cloud-centered solution that blocks Zero-Day threats and secures cloud infrastructures with industry-leading advanced threat prevention, smart web application firewalls (WAF), and API protection.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Creates an Firewall Manager policy.

\n

A Firewall Manager policy is specific to the individual policy type. If you want to enforce multiple\n\t\tpolicy types across accounts, you can create multiple policies. You can create more than one\n\t\tpolicy for each type.

\n

If you add a new account to an organization that you created with Organizations, Firewall Manager\n\t\tautomatically applies the policy to the resources in that account that are within scope of\n\t\tthe policy.

\n

Firewall Manager provides the following types of policies:

\n
    \n
  • \n

    \n WAF policy - This policy applies WAF web ACL\n\t\t\t\tprotections to specified accounts and resources.

    \n
  • \n
  • \n

    \n Shield Advanced policy - This policy applies Shield Advanced\n\t\t\t\tprotection to specified accounts and resources.

    \n
  • \n
  • \n

    \n Security Groups policy - This type of policy gives you\n\t\t\t\tcontrol over security groups that are in use throughout your organization in\n\t\t\t\tOrganizations and lets you enforce a baseline set of rules across your organization.

    \n
  • \n
  • \n

    \n Network ACL policy - This type of policy gives you\n\t\t\t\tcontrol over the network ACLs that are in use throughout your organization in\n\t\t\t\tOrganizations and lets you enforce a baseline set of first and last network ACL rules across your organization.

    \n
  • \n
  • \n

    \n Network Firewall policy - This policy applies\n\t\t\t\tNetwork Firewall protection to your organization's VPCs.

    \n
  • \n
  • \n

    \n DNS Firewall policy - This policy applies\n\t\t\t\tAmazon Route 53 Resolver DNS Firewall protections to your organization's VPCs.

    \n
  • \n
  • \n

    \n Third-party firewall policy - This policy applies third-party firewall protections. Third-party firewalls are available by subscription through the Amazon Web Services Marketplace console at Amazon Web Services Marketplace.

    \n
      \n
    • \n

      \n Palo Alto Networks Cloud NGFW policy - This policy applies Palo Alto Networks Cloud Next Generation Firewall (NGFW) protections and Palo Alto Networks Cloud NGFW rulestacks to your organization's VPCs.

      \n
    • \n
    • \n

      \n Fortigate CNF policy - This policy applies\n\t\t\t\t\t\tFortigate Cloud Native Firewall (CNF) protections. Fortigate CNF is a cloud-centered solution that blocks Zero-Day threats and secures cloud infrastructures with industry-leading advanced threat prevention, smart web application firewalls (WAF), and API protection.

      \n
    • \n
    \n
  • \n
" } }, "com.amazonaws.fms#PutPolicyRequest": { @@ -7396,12 +7396,12 @@ "Value": { "target": "com.amazonaws.fms#ResourceTagValue", "traits": { - "smithy.api#documentation": "

The resource tag value.

" + "smithy.api#documentation": "

The resource tag value. To specify an empty string value, either don't provide this or specify it as \"\".

" } } }, "traits": { - "smithy.api#documentation": "

The resource tags that Firewall Manager uses to determine if a particular resource\n should be included or excluded from the Firewall Manager policy. Tags enable you to\n categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or\n environment. Each tag consists of a key and an optional value. Firewall Manager combines the\n tags with \"AND\" so that, if you add more than one tag to a policy scope, a resource must have\n all the specified tags to be included or excluded. For more information, see\n Working with Tag Editor.

" + "smithy.api#documentation": "

The resource tags that Firewall Manager uses to determine if a particular resource\n should be included or excluded from the Firewall Manager policy. Tags enable you to\n categorize your Amazon Web Services resources in different ways, for example, by purpose, owner, or\n environment. Each tag consists of a key and an optional value. Firewall Manager combines the\n tags with \"AND\" so that, if you add more than one tag to a policy scope, a resource must have\n all the specified tags to be included or excluded. For more information, see\n Working with Tag Editor.

\n

Every resource tag must have a string value, either a non-empty string or an empty string. If you don't \n provide a value for a resource tag, Firewall Manager saves the value as an empty string: \"\". When Firewall Manager compares tags, it only \n matches two tags if they have the same key and the same value. A tag with an empty string value only \n matches with tags that also have an empty string value.
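
Editorial note: the matching rule spelled out above (scope tags combined with AND, an empty-string value only matching an empty-string value) reduces to exact key/value equality over every scope tag. A hedged sketch in plain Swift, using dictionaries in place of the generated ResourceTag list.

```swift
// Sketch of the documented scope-tag semantics. A tag value you omit is stored
// by Firewall Manager as the empty string "".
func resourceMatchesPolicyScope(scopeTags: [String: String],
                                resourceTags: [String: String]) -> Bool {
    // Scope tags are combined with AND: the resource must carry every scope tag
    // with an identical value ("" only matches "").
    scopeTags.allSatisfy { key, value in resourceTags[key] == value }
}
```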

" } }, "com.amazonaws.fms#ResourceTagKey": { diff --git a/models/fsx.json b/models/fsx.json index 42c5e7570b..29023e09dd 100644 --- a/models/fsx.json +++ b/models/fsx.json @@ -3013,7 +3013,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new, empty Amazon FSx file system. You can create the following supported \n Amazon FSx file systems using the CreateFileSystem API operation:

\n
    \n
  • \n

    Amazon FSx for Lustre

    \n
  • \n
  • \n

    Amazon FSx for NetApp ONTAP

    \n
  • \n
  • \n

    Amazon FSx for OpenZFS

    \n
  • \n
  • \n

    Amazon FSx for Windows File Server

    \n
  • \n
\n

This operation requires a client request token in the request that Amazon FSx uses\n to ensure idempotent creation. This means that calling the operation multiple times with\n the same client request token has no effect. By using the idempotent operation, you can\n retry a CreateFileSystem operation without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives success as long as the\n parameters are the same.

\n

If a file system with the specified client request token exists and the parameters\n match, CreateFileSystem returns the description of the existing file\n system. If a file system with the specified client request token exists and the\n parameters don't match, this call returns IncompatibleParameterError. If a\n file system with the specified client request token doesn't exist,\n CreateFileSystem does the following:

\n
    \n
  • \n

    Creates a new, empty Amazon FSx file system with an assigned ID, and\n an initial lifecycle state of CREATING.

    \n
  • \n
  • \n

    Returns the description of the file system in JSON format.

    \n
  • \n
\n \n

The CreateFileSystem call returns while the file system's lifecycle\n state is still CREATING. You can check the file-system creation status\n by calling the DescribeFileSystems operation, which returns the file system state\n along with other information.

\n
" + "smithy.api#documentation": "

Creates a new, empty Amazon FSx file system. You can create the following supported \n Amazon FSx file systems using the CreateFileSystem API operation:

\n
    \n
  • \n

    Amazon FSx for Lustre

    \n
  • \n
  • \n

    Amazon FSx for NetApp ONTAP

    \n
  • \n
  • \n

    Amazon FSx for OpenZFS

    \n
  • \n
  • \n

    Amazon FSx for Windows File Server

    \n
  • \n
\n

This operation requires a client request token in the request that Amazon FSx uses\n to ensure idempotent creation. This means that calling the operation multiple times with\n the same client request token has no effect. By using the idempotent operation, you can\n retry a CreateFileSystem operation without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives success as long as the\n parameters are the same.

\n

If a file system with the specified client request token exists and the parameters\n match, CreateFileSystem returns the description of the existing file\n system. If a file system with the specified client request token exists and the\n parameters don't match, this call returns IncompatibleParameterError. If a\n file system with the specified client request token doesn't exist,\n CreateFileSystem does the following:

\n
    \n
  • \n

    Creates a new, empty Amazon FSx file system with an assigned ID, and\n an initial lifecycle state of CREATING.

    \n
  • \n
  • \n

    Returns the description of the file system in JSON format.

    \n
  • \n
\n \n

The CreateFileSystem call returns while the file system's lifecycle\n state is still CREATING. You can check the file-system creation status\n by calling the DescribeFileSystems operation, which returns the file system state\n along with other information.
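
Editorial note: the client-request-token behaviour above boils down to a three-way decision. A hedged sketch of that decision in plain Swift; the enum and the parameter map are illustrative, not the FSx request/response shapes.

```swift
// Illustrative only: models the documented idempotency outcomes of CreateFileSystem.
enum CreateFileSystemOutcome {
    case returnsExistingDescription   // same token, same parameters
    case incompatibleParameterError   // same token, different parameters
    case createsNewFileSystem         // unknown token; lifecycle starts as CREATING
}

func outcome(forToken token: String,
             parameters: [String: String],
             previousRequests: [String: [String: String]]) -> CreateFileSystemOutcome {
    guard let previous = previousRequests[token] else { return .createsNewFileSystem }
    return previous == parameters ? .returnsExistingDescription : .incompatibleParameterError
}
```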

\n
" } }, "com.amazonaws.fsx#CreateFileSystemFromBackup": { @@ -3181,7 +3181,7 @@ "DeploymentType": { "target": "com.amazonaws.fsx#LustreDeploymentType", "traits": { - "smithy.api#documentation": "

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment \n types when you need temporary storage and shorter-term processing of data. \n The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst \n throughput capacity than SCRATCH_1.

\n

Choose PERSISTENT_1 for longer-term storage and for throughput-focused \n workloads that aren’t latency-sensitive.\n PERSISTENT_1 supports encryption of data in transit, and is available in all \n Amazon Web Services Regions in which FSx for Lustre is available.

\n

Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads \n that require the highest levels of IOPS/throughput. PERSISTENT_2 supports \n SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 \n is available in a limited number of Amazon Web Services Regions. \n For more information, and an up-to-date list of Amazon Web Services Regions in which \n PERSISTENT_2 is available, see \n File \n system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

\n \n

If you choose PERSISTENT_2, and you set FileSystemTypeVersion to\n 2.10, the CreateFileSystem operation fails.

\n
\n

Encryption of data in transit is automatically turned on when you access\n SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file\n systems from Amazon EC2 instances that support automatic encryption in\n the Amazon Web Services Regions where they are available. For more information about\n encryption in transit for FSx for Lustre file systems, see Encrypting data in\n transit in the Amazon FSx for Lustre User Guide.

\n

(Default = SCRATCH_1)

" + "smithy.api#documentation": "

(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment \n types when you need temporary storage and shorter-term processing of data. \n The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst \n throughput capacity than SCRATCH_1.

\n

Choose PERSISTENT_1 for longer-term storage and for throughput-focused \n workloads that aren’t latency-sensitive.\n PERSISTENT_1 supports encryption of data in transit, and is available in all \n Amazon Web Services Regions in which FSx for Lustre is available.

\n

Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads \n that require the highest levels of IOPS/throughput. PERSISTENT_2 supports \n SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB).\n You can optionally specify a metadata configuration mode for PERSISTENT_2\n which supports increasing metadata performance. PERSISTENT_2 is available\n in a limited number of Amazon Web Services Regions. For more information, and an up-to-date\n list of Amazon Web Services Regions in which PERSISTENT_2 is available, see \n File \n system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide.

\n \n

If you choose PERSISTENT_2, and you set FileSystemTypeVersion to\n 2.10, the CreateFileSystem operation fails.

\n
\n

Encryption of data in transit is automatically turned on when you access\n SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 file\n systems from Amazon EC2 instances that support automatic encryption in\n the Amazon Web Services Regions where they are available. For more information about\n encryption in transit for FSx for Lustre file systems, see Encrypting data in\n transit in the Amazon FSx for Lustre User Guide.

\n

(Default = SCRATCH_1)
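
Editorial note: as a quick reference to the deployment-type text above, the in-transit-encryption point and the default can be captured in a tiny sketch (plain Swift, not the generated Soto enum).

```swift
// Sketch only: per the documentation above, in-transit encryption is automatically
// enabled for SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 (when accessed from EC2
// instances that support it), and SCRATCH_1 is the default deployment type.
enum LustreDeploymentType: String {
    case scratch1 = "SCRATCH_1", scratch2 = "SCRATCH_2"
    case persistent1 = "PERSISTENT_1", persistent2 = "PERSISTENT_2"

    static let defaultType: LustreDeploymentType = .scratch1

    var supportsInTransitEncryption: Bool {
        self != .scratch1
    }
}
```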

" } }, "AutoImportPolicy": { @@ -3234,12 +3234,40 @@ "traits": { "smithy.api#documentation": "

The Lustre root squash configuration used when creating an Amazon FSx for Lustre\n file system. When enabled, root squash restricts root-level access from clients that\n try to access your file system as a root user.

" } + }, + "MetadataConfiguration": { + "target": "com.amazonaws.fsx#CreateFileSystemLustreMetadataConfiguration", + "traits": { + "smithy.api#documentation": "

The Lustre metadata performance configuration for the creation of an\n FSx for Lustre file system using a PERSISTENT_2\n deployment type.

" + } } }, "traits": { "smithy.api#documentation": "

The Lustre configuration for the file system being created.

\n \n

The following parameters are not supported for file systems\n with a data repository association created with\n .

\n
    \n
  • \n

    \n AutoImportPolicy\n

    \n
  • \n
  • \n

    \n ExportPath\n

    \n
  • \n
  • \n

    \n ImportedFileChunkSize\n

    \n
  • \n
  • \n

    \n ImportPath\n

    \n
  • \n
\n
" } }, + "com.amazonaws.fsx#CreateFileSystemLustreMetadataConfiguration": { + "type": "structure", + "members": { + "Iops": { + "target": "com.amazonaws.fsx#MetadataIops", + "traits": { + "smithy.api#documentation": "

(USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision\n for the file system. This parameter sets the maximum rate of metadata disk IOPS\n supported by the file system. Valid values are 1500, 3000,\n 6000, 12000, and multiples of 12000\n up to a maximum of 192000.

\n \n

Iops doesn’t have a default value. If you're using USER_PROVISIONED mode,\n you can choose to specify a valid value. If you're using AUTOMATIC mode,\n you cannot specify a value because FSx for Lustre automatically sets\n the value based on your file system storage capacity.\n

\n
" + } + }, + "Mode": { + "target": "com.amazonaws.fsx#MetadataConfigurationMode", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The metadata configuration mode for provisioning Metadata IOPS for\n an FSx for Lustre file system using a PERSISTENT_2\n deployment type.

\n
    \n
  • \n

    In AUTOMATIC mode, FSx for Lustre automatically\n provisions and scales the number of Metadata IOPS for your file system\n based on your file system storage capacity.

    \n
  • \n
  • \n

    In USER_PROVISIONED mode, you specify the number of Metadata\n IOPS to provision for your file system.

    \n
  • \n
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The Lustre metadata performance configuration for the creation of an\n Amazon FSx for Lustre file system using a PERSISTENT_2\n deployment type. The configuration uses a Metadata IOPS value to set the\n maximum rate of metadata disk IOPS supported by the file system.

\n

After creation, the file system supports increasing metadata performance.\n For more information on Metadata IOPS, see Lustre\n metadata performance configuration in the Amazon FSx for Lustre User Guide.
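
Editorial note: the Iops values listed above (1500, 3000, 6000, 12000, and multiples of 12000 up to 192000, matching the MetadataIops range added later in this file) are easy to validate client-side. A hedged sketch in plain Swift.

```swift
// Sketch of the documented USER_PROVISIONED Metadata IOPS values:
// 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000.
func isValidLustreMetadataIops(_ iops: Int) -> Bool {
    switch iops {
    case 1500, 3000, 6000:
        return true
    default:
        return iops >= 12_000 && iops <= 192_000 && iops.isMultiple(of: 12_000)
    }
}
```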

" + } + }, "com.amazonaws.fsx#CreateFileSystemOntapConfiguration": { "type": "structure", "members": { @@ -3299,7 +3327,7 @@ "HAPairs": { "target": "com.amazonaws.fsx#HAPairs", "traits": { - "smithy.api#documentation": "

Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. \n FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, \n Iops, and ThroughputCapacity. For more information, see \n High-availability (HA) pairs in the FSx for ONTAP user guide.

\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of HAPairs is less than 1 or greater than 12.

    \n
  • \n
  • \n

    The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. \n FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of StorageCapacity, \n Iops, and ThroughputCapacity. For more information, see \n High-availability (HA) pairs in the FSx for ONTAP user guide.

\n

Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

\n
    \n
  • \n

    The value of HAPairs is less than 1 or greater than 12.

    \n
  • \n
  • \n

    The value of HAPairs is greater than 1 and the value of DeploymentType is SINGLE_AZ_1 or MULTI_AZ_1.

    \n
  • \n
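
Editorial note: the 400 (Bad Request) conditions above translate directly into a client-side check. A hedged sketch in plain Swift, with the deployment type passed as a raw string rather than the generated enum.

```swift
// Sketch of the documented HAPairs validation: the value must be between 1 and 12,
// and a value greater than 1 is rejected for SINGLE_AZ_1 and MULTI_AZ_1 deployments.
func haPairsIsValid(_ haPairs: Int, deploymentType: String) -> Bool {
    guard (1...12).contains(haPairs) else { return false }
    if haPairs > 1 && (deploymentType == "SINGLE_AZ_1" || deploymentType == "MULTI_AZ_1") {
        return false
    }
    return true
}
```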
" } }, "ThroughputCapacityPerHAPair": { @@ -3407,14 +3435,14 @@ "target": "com.amazonaws.fsx#StorageCapacity", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).

\n

\n FSx for Lustre file systems - The amount of\n storage capacity that you can configure depends on the value that you set for\n StorageType and the Lustre DeploymentType, as\n follows:

\n
    \n
  • \n

    For SCRATCH_2, PERSISTENT_2 and PERSISTENT_1 deployment types \n using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

    \n
  • \n
  • \n

    For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for \n 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.

    \n
  • \n
  • \n

    For SCRATCH_1 deployment type, valid values are \n 1200 GiB, 2400 GiB, and increments of 3600 GiB.

    \n
  • \n
\n

\n FSx for ONTAP file systems - The amount of storage capacity \n that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs.

\n

\n FSx for OpenZFS file systems - The amount of storage capacity that \n you can configure is from 64 GiB up to 524,288 GiB (512 TiB).

\n

\n FSx for Windows File Server file systems - The amount\n of storage capacity that you can configure depends on the value that you set for\n StorageType as follows:

\n
    \n
  • \n

    For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).

    \n
  • \n
  • \n

    For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).

    \n
  • \n
", + "smithy.api#documentation": "

Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).

\n

\n FSx for Lustre file systems - The amount of\n storage capacity that you can configure depends on the value that you set for\n StorageType and the Lustre DeploymentType, as\n follows:

\n
    \n
  • \n

    For SCRATCH_2, PERSISTENT_2, and PERSISTENT_1 deployment types \n using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

    \n
  • \n
  • \n

    For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for \n 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.

    \n
  • \n
  • \n

    For SCRATCH_1 deployment type, valid values are \n 1200 GiB, 2400 GiB, and increments of 3600 GiB.

    \n
  • \n
\n

\n FSx for ONTAP file systems - The amount of storage capacity \n that you can configure depends on the value of the HAPairs property. The minimum value is calculated as 1,024 * HAPairs and the maximum is calculated as 524,288 * HAPairs.

\n

\n FSx for OpenZFS file systems - The amount of storage capacity that \n you can configure is from 64 GiB up to 524,288 GiB (512 TiB).

\n

\n FSx for Windows File Server file systems - The amount\n of storage capacity that you can configure depends on the value that you set for\n StorageType as follows:

\n
    \n
  • \n

    For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).

    \n
  • \n
  • \n

    For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).

    \n
  • \n
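
Editorial note: a partial sketch of the storage-capacity rules documented above, covering only the Lustre SSD cases and the FSx for ONTAP bounds (1,024 * HAPairs minimum, 524,288 * HAPairs maximum). "Increments of N GiB" is read here as multiples of N; plain Swift, not Soto shapes.

```swift
// Lustre SSD: 1200 GiB, 2400 GiB, then increments of 3600 GiB for SCRATCH_1 and
// increments of 2400 GiB for SCRATCH_2 / PERSISTENT_1 / PERSISTENT_2.
func lustreSsdCapacityIsValid(_ gib: Int, deploymentType: String) -> Bool {
    if gib == 1200 || gib == 2400 { return true }
    let increment = (deploymentType == "SCRATCH_1") ? 3600 : 2400
    return gib > 2400 && gib.isMultiple(of: increment)
}

// FSx for ONTAP: the configurable range scales with the number of HA pairs.
func ontapCapacityIsValid(_ gib: Int, haPairs: Int) -> Bool {
    (1_024 * haPairs...524_288 * haPairs).contains(gib)
}
```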
", "smithy.api#required": {} } }, "StorageType": { "target": "com.amazonaws.fsx#StorageType", "traits": { - "smithy.api#documentation": "

Sets the storage type for the file system that you're creating. Valid values are\n SSD and HDD.

\n
    \n
  • \n

    Set to SSD to use solid state drive storage. SSD is supported on all Windows,\n Lustre, ONTAP, and OpenZFS deployment types.

    \n
  • \n
  • \n

    Set to HDD to use hard disk drive storage. \n HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types,\n and on PERSISTENT_1 Lustre file system deployment types. \n

    \n
  • \n
\n

Default value is SSD. For more information, see Storage\n type options in the FSx for Windows File Server User\n Guide and Multiple storage\n options in the FSx for Lustre User\n Guide.

" + "smithy.api#documentation": "

Sets the storage type for the file system that you're creating. Valid values are\n SSD and HDD.

\n
    \n
  • \n

    Set to SSD to use solid state drive storage. SSD is supported on all Windows,\n Lustre, ONTAP, and OpenZFS deployment types.

    \n
  • \n
  • \n

    Set to HDD to use hard disk drive storage. \n HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types,\n and on PERSISTENT_1 Lustre file system deployment types.

    \n
  • \n
\n

Default value is SSD. For more information, see Storage\n type options in the FSx for Windows File Server User\n Guide and Multiple storage\n options in the FSx for Lustre User\n Guide.

" } }, "SubnetIds": { @@ -3455,7 +3483,7 @@ "FileSystemTypeVersion": { "target": "com.amazonaws.fsx#FileSystemTypeVersion", "traits": { - "smithy.api#documentation": "

(Optional) For FSx for Lustre file systems, sets the Lustre version\n for the file system that you're creating. Valid values are 2.10, \n 2.12, and 2.15:

\n
    \n
  • \n

    2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

    \n
  • \n
  • \n

    2.12 and 2.15 are supported by all Lustre deployment types. 2.12\n or 2.15 is required when setting FSx for Lustre DeploymentType\n to PERSISTENT_2.

    \n
  • \n
\n

Default value = 2.10, except when DeploymentType is set to \n PERSISTENT_2, then the default is 2.12.

\n \n

If you set FileSystemTypeVersion to 2.10 for a \n PERSISTENT_2 Lustre deployment type, the CreateFileSystem \n operation fails.

\n
" + "smithy.api#documentation": "

For FSx for Lustre file systems, sets the Lustre version for the file system\n that you're creating. Valid values are 2.10, 2.12, and\n 2.15:

\n
    \n
  • \n

    \n 2.10 is supported by the Scratch and Persistent_1 Lustre \n deployment types.

    \n
  • \n
  • \n

    \n 2.12 is supported by all Lustre deployment types, except\n for PERSISTENT_2 with a metadata configuration mode.

    \n
  • \n
  • \n

    \n 2.15 is supported by all Lustre deployment types and is\n recommended for all new file systems.

    \n
  • \n
\n

Default value is 2.10, except for the following deployments:

\n
    \n
  • \n

    Default value is 2.12 when DeploymentType is set to \n PERSISTENT_2 without a metadata configuration mode.

    \n
  • \n
  • \n

    Default value is 2.15 when DeploymentType is set to \n PERSISTENT_2 with a metadata configuration mode.

    \n
  • \n
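
Editorial note: the default-version rules above can be summarised in one small function. A hedged sketch in plain Swift, string-typed for brevity.

```swift
// Sketch of the documented FileSystemTypeVersion defaults for FSx for Lustre:
// 2.10 by default, 2.12 for PERSISTENT_2 without a metadata configuration,
// and 2.15 for PERSISTENT_2 with a metadata configuration.
func defaultLustreVersion(deploymentType: String, hasMetadataConfiguration: Bool) -> String {
    guard deploymentType == "PERSISTENT_2" else { return "2.10" }
    return hasMetadataConfiguration ? "2.15" : "2.12"
}
```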
" } }, "OpenZFSConfiguration": { @@ -3576,7 +3604,7 @@ "SecurityStyle": { "target": "com.amazonaws.fsx#SecurityStyle", "traits": { - "smithy.api#documentation": "

Specifies the security style for the volume. If a volume's security style is not specified, \n it is automatically set to the root volume's security style. The security style determines the type of permissions \n that FSx for ONTAP uses to control data access. For more information, see \n Volume security style \n in the Amazon FSx for NetApp ONTAP User Guide.\n Specify one of the following values:

\n
    \n
  • \n

    \n UNIX if the file system is managed by a UNIX\n administrator, the majority of users are NFS clients, and an application\n accessing the data uses a UNIX user as the service account.\n

    \n
  • \n
  • \n

    \n NTFS if the file system is managed by a Windows\n administrator, the majority of users are SMB clients, and an application\n accessing the data uses a Windows user as the service account.

    \n
  • \n
  • \n

    \n MIXED This is an advanced setting. For more information, see the topic \n What the security styles and their effects are \n in the NetApp Documentation Center.

    \n
  • \n
\n

For more information, see Volume security style in the \n FSx for ONTAP User Guide.

" + "smithy.api#documentation": "

Specifies the security style for the volume. If a volume's security style is not specified, \n it is automatically set to the root volume's security style. The security style determines the type of permissions \n that FSx for ONTAP uses to control data access. Specify one of the following values:

\n
    \n
  • \n

    \n UNIX if the file system is managed by a UNIX\n administrator, the majority of users are NFS clients, and an application\n accessing the data uses a UNIX user as the service account.\n

    \n
  • \n
  • \n

    \n NTFS if the file system is managed by a Windows\n administrator, the majority of users are SMB clients, and an application\n accessing the data uses a Windows user as the service account.

    \n
  • \n
  • \n

    \n MIXED This is an advanced setting. For more information, see the topic \n What the security styles and their effects are \n in the NetApp Documentation Center.

    \n
  • \n
\n

For more information, see Volume security style in the \n FSx for ONTAP User Guide.

" } }, "SizeInMegabytes": { @@ -3608,7 +3636,7 @@ "OntapVolumeType": { "target": "com.amazonaws.fsx#InputOntapVolumeType", "traits": { - "smithy.api#documentation": "

Specifies the type of volume you are creating. Valid values are the following:

\n
    \n
  • \n

    \n RW specifies a read/write volume. RW is the default.

    \n
  • \n
  • \n

    \n DP specifies a data-protection volume. A DP volume\n is read-only and can be used as the destination of a NetApp SnapMirror relationship.

    \n
  • \n
\n

For more information, see Volume types \n in the Amazon FSx for NetApp ONTAP User Guide.

" + "smithy.api#documentation": "

Specifies the type of volume you are creating. Valid values are the following:

\n
    \n
  • \n

    \n RW specifies a read/write volume. RW is the default.

    \n
  • \n
  • \n

    \n DP specifies a data-protection volume. A DP volume\n is read-only and can be used as the destination of a NetApp SnapMirror relationship.

    \n
  • \n
\n

For more information, see Volume types \n in the Amazon FSx for NetApp ONTAP User Guide.

" } }, "SnapshotPolicy": { @@ -3632,7 +3660,7 @@ "VolumeStyle": { "target": "com.amazonaws.fsx#VolumeStyle", "traits": { - "smithy.api#documentation": "

Use to specify the style of an ONTAP volume. FSx for ONTAP offers two styles of volumes that you can use for different purposes, \n FlexVol and FlexGroup volumes. For more information, see \n Volume styles in the Amazon FSx for NetApp ONTAP User Guide.

" + "smithy.api#documentation": "

Use to specify the style of an ONTAP volume. FSx for ONTAP offers two styles of volumes that you can use for different purposes, \n FlexVol and FlexGroup volumes. For more information, see \n Volume styles in the Amazon FSx for NetApp ONTAP User Guide.

" } }, "AggregateConfiguration": { @@ -3942,7 +3970,7 @@ "RootVolumeSecurityStyle": { "target": "com.amazonaws.fsx#StorageVirtualMachineRootVolumeSecurityStyle", "traits": { - "smithy.api#documentation": "

The security style of the root volume of the SVM. Specify one of the following values:

\n
    \n
  • \n

    \n UNIX if the file system is managed by a UNIX\n administrator, the majority of users are NFS clients, and an application\n accessing the data uses a UNIX user as the service account.

    \n
  • \n
  • \n

    \n NTFS if the file system is managed by a Microsoft Windows\n administrator, the majority of users are SMB clients, and an application\n accessing the data uses a Microsoft Windows user as the service account.

    \n
  • \n
  • \n

    \n MIXED This is an advanced setting. For more information, see \n Volume security style in the Amazon FSx for NetApp ONTAP User Guide.

    \n
  • \n
\n

" + "smithy.api#documentation": "

The security style of the root volume of the SVM. Specify one of the following values:

\n
    \n
  • \n

    \n UNIX if the file system is managed by a UNIX\n administrator, the majority of users are NFS clients, and an application\n accessing the data uses a UNIX user as the service account.

    \n
  • \n
  • \n

    \n NTFS if the file system is managed by a Microsoft Windows\n administrator, the majority of users are SMB clients, and an application\n accessing the data uses a Microsoft Windows user as the service account.

    \n
  • \n
  • \n

    \n MIXED This is an advanced setting. For more information, see \n Volume security style\n in the Amazon FSx for NetApp ONTAP User Guide.

    \n
  • \n
\n

" } } }, @@ -5096,7 +5124,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a file system. After deletion, the file system no longer exists, and its data\n is gone. Any existing automatic backups and snapshots are also deleted.

\n

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the\n volumes and storage virtual machines (SVMs) on the file system. Then provide a\n FileSystemId value to the DeleFileSystem operation.

\n

By default, when you delete an Amazon FSx for Windows File Server file system,\n a final backup is created upon deletion. This final backup isn't subject to the file\n system's retention policy, and must be manually deleted.

\n

To delete an Amazon FSx for Lustre file system, first \n unmount\n it from every connected Amazon EC2 instance, then provide a FileSystemId\n value to the DeleFileSystem operation. By default, Amazon FSx will not\n take a final backup when the DeleteFileSystem operation is invoked. On file systems\n not linked to an Amazon S3 bucket, set SkipFinalBackup to false\n to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked\n file systems. To ensure all of your data is written back to S3 before deleting your file system,\n you can either monitor for the\n AgeOfOldestQueuedMessage\n metric to be zero (if using automatic export) or you can run an\n export data repository task.\n If you have automatic export enabled and want to use an export data repository task, you have\n to disable automatic export before executing the export data repository task.

\n

The DeleteFileSystem operation returns while the file system has the\n DELETING status. You can check the file system deletion status by\n calling the DescribeFileSystems operation, which returns a list of file systems in your\n account. If you pass the file system ID for a deleted file system, the\n DescribeFileSystems operation returns a FileSystemNotFound\n error.

\n \n

If a data repository task is in a PENDING or EXECUTING state,\n deleting an Amazon FSx for Lustre file system will fail with an HTTP status\n code 400 (Bad Request).

\n
\n \n

The data in a deleted file system is also deleted and can't be recovered by\n any means.

\n
", + "smithy.api#documentation": "

Deletes a file system. After deletion, the file system no longer exists, and its data\n is gone. Any existing automatic backups and snapshots are also deleted.

\n

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the\n volumes and storage virtual machines (SVMs) on the file system. Then provide a\n FileSystemId value to the DeleteFileSystem operation.

\n

By default, when you delete an Amazon FSx for Windows File Server file system,\n a final backup is created upon deletion. This final backup isn't subject to the file\n system's retention policy, and must be manually deleted.

\n

To delete an Amazon FSx for Lustre file system, first \n unmount\n it from every connected Amazon EC2 instance, then provide a FileSystemId\n value to the DeleteFileSystem operation. By default, Amazon FSx will not\n take a final backup when the DeleteFileSystem operation is invoked. On file systems\n not linked to an Amazon S3 bucket, set SkipFinalBackup to false\n to take a final backup of the file system you are deleting. Backups cannot be enabled on S3-linked\n file systems. To ensure all of your data is written back to S3 before deleting your file system,\n you can either monitor for the\n AgeOfOldestQueuedMessage\n metric to be zero (if using automatic export) or you can run an\n export data repository task.\n If you have automatic export enabled and want to use an export data repository task, you have\n to disable automatic export before executing the export data repository task.

\n

The DeleteFileSystem operation returns while the file system has the\n DELETING status. You can check the file system deletion status by\n calling the DescribeFileSystems operation, which returns a list of file systems in your\n account. If you pass the file system ID for a deleted file system, the\n DescribeFileSystems operation returns a FileSystemNotFound\n error.

\n \n

If a data repository task is in a PENDING or EXECUTING state,\n deleting an Amazon FSx for Lustre file system will fail with an HTTP status\n code 400 (Bad Request).

\n
\n \n

The data in a deleted file system is also deleted and can't be recovered by\n any means.
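
Editorial note: a partial sketch of the final-backup behaviour described above, limited to the Windows and Lustre cases; the skipFinalBackup parameter is treated generically here rather than as the exact request shape.

```swift
// Sketch only: FSx for Windows File Server takes a final backup on deletion by
// default, FSx for Lustre does not, and backups cannot be enabled on S3-linked
// Lustre file systems.
func takesFinalBackupOnDelete(fileSystemType: String,
                              isS3LinkedLustre: Bool,
                              skipFinalBackup: Bool) -> Bool {
    switch fileSystemType {
    case "WINDOWS":
        return !skipFinalBackup
    case "LUSTRE":
        return !isS3LinkedLustre && !skipFinalBackup
    default:
        // Other file system types are not covered by the note above.
        return false
    }
}
```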

\n
", "smithy.api#examples": [ { "title": "To delete a file system", @@ -6783,7 +6811,7 @@ "target": "com.amazonaws.fsx#ArchivePath", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The path to the S3 or NFS data repository that links to the\n cache. You must provide one of the following paths:

\n
    \n
  • \n

    The path can be an NFS data repository that links to\n the cache. The path can be in one of two formats:

    \n
      \n
    • \n

      If you are not using the DataRepositorySubdirectories\n parameter, the path is to an NFS Export directory (or one of its subdirectories)\n in the format nsf://nfs-domain-name/exportpath. You can therefore\n link a single NFS Export to a single data repository association.

      \n
    • \n
    • \n

      If you are using the DataRepositorySubdirectories\n parameter, the path is the domain name of the NFS file system in the format\n nfs://filer-domain-name, which indicates the root of the\n subdirectories specified with the DataRepositorySubdirectories\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    The path can be an S3 bucket or prefix\n in the format s3://myBucket/myPrefix/.

    \n
  • \n
", + "smithy.api#documentation": "

The path to the S3 or NFS data repository that links to the\n cache. You must provide one of the following paths:

\n
    \n
  • \n

    The path can be an NFS data repository that links to\n the cache. The path can be in one of two formats:

    \n
      \n
    • \n

      If you are not using the DataRepositorySubdirectories\n parameter, the path is to an NFS Export directory (or one of its subdirectories)\n in the format nfs://nfs-domain-name/exportpath. You can therefore\n link a single NFS Export to a single data repository association.

      \n
    • \n
    • \n

      If you are using the DataRepositorySubdirectories\n parameter, the path is the domain name of the NFS file system in the format\n nfs://filer-domain-name, which indicates the root of the\n subdirectories specified with the DataRepositorySubdirectories\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    The path can be an S3 bucket or prefix\n in the format s3://myBucket/myPrefix/.

    \n
  • \n
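
Editorial note: the two documented path forms (an NFS domain or export path, and an S3 bucket or prefix) are easy to distinguish up front. A hedged sketch in plain Swift.

```swift
// Sketch distinguishing the documented data repository path forms:
// nfs://filer-domain-name or nfs://nfs-domain-name/exportpath, and s3://myBucket/myPrefix/.
enum RepositoryPathKind {
    case nfs, s3, unknown
}

func repositoryPathKind(_ path: String) -> RepositoryPathKind {
    if path.hasPrefix("nfs://") { return .nfs }
    if path.hasPrefix("s3://") { return .s3 }
    return .unknown
}
```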
", "smithy.api#required": {} } }, @@ -7263,6 +7291,28 @@ "smithy.api#documentation": "

The lifecycle status of the file system.

" } }, + "com.amazonaws.fsx#FileSystemLustreMetadataConfiguration": { + "type": "structure", + "members": { + "Iops": { + "target": "com.amazonaws.fsx#MetadataIops", + "traits": { + "smithy.api#documentation": "

The number of Metadata IOPS provisioned for the file system. Valid values\n are 1500, 3000, 6000, 12000,\n and multiples of 12000 up to a maximum of 192000.

" + } + }, + "Mode": { + "target": "com.amazonaws.fsx#MetadataConfigurationMode", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The metadata configuration mode for provisioning Metadata IOPS for the\n file system.

\n
    \n
  • \n

    In AUTOMATIC mode, FSx for Lustre automatically\n provisions and scales the number of Metadata IOPS on your file system based\n on your file system storage capacity.

    \n
  • \n
  • \n

    In USER_PROVISIONED mode, you can choose to specify the number\n of Metadata IOPS to provision for your file system.

    \n
  • \n
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The Lustre metadata performance configuration of an Amazon FSx for Lustre\n file system using a PERSISTENT_2 deployment type. The configuration\n enables the file system to support increasing metadata performance.

" + } + }, "com.amazonaws.fsx#FileSystemMaintenanceOperation": { "type": "enum", "members": { @@ -7982,7 +8032,7 @@ "MountName": { "target": "com.amazonaws.fsx#LustreFileSystemMountName", "traits": { - "smithy.api#documentation": "

You use the MountName value when mounting the file system.

\n

For the SCRATCH_1 deployment type, this value is always \"fsx\". \n For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 deployment\n types, this value is a string that is unique within an Amazon Web Services Region. \n \n

" + "smithy.api#documentation": "

You use the MountName value when mounting the file system.

\n

For the SCRATCH_1 deployment type, this value is always \"fsx\". \n For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 deployment\n types, this value is a string that is unique within an Amazon Web Services Region. \n

" } }, "DailyAutomaticBackupStartTime": { @@ -8020,6 +8070,12 @@ "traits": { "smithy.api#documentation": "

The Lustre root squash configuration for an Amazon FSx for Lustre\n file system. When enabled, root squash restricts root-level access from clients that\n try to access your file system as a root user.

" } + }, + "MetadataConfiguration": { + "target": "com.amazonaws.fsx#FileSystemLustreMetadataConfiguration", + "traits": { + "smithy.api#documentation": "

The Lustre metadata performance configuration for an Amazon FSx for Lustre file system\n using a PERSISTENT_2 deployment type.

" + } } }, "traits": { @@ -8161,6 +8217,32 @@ } } }, + "com.amazonaws.fsx#MetadataConfigurationMode": { + "type": "enum", + "members": { + "AUTOMATIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTOMATIC" + } + }, + "USER_PROVISIONED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USER_PROVISIONED" + } + } + } + }, + "com.amazonaws.fsx#MetadataIops": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1500, + "max": 192000 + } + } + }, "com.amazonaws.fsx#MetadataStorageCapacity": { "type": "integer", "traits": { @@ -9677,42 +9759,42 @@ "UserName": { "target": "com.amazonaws.fsx#DirectoryUserName", "traits": { - "smithy.api#documentation": "

Specifies the updated user name for the service account on your self-managed AD domain.\n Amazon FSx uses this account to join to your self-managed AD domain.

\n

This account must have the permissions required to join\n computers to the domain in the organizational unit provided in\n OrganizationalUnitDistinguishedName.

" + "smithy.api#documentation": "

Specifies the updated user name for the service account on your self-managed Active Directory domain.\n Amazon FSx uses this account to join to your self-managed Active Directory domain.

\n

This account must have the permissions required to join\n computers to the domain in the organizational unit provided in\n OrganizationalUnitDistinguishedName.

" } }, "Password": { "target": "com.amazonaws.fsx#DirectoryPassword", "traits": { - "smithy.api#documentation": "

Specifies the updated password for the service account on your self-managed AD domain. \n Amazon FSx uses this account to join to your self-managed AD domain.

" + "smithy.api#documentation": "

Specifies the updated password for the service account on your self-managed Active Directory domain. \n Amazon FSx uses this account to join to your self-managed Active Directory domain.

" } }, "DnsIps": { "target": "com.amazonaws.fsx#DnsIps", "traits": { - "smithy.api#documentation": "

A list of up to three DNS server or domain controller IP addresses in your\n self-managed AD domain.

" + "smithy.api#documentation": "

A list of up to three DNS server or domain controller IP addresses in your\n self-managed Active Directory domain.

" } }, "DomainName": { "target": "com.amazonaws.fsx#ActiveDirectoryFullyQualifiedName", "traits": { - "smithy.api#documentation": "

Specifies an updated fully qualified domain name of your self-managed AD configuration.

" + "smithy.api#documentation": "

Specifies an updated fully qualified domain name of your self-managed Active Directory configuration.

" } }, "OrganizationalUnitDistinguishedName": { "target": "com.amazonaws.fsx#OrganizationalUnitDistinguishedName", "traits": { - "smithy.api#documentation": "

Specifies an updated fully qualified distinguished name of the organization unit within your self-managed AD.

" + "smithy.api#documentation": "

Specifies an updated fully qualified distinguished name of the organization unit within your self-managed Active Directory.

" } }, "FileSystemAdministratorsGroup": { "target": "com.amazonaws.fsx#FileSystemAdministratorsGroupName", "traits": { - "smithy.api#documentation": "

Specifies the updated name of the self-managed AD domain group whose members are granted administrative privileges\n for the Amazon FSx resource.

" + "smithy.api#documentation": "

For FSx for ONTAP file systems only - Specifies the updated name of the self-managed Active Directory domain group whose members are granted administrative privileges\n for the Amazon FSx resource.

" } } }, "traits": { - "smithy.api#documentation": "

Specifies changes you are making to the self-managed Microsoft Active Directory (AD) configuration to which \n an FSx for Windows File Server file system or an FSx for ONTAP SVM is joined.

" + "smithy.api#documentation": "

Specifies changes you are making to the self-managed Microsoft Active Directory configuration to which \n an FSx for Windows File Server file system or an FSx for ONTAP SVM is joined.

" } }, "com.amazonaws.fsx#ServiceLimit": { @@ -11212,7 +11294,7 @@ } ], "traits": { - "smithy.api#documentation": "

"com.amazonaws.fsx#ServiceLimit": { @@ -11212,7 +11294,7 @@ } ], "traits": {
- "smithy.api#documentation": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
• For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, SelfManagedActiveDirectoryConfiguration, StorageCapacity, StorageType, ThroughputCapacity, DiskIopsConfiguration, WeeklyMaintenanceStartTime.
• For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, DataCompressionType, LogConfiguration, LustreRootSquashConfiguration, PerUnitStorageThroughput, StorageCapacity, WeeklyMaintenanceStartTime.
• For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, DiskIopsConfiguration, FsxAdminPassword, HAPairs, RemoveRouteTableIds, StorageCapacity, ThroughputCapacity, ThroughputCapacityPerHAPair, WeeklyMaintenanceStartTime.
• For FSx for OpenZFS file systems, you can update the following properties: AddRouteTableIds, AutomaticBackupRetentionDays, CopyTagsToBackups, CopyTagsToVolumes, DailyAutomaticBackupStartTime, DiskIopsConfiguration, RemoveRouteTableIds, StorageCapacity, ThroughputCapacity, WeeklyMaintenanceStartTime."
+ "smithy.api#documentation": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request.
• For FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, SelfManagedActiveDirectoryConfiguration, StorageCapacity, StorageType, ThroughputCapacity, DiskIopsConfiguration, WeeklyMaintenanceStartTime.
• For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, DataCompressionType, LogConfiguration, LustreRootSquashConfiguration, MetadataConfiguration, PerUnitStorageThroughput, StorageCapacity, WeeklyMaintenanceStartTime.
• For FSx for ONTAP file systems, you can update the following properties: AddRouteTableIds, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, DiskIopsConfiguration, FsxAdminPassword, HAPairs, RemoveRouteTableIds, StorageCapacity, ThroughputCapacity, ThroughputCapacityPerHAPair, WeeklyMaintenanceStartTime.
• For FSx for OpenZFS file systems, you can update the following properties: AddRouteTableIds, AutomaticBackupRetentionDays, CopyTagsToBackups, CopyTagsToVolumes, DailyAutomaticBackupStartTime, DiskIopsConfiguration, RemoveRouteTableIds, StorageCapacity, ThroughputCapacity, WeeklyMaintenanceStartTime."
} }, "com.amazonaws.fsx#UpdateFileSystemLustreConfiguration": { @@ -11262,12 +11344,38 @@ "traits": { "smithy.api#documentation": "

The throughput of an Amazon FSx for Lustre Persistent SSD-based file system, measured in megabytes per second per tebibyte (MB/s/TiB). You can increase or decrease your file system's throughput. Valid values depend on the deployment type of the file system, as follows:
• For PERSISTENT_1 SSD-based deployment types, valid values are 50, 100, and 200 MB/s/TiB.
• For PERSISTENT_2 SSD-based deployment types, valid values are 125, 250, 500, and 1000 MB/s/TiB.
For more information, see Managing throughput capacity." } + },
+ "MetadataConfiguration": { "target": "com.amazonaws.fsx#UpdateFileSystemLustreMetadataConfiguration", "traits": {
+ "smithy.api#documentation": "The Lustre metadata performance configuration for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. When this configuration is enabled, the file system supports increasing metadata performance." + } } },
"traits": { "smithy.api#documentation": "The configuration object for Amazon FSx for Lustre file systems used in the UpdateFileSystem operation." } },

+ "com.amazonaws.fsx#UpdateFileSystemLustreMetadataConfiguration": { "type": "structure", "members": {
+ "Iops": { "target": "com.amazonaws.fsx#MetadataIops", "traits": {
+ "smithy.api#documentation": "(USER_PROVISIONED mode only) Specifies the number of Metadata IOPS to provision for your file system. Valid values are 1500, 3000, 6000, 12000, and multiples of 12000 up to a maximum of 192000. The value you provide must be greater than or equal to the current number of Metadata IOPS provisioned for the file system." } },
+ "Mode": { "target": "com.amazonaws.fsx#MetadataConfigurationMode", "traits": {
+ "smithy.api#documentation": "The metadata configuration mode for provisioning Metadata IOPS for an FSx for Lustre file system using a PERSISTENT_2 deployment type.
• To increase the Metadata IOPS or to switch from AUTOMATIC mode, specify USER_PROVISIONED as the value for this parameter. Then use the Iops parameter to provide a Metadata IOPS value that is greater than or equal to the current number of Metadata IOPS provisioned for the file system.
• To switch from USER_PROVISIONED mode, specify AUTOMATIC as the value for this parameter, but do not input a value for Iops. If you request to switch from USER_PROVISIONED to AUTOMATIC mode and the current Metadata IOPS value is greater than the automated default, FSx for Lustre rejects the request because downscaling Metadata IOPS is not supported." } } },
+ "traits": {
+ "smithy.api#documentation": "The Lustre metadata performance configuration update for an Amazon FSx for Lustre file system using a PERSISTENT_2 deployment type. You can request an increase in your file system's Metadata IOPS and/or switch your file system's metadata configuration mode. For more information, see Managing metadata performance in the Amazon FSx for Lustre User Guide." } },
"com.amazonaws.fsx#UpdateFileSystemOntapConfiguration": { "type": "structure", "members": {
diff --git a/models/global-accelerator.json b/models/global-accelerator.json
index 3f8300a1bc..7374c575f0 100644
--- a/models/global-accelerator.json
+++ b/models/global-accelerator.json
@@ -5860,6 +5860,12 @@ "smithy.api#documentation": "

The IP address type that an accelerator supports. For a standard accelerator, the value can be IPV4 or DUAL_STACK.

" } }, + "IpAddresses": { + "target": "com.amazonaws.globalaccelerator#IpAddresses", + "traits": { + "smithy.api#documentation": "

The IP addresses for an accelerator.

" + } + }, "Enabled": { "target": "com.amazonaws.globalaccelerator#GenericBoolean", "traits": { @@ -6094,6 +6100,12 @@ "smithy.api#documentation": "

The IP address type that an accelerator supports. For a custom routing accelerator, the value must be IPV4.

" } }, + "IpAddresses": { + "target": "com.amazonaws.globalaccelerator#IpAddresses", + "traits": { + "smithy.api#documentation": "

The IP addresses for an accelerator.

" + } + }, "Enabled": { "target": "com.amazonaws.globalaccelerator#GenericBoolean", "traits": { diff --git a/models/glue.json b/models/glue.json index 44f400bca3..08e65e4c69 100644 --- a/models/glue.json +++ b/models/glue.json @@ -159,6 +159,9 @@ { "target": "com.amazonaws.glue#CreateTrigger" }, + { + "target": "com.amazonaws.glue#CreateUsageProfile" + }, { "target": "com.amazonaws.glue#CreateUserDefinedFunction" }, @@ -237,6 +240,9 @@ { "target": "com.amazonaws.glue#DeleteTrigger" }, + { + "target": "com.amazonaws.glue#DeleteUsageProfile" + }, { "target": "com.amazonaws.glue#DeleteUserDefinedFunction" }, @@ -429,6 +435,9 @@ { "target": "com.amazonaws.glue#GetUnfilteredTableMetadata" }, + { + "target": "com.amazonaws.glue#GetUsageProfile" + }, { "target": "com.amazonaws.glue#GetUserDefinedFunction" }, @@ -507,6 +516,9 @@ { "target": "com.amazonaws.glue#ListTriggers" }, + { + "target": "com.amazonaws.glue#ListUsageProfiles" + }, { "target": "com.amazonaws.glue#ListWorkflows" }, @@ -666,6 +678,9 @@ { "target": "com.amazonaws.glue#UpdateTrigger" }, + { + "target": "com.amazonaws.glue#UpdateUsageProfile" + }, { "target": "com.amazonaws.glue#UpdateUserDefinedFunction" }, @@ -1655,6 +1670,16 @@ } } }, + "com.amazonaws.glue#AWSManagedClientApplicationReference": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.glue#AccessDeniedException": { "type": "structure", "members": { @@ -1938,6 +1963,12 @@ } } }, + "com.amazonaws.glue#AllowedValuesStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#ConfigValueString" + } + }, "com.amazonaws.glue#AlreadyExistsException": { "type": "structure", "members": { @@ -2340,6 +2371,111 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" } }, + "com.amazonaws.glue#AuthenticationConfiguration": { + "type": "structure", + "members": { + "AuthenticationType": { + "target": "com.amazonaws.glue#AuthenticationType", + "traits": { + "smithy.api#documentation": "

A structure containing the authentication configuration.

" + } + }, + "SecretArn": { + "target": "com.amazonaws.glue#SecretArn", + "traits": { + "smithy.api#documentation": "

The secret manager ARN to store credentials.

" + } + }, + "OAuth2Properties": { + "target": "com.amazonaws.glue#OAuth2Properties", + "traits": { + "smithy.api#documentation": "

The properties for OAuth2 authentication.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing the authentication configuration.

" + } + }, + "com.amazonaws.glue#AuthenticationConfigurationInput": { + "type": "structure", + "members": { + "AuthenticationType": { + "target": "com.amazonaws.glue#AuthenticationType", + "traits": { + "smithy.api#documentation": "

A structure containing the authentication configuration in the CreateConnection request.

" + } + }, + "SecretArn": { + "target": "com.amazonaws.glue#SecretArn", + "traits": { + "smithy.api#documentation": "

The secret manager ARN to store credentials in the CreateConnection request.

" + } + }, + "OAuth2Properties": { + "target": "com.amazonaws.glue#OAuth2PropertiesInput", + "traits": { + "smithy.api#documentation": "

The properties for OAuth2 authentication in the CreateConnection request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing the authentication configuration in the CreateConnection request.

" + } + }, + "com.amazonaws.glue#AuthenticationType": { + "type": "enum", + "members": { + "BASIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BASIC" + } + }, + "OAUTH2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OAUTH2" + } + }, + "CUSTOM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM" + } + } + } + }, + "com.amazonaws.glue#AuthorizationCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.glue#AuthorizationCodeProperties": { + "type": "structure", + "members": { + "AuthorizationCode": { + "target": "com.amazonaws.glue#AuthorizationCode", + "traits": { + "smithy.api#documentation": "

An authorization code to be used in the third leg of the AUTHORIZATION_CODE grant workflow. This is a single-use code which becomes invalid once exchanged for an access token, thus it is acceptable to have this value as a request parameter.

" + } + }, + "RedirectUri": { + "target": "com.amazonaws.glue#RedirectUri", + "traits": { + "smithy.api#documentation": "

The redirect URI where the user gets redirected to by the authorization server when issuing an authorization code. The URI is subsequently used when the authorization code is exchanged for an access token.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type workflow.

" + } + }, "com.amazonaws.glue#BackfillError": { "type": "structure", "members": { @@ -6407,6 +6543,57 @@ "target": "com.amazonaws.glue#Condition" } }, + "com.amazonaws.glue#ConfigValueString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" + } + }, + "com.amazonaws.glue#ConfigurationMap": { + "type": "map", + "key": { + "target": "com.amazonaws.glue#NameString" + }, + "value": { + "target": "com.amazonaws.glue#ConfigurationObject" + } + }, + "com.amazonaws.glue#ConfigurationObject": { + "type": "structure", + "members": { + "DefaultValue": { + "target": "com.amazonaws.glue#ConfigValueString", + "traits": { + "smithy.api#documentation": "

A default value for the parameter.

" + } + }, + "AllowedValues": { + "target": "com.amazonaws.glue#AllowedValuesStringList", + "traits": { + "smithy.api#documentation": "

A list of allowed values for the parameter.

" + } + }, + "MinValue": { + "target": "com.amazonaws.glue#ConfigValueString", + "traits": { + "smithy.api#documentation": "

A minimum allowed value for the parameter.

" + } + }, + "MaxValue": { + "target": "com.amazonaws.glue#ConfigValueString", + "traits": { + "smithy.api#documentation": "

A maximum allowed value for the parameter.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the values that an admin sets for each job or session parameter configured in a Glue usage profile.

" + } + }, "com.amazonaws.glue#ConflictException": { "type": "structure", "members": { @@ -6490,19 +6677,19 @@ "PhysicalConnectionRequirements": { "target": "com.amazonaws.glue#PhysicalConnectionRequirements", "traits": { - "smithy.api#documentation": "

A map of physical connection requirements, such as virtual private cloud (VPC) and\n SecurityGroup, that are needed to make this connection successfully.

" + "smithy.api#documentation": "

The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to make this connection successfully.

" } }, "CreationTime": { "target": "com.amazonaws.glue#Timestamp", "traits": { - "smithy.api#documentation": "

The time that this connection definition was created.

" + "smithy.api#documentation": "

The timestamp of the time that this connection definition was created.

" } }, "LastUpdatedTime": { "target": "com.amazonaws.glue#Timestamp", "traits": { - "smithy.api#documentation": "

The last time that this connection definition was updated.

" + "smithy.api#documentation": "

The timestamp of the last time the connection definition was updated.

" } }, "LastUpdatedBy": { @@ -6510,6 +6697,30 @@ "traits": { "smithy.api#documentation": "

The user, group, or role that last updated this connection definition.

" } + }, + "Status": { + "target": "com.amazonaws.glue#ConnectionStatus", + "traits": { + "smithy.api#documentation": "

The status of the connection. Can be one of: READY, IN_PROGRESS, or FAILED.

" + } + }, + "StatusReason": { + "target": "com.amazonaws.glue#LongValueString", + "traits": { + "smithy.api#documentation": "

The reason for the connection status.

" + } + }, + "LastConnectionValidationTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "

A timestamp of the time this connection was last validated.

" + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.glue#AuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "

The authentication properties of the connection.

" + } } }, "traits": { @@ -6522,7 +6733,7 @@ "Name": { "target": "com.amazonaws.glue#NameString", "traits": { - "smithy.api#documentation": "

The name of the connection. Connection will not function as expected without a name.",
+ "smithy.api#documentation": "The name of the connection.", "smithy.api#required": {} } },
@@ -6535,7 +6746,7 @@ "ConnectionType": { "target": "com.amazonaws.glue#ConnectionType", "traits": {
- "smithy.api#documentation": "The type of the connection. Currently, these types are supported:
• JDBC - Designates a connection to a database through Java Database Connectivity (JDBC). JDBC Connections use the following ConnectionParameters. Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.
• KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA Connections use the following ConnectionParameters. Required: KAFKA_BOOTSTRAP_SERVERS. Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA. Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA. Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM. Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA. Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA.
• MONGODB - Designates a connection to a MongoDB document database. MONGODB Connections use the following ConnectionParameters. Required: CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID.
• NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.
• MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue. MARKETPLACE Connections use the following ConnectionParameters. Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL. Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.
• CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.
SFTP is not supported. For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties. For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections."
+ "smithy.api#documentation": "The type of the connection. Currently, these types are supported:
• JDBC - Designates a connection to a database through Java Database Connectivity (JDBC). JDBC Connections use the following ConnectionParameters. Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.
• KAFKA - Designates a connection to an Apache Kafka streaming platform. KAFKA Connections use the following ConnectionParameters. Required: KAFKA_BOOTSTRAP_SERVERS. Optional: KAFKA_SSL_ENABLED, KAFKA_CUSTOM_CERT, KAFKA_SKIP_CUSTOM_CERT_VALIDATION. These parameters are used to configure SSL with KAFKA. Optional: KAFKA_CLIENT_KEYSTORE, KAFKA_CLIENT_KEYSTORE_PASSWORD, KAFKA_CLIENT_KEY_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD, ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD. These parameters are used to configure TLS client configuration with SSL in KAFKA. Optional: KAFKA_SASL_MECHANISM. Can be specified as SCRAM-SHA-512, GSSAPI, or AWS_MSK_IAM. Optional: KAFKA_SASL_SCRAM_USERNAME, KAFKA_SASL_SCRAM_PASSWORD, ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD. These parameters are used to configure SASL/SCRAM-SHA-512 authentication with KAFKA. Optional: KAFKA_SASL_GSSAPI_KEYTAB, KAFKA_SASL_GSSAPI_KRB5_CONF, KAFKA_SASL_GSSAPI_SERVICE, KAFKA_SASL_GSSAPI_PRINCIPAL. These parameters are used to configure SASL/GSSAPI authentication with KAFKA.
• MONGODB - Designates a connection to a MongoDB document database. MONGODB Connections use the following ConnectionParameters. Required: CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID.
• SALESFORCE - Designates a connection to Salesforce using OAuth authentication. Requires the AuthenticationConfiguration member to be configured.
• NETWORK - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). NETWORK Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.
• MARKETPLACE - Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue. MARKETPLACE Connections use the following ConnectionParameters. Required: CONNECTOR_TYPE, CONNECTOR_URL, CONNECTOR_CLASS_NAME, CONNECTION_URL. Required for JDBC CONNECTOR_TYPE connections: All of (USERNAME, PASSWORD) or SECRET_ID.
• CUSTOM - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.
SFTP is not supported. For more information about how optional ConnectionProperties are used to configure features in Glue, consult Glue connection properties. For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult Using connectors and connections.",
"smithy.api#required": {} } },

@@ -6555,7 +6766,20 @@ "PhysicalConnectionRequirements": { "target": "com.amazonaws.glue#PhysicalConnectionRequirements", "traits": { - "smithy.api#documentation": "A map of physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" + "smithy.api#documentation": "

The physical connection requirements, such as virtual private cloud (VPC) and SecurityGroup, that are needed to successfully make this connection.

" + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.glue#AuthenticationConfigurationInput", + "traits": { + "smithy.api#documentation": "

The authentication properties of the connection. Used for a Salesforce connection.

" + } + }, + "ValidateCredentials": { + "target": "com.amazonaws.glue#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A flag to validate the credentials during create connection. Used for a Salesforce connection. Default is true.

" } } }, @@ -6863,6 +7087,35 @@ "traits": { "smithy.api#enumValue": "KAFKA_SASL_GSSAPI_PRINCIPAL" } + }, + "ROLE_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ROLE_ARN" + } + } + } + }, + "com.amazonaws.glue#ConnectionStatus": { + "type": "enum", + "members": { + "READY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "READY" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } } } }, @@ -6910,6 +7163,12 @@ "traits": { "smithy.api#enumValue": "CUSTOM" } + }, + "SALESFORCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SALESFORCE" + } } } }, @@ -7847,7 +8106,14 @@ }, "com.amazonaws.glue#CreateConnectionResponse": { "type": "structure", - "members": {}, + "members": { + "CreateConnectionStatus": { + "target": "com.amazonaws.glue#ConnectionStatus", + "traits": { + "smithy.api#documentation": "

The status of the connection creation request. The request can take some time for certain authentication types, for example when creating an OAuth connection with token exchange over VPC.

" + } + } + }, "traits": { "smithy.api#output": {} } @@ -8643,6 +8909,12 @@ "smithy.api#required": {} } }, + "JobMode": { + "target": "com.amazonaws.glue#JobMode", + "traits": { + "smithy.api#documentation": "

A mode that describes how a job was created. Valid values are:
• SCRIPT - The job was created using the Glue Studio script editor.
• VISUAL - The job was created using the Glue Studio visual editor.
• NOTEBOOK - The job was created using an interactive sessions notebook.
When the JobMode field is missing or null, SCRIPT is assigned as the default value.
" + } + }, "Description": { "target": "com.amazonaws.glue#DescriptionString", "traits": { @@ -8713,7 +8985,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run\n can consume resources before it is terminated and enters TIMEOUT\n status. The default is 2,880 minutes (48 hours).

" + "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run\n can consume resources before it is terminated and enters TIMEOUT\n status. The default is 2,880 minutes (48 hours) for batch jobs.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -8775,6 +9047,12 @@ "traits": { "smithy.api#documentation": "

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

" } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.glue#MaintenanceWindow", + "traits": { + "smithy.api#documentation": "

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs. Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT and 1:00PM GMT.
" + } } }, "traits": { @@ -9971,6 +10249,86 @@ "smithy.api#output": {} } },

+ "com.amazonaws.glue#CreateUsageProfile": { "type": "operation", "input": { "target": "com.amazonaws.glue#CreateUsageProfileRequest" }, "output": { "target": "com.amazonaws.glue#CreateUsageProfileResponse" }, "errors": [ { "target": "com.amazonaws.glue#AlreadyExistsException" }, { "target": "com.amazonaws.glue#InternalServiceException" }, { "target": "com.amazonaws.glue#InvalidInputException" }, { "target": "com.amazonaws.glue#OperationNotSupportedException" }, { "target": "com.amazonaws.glue#OperationTimeoutException" }, { "target": "com.amazonaws.glue#ResourceNumberLimitExceededException" } ], "traits": {
+ "smithy.api#documentation": "Creates a Glue usage profile." } },
+ "com.amazonaws.glue#CreateUsageProfileRequest": { "type": "structure", "members": {
+ "Name": { "target": "com.amazonaws.glue#NameString", "traits": { "smithy.api#documentation": "The name of the usage profile.", "smithy.api#required": {} } },
+ "Description": { "target": "com.amazonaws.glue#DescriptionString", "traits": { "smithy.api#documentation": "A description of the usage profile." } },
+ "Configuration": { "target": "com.amazonaws.glue#ProfileConfiguration", "traits": { "smithy.api#documentation": "A ProfileConfiguration object specifying the job and session values for the profile.", "smithy.api#required": {} } },
+ "Tags": { "target": "com.amazonaws.glue#TagsMap", "traits": { "smithy.api#documentation": "A list of tags applied to the usage profile." } } },
+ "traits": { "smithy.api#input": {} } },
+ "com.amazonaws.glue#CreateUsageProfileResponse": { "type": "structure", "members": {
+ "Name": { "target": "com.amazonaws.glue#NameString", "traits": { "smithy.api#documentation": "The name of the usage profile that was created." } } },
+ "traits": { "smithy.api#output": {} } },

"com.amazonaws.glue#CreateUserDefinedFunction": { "type": "operation", "input": { @@ -10433,6 +10791,23 @@ "target": "com.amazonaws.glue#GenericString" } }, + "com.amazonaws.glue#DQCompositeRuleEvaluationMethod": { "type": "enum", "members": { "COLUMN": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "COLUMN" } }, "ROW": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "ROW" } } } }, "com.amazonaws.glue#DQDLAliases": { "type": "map", "key": { @@ -10668,6 +11043,12 @@ "traits": { "smithy.api#documentation": "Prefix for Amazon S3 to store results.

" } + }, + "CompositeRuleEvaluationMethod": { + "target": "com.amazonaws.glue#DQCompositeRuleEvaluationMethod", + "traits": { + "smithy.api#documentation": "

Set the evaluation method for composite rules in the ruleset to ROW/COLUMN

" + } } }, "traits": { @@ -13058,6 +13439,54 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#DeleteUsageProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#DeleteUsageProfileRequest" + }, + "output": { + "target": "com.amazonaws.glue#DeleteUsageProfileResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified Glue usage profile.

" + } + }, + "com.amazonaws.glue#DeleteUsageProfileRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the usage profile to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#DeleteUsageProfileResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#DeleteUserDefinedFunction": { "type": "operation", "input": { @@ -14401,6 +14830,30 @@ "com.amazonaws.glue#FederationSourceErrorCode": { "type": "enum", "members": { + "AccessDeniedException": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AccessDeniedException" + } + }, + "EntityNotFoundException": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EntityNotFoundException" + } + }, + "InvalidCredentialsException": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidCredentialsException" + } + }, + "InvalidInputException": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidInputException" + } + }, "InvalidResponseException": { "target": "smithy.api#Unit", "traits": { @@ -14425,6 +14878,12 @@ "smithy.api#enumValue": "InternalServiceException" } }, + "PartialFailureException": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PartialFailureException" + } + }, "ThrottlingException": { "target": "smithy.api#Unit", "traits": { @@ -16477,7 +16936,7 @@ "RulesetNames": { "target": "com.amazonaws.glue#RulesetNames", "traits": { - "smithy.api#documentation": "

A list of ruleset names for the run.

" + "smithy.api#documentation": "

A list of ruleset names for the run. Currently, this parameter takes only one Ruleset name.

" } }, "ResultIds": { @@ -17038,7 +17497,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the metadata for a given job run.

" + "smithy.api#documentation": "

Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run.

" } }, "com.amazonaws.glue#GetJobRunRequest": { @@ -17111,6 +17570,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "JobRuns", "pageSize": "MaxResults" } } @@ -17189,6 +17649,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Jobs", "pageSize": "MaxResults" } } @@ -19656,6 +20117,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Triggers", "pageSize": "MaxResults" } } @@ -20160,6 +20622,88 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#GetUsageProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetUsageProfileRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetUsageProfileResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about the specified Glue usage profile.

" + } + }, + "com.amazonaws.glue#GetUsageProfileRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the usage profile to retrieve.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#GetUsageProfileResponse": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the usage profile.

" + } + }, + "Description": { + "target": "com.amazonaws.glue#DescriptionString", + "traits": { + "smithy.api#documentation": "

A description of the usage profile.

" + } + }, + "Configuration": { + "target": "com.amazonaws.glue#ProfileConfiguration", + "traits": { + "smithy.api#documentation": "

A ProfileConfiguration object specifying the job and session values for the profile.

" + } + }, + "CreatedOn": { + "target": "com.amazonaws.glue#TimestampValue", + "traits": { + "smithy.api#documentation": "

The date and time when the usage profile was created.

" + } + }, + "LastModifiedOn": { + "target": "com.amazonaws.glue#TimestampValue", + "traits": { + "smithy.api#documentation": "

The date and time when the usage profile was last modified.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#GetUserDefinedFunction": { "type": "operation", "input": { @@ -20408,7 +20952,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the metadata for a given workflow run.

" + "smithy.api#documentation": "

Retrieves the metadata for a given workflow run. Job run history is accessible for 90 days for your workflow and job run.

" } }, "com.amazonaws.glue#GetWorkflowRunProperties": { @@ -20542,6 +21086,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Runs", "pageSize": "MaxResults" } } @@ -21901,6 +22446,12 @@ "smithy.api#documentation": "

The name you assign to this job definition.

" } }, + "JobMode": { + "target": "com.amazonaws.glue#JobMode", + "traits": { + "smithy.api#documentation": "

A mode that describes how a job was created. Valid values are:
• SCRIPT - The job was created using the Glue Studio script editor.
• VISUAL - The job was created using the Glue Studio visual editor.
• NOTEBOOK - The job was created using an interactive sessions notebook.
When the JobMode field is missing or null, SCRIPT is assigned as the default value.
" + } + }, "Description": { "target": "com.amazonaws.glue#DescriptionString", "traits": { @@ -21981,7 +22532,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run\n can consume resources before it is terminated and enters TIMEOUT\n status. The default is 2,880 minutes (48 hours).

" + "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run\n can consume resources before it is terminated and enters TIMEOUT\n status. The default is 2,880 minutes (48 hours) for batch jobs.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -22037,6 +22588,18 @@ "traits": { "smithy.api#documentation": "

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

" } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.glue#MaintenanceWindow", + "traits": { + "smithy.api#documentation": "

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

\n

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT and 1:00PM GMT.

" + } + }, + "ProfileName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of a Glue usage profile associated with the job.

" + } } }, "traits": { @@ -22171,6 +22734,29 @@ "target": "com.amazonaws.glue#Job" } }, + "com.amazonaws.glue#JobMode": { + "type": "enum", + "members": { + "SCRIPT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCRIPT" + } + }, + "VISUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VISUAL" + } + }, + "NOTEBOOK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOTEBOOK" + } + } + } + }, "com.amazonaws.glue#JobName": { "type": "string" }, @@ -22228,6 +22814,12 @@ "smithy.api#documentation": "

The name of the job definition being used in this run.

" } }, + "JobMode": { + "target": "com.amazonaws.glue#JobMode", + "traits": { + "smithy.api#documentation": "

A mode that describes how a job was created. Valid values are:
• SCRIPT - The job was created using the Glue Studio script editor.
• VISUAL - The job was created using the Glue Studio visual editor.
• NOTEBOOK - The job was created using an interactive sessions notebook.
When the JobMode field is missing or null, SCRIPT is assigned as the default value.
" + } + }, "StartedOn": { "target": "com.amazonaws.glue#TimestampValue", "traits": { @@ -22290,7 +22882,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -22338,7 +22930,7 @@ "DPUSeconds": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "

This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

" + "smithy.api#documentation": "

This field can be set for either job runs with execution class FLEX or when Auto Scaling is enabled, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.

" } }, "ExecutionClass": { @@ -22346,6 +22938,18 @@ "traits": { "smithy.api#documentation": "

Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.

\n

The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.

\n

Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.

" } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.glue#MaintenanceWindow", + "traits": { + "smithy.api#documentation": "

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

\n

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT and 1:00PM GMT.

" + } + }, + "ProfileName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of a Glue usage profile associated with the job run.

" + } } }, "traits": { @@ -22414,12 +23018,24 @@ "traits": { "smithy.api#enumValue": "WAITING" } + }, + "EXPIRED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EXPIRED" + } } } }, "com.amazonaws.glue#JobUpdate": { "type": "structure", "members": { + "JobMode": { + "target": "com.amazonaws.glue#JobMode", + "traits": { + "smithy.api#documentation": "

A mode that describes how a job was created. Valid values are:
• SCRIPT - The job was created using the Glue Studio script editor.
• VISUAL - The job was created using the Glue Studio visual editor.
• NOTEBOOK - The job was created using an interactive sessions notebook.
When the JobMode field is missing or null, SCRIPT is assigned as the default value.
" + } + }, "Description": { "target": "com.amazonaws.glue#DescriptionString", "traits": { @@ -22488,7 +23104,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run\n can consume resources before it is terminated and enters TIMEOUT\n status. The default is 2,880 minutes (48 hours).

" + "smithy.api#documentation": "

The job timeout in minutes. This is the maximum time that a job run\n can consume resources before it is terminated and enters TIMEOUT\n status. The default is 2,880 minutes (48 hours) for batch jobs.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -22544,6 +23160,12 @@ "traits": { "smithy.api#documentation": "

The details for a source control configuration for a job, allowing synchronization of job artifacts to or from a remote repository.

" } + }, + "MaintenanceWindow": { + "target": "com.amazonaws.glue#MaintenanceWindow", + "traits": { + "smithy.api#documentation": "

This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.

\n

Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT and 1:00PM GMT.

" + } } }, "traits": { @@ -23242,6 +23864,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Blueprints", "pageSize": "MaxResults" } } @@ -23982,6 +24605,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "JobNames", "pageSize": "MaxResults" } } @@ -24664,6 +25288,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "TriggerNames", "pageSize": "MaxResults" } } @@ -24720,6 +25345,78 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#ListUsageProfiles": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#ListUsageProfilesRequest" + }, + "output": { + "target": "com.amazonaws.glue#ListUsageProfilesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

List all the Glue usage profiles.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Profiles", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.glue#ListUsageProfilesRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.glue#OrchestrationToken", + "traits": { + "smithy.api#documentation": "

A continuation token, included if this is a continuation call.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.glue#OrchestrationPageSize200", + "traits": { + "smithy.api#documentation": "

The maximum number of usage profiles to return in a single response.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#ListUsageProfilesResponse": { + "type": "structure", + "members": { + "Profiles": { + "target": "com.amazonaws.glue#UsageProfileDefinitionList", + "traits": { + "smithy.api#documentation": "

A list of usage profile (UsageProfileDefinition) objects.

" + } + }, + "NextToken": { + "target": "com.amazonaws.glue#OrchestrationToken", + "traits": { + "smithy.api#documentation": "

A continuation token, present if the current list segment is not the last.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#ListWorkflows": { "type": "operation", "input": { @@ -24744,6 +25441,7 @@ "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "Workflows", "pageSize": "MaxResults" } } @@ -24937,6 +25635,15 @@ "smithy.api#default": 0 } }, + "com.amazonaws.glue#LongValueString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 16384 + } + } + }, "com.amazonaws.glue#MLTransform": { "type": "structure", "members": { @@ -25113,6 +25820,12 @@ } } }, + "com.amazonaws.glue#MaintenanceWindow": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(Sun|Mon|Tue|Wed|Thu|Fri|Sat):([01]?[0-9]|2[0-3])$" + } + }, "com.amazonaws.glue#ManyInputs": { "type": "list", "member": { @@ -25881,6 +26594,119 @@ "com.amazonaws.glue#NullableString": { "type": "string" }, + "com.amazonaws.glue#OAuth2ClientApplication": { + "type": "structure", + "members": { + "UserManagedClientApplicationClientId": { + "target": "com.amazonaws.glue#UserManagedClientApplicationClientId", + "traits": { + "smithy.api#documentation": "

The client application clientID if the ClientAppType is USER_MANAGED.

" + } + }, + "AWSManagedClientApplicationReference": { + "target": "com.amazonaws.glue#AWSManagedClientApplicationReference", + "traits": { + "smithy.api#documentation": "

The reference to the SaaS-side client app that is Amazon Web Services managed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The OAuth2 client app used for the connection.

" + } + }, + "com.amazonaws.glue#OAuth2GrantType": { + "type": "enum", + "members": { + "AUTHORIZATION_CODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTHORIZATION_CODE" + } + }, + "CLIENT_CREDENTIALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLIENT_CREDENTIALS" + } + }, + "JWT_BEARER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JWT_BEARER" + } + } + } + }, + "com.amazonaws.glue#OAuth2Properties": { + "type": "structure", + "members": { + "OAuth2GrantType": { + "target": "com.amazonaws.glue#OAuth2GrantType", + "traits": { + "smithy.api#documentation": "

The OAuth2 grant type. For example, AUTHORIZATION_CODE, JWT_BEARER, or CLIENT_CREDENTIALS.

" + } + }, + "OAuth2ClientApplication": { + "target": "com.amazonaws.glue#OAuth2ClientApplication", + "traits": { + "smithy.api#documentation": "

The client application type. For example, AWS_MANAGED or USER_MANAGED.

" + } + }, + "TokenUrl": { + "target": "com.amazonaws.glue#TokenUrl", + "traits": { + "smithy.api#documentation": "

The URL of the provider's authentication server, to exchange an authorization code for an access token.

" + } + }, + "TokenUrlParametersMap": { + "target": "com.amazonaws.glue#TokenUrlParametersMap", + "traits": { + "smithy.api#documentation": "

A map of parameters that are added to the token GET request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing properties for OAuth2 authentication.

" + } + }, + "com.amazonaws.glue#OAuth2PropertiesInput": { + "type": "structure", + "members": { + "OAuth2GrantType": { + "target": "com.amazonaws.glue#OAuth2GrantType", + "traits": { + "smithy.api#documentation": "

The OAuth2 grant type in the CreateConnection request. For example, AUTHORIZATION_CODE, JWT_BEARER, or CLIENT_CREDENTIALS.

" + } + }, + "OAuth2ClientApplication": { + "target": "com.amazonaws.glue#OAuth2ClientApplication", + "traits": { + "smithy.api#documentation": "

The client application type in the CreateConnection request. For example, AWS_MANAGED or USER_MANAGED.

" + } + }, + "TokenUrl": { + "target": "com.amazonaws.glue#TokenUrl", + "traits": { + "smithy.api#documentation": "

The URL of the provider's authentication server, to exchange an authorization code for an access token.

" + } + }, + "TokenUrlParametersMap": { + "target": "com.amazonaws.glue#TokenUrlParametersMap", + "traits": { + "smithy.api#documentation": "

A map of parameters that are added to the token GET request.

" + } + }, + "AuthorizationCodeProperties": { + "target": "com.amazonaws.glue#AuthorizationCodeProperties", + "traits": { + "smithy.api#documentation": "

The set of properties required for the OAuth2 AUTHORIZATION_CODE grant type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing properties for OAuth2 in the CreateConnection request.

" + } + }, "com.amazonaws.glue#OneInput": { "type": "list", "member": { @@ -25907,6 +26733,21 @@ "smithy.api#documentation": "

A structure representing an open format table.

" } }, + "com.amazonaws.glue#OperationNotSupportedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.glue#MessageString", + "traits": { + "smithy.api#documentation": "

A message describing the problem.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The operation is not available in the region.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.glue#OperationTimeoutException": { "type": "structure", "members": { @@ -26764,12 +27605,12 @@ "AvailabilityZone": { "target": "com.amazonaws.glue#NameString", "traits": { - "smithy.api#documentation": "

The connection's Availability Zone. This field is redundant because the specified subnet\n implies the Availability Zone to be used. Currently the field must be populated, but it will\n be deprecated in the future.

" + "smithy.api#documentation": "

The connection's Availability Zone.

" } } }, "traits": { - "smithy.api#documentation": "

Specifies the physical requirements for a connection.

" + "smithy.api#documentation": "

The OAuth client app in GetConnection response.

" } }, "com.amazonaws.glue#PiiType": { @@ -27013,6 +27854,26 @@ } } }, + "com.amazonaws.glue#ProfileConfiguration": { + "type": "structure", + "members": { + "SessionConfiguration": { + "target": "com.amazonaws.glue#ConfigurationMap", + "traits": { + "smithy.api#documentation": "

A key-value map of configuration parameters for Glue sessions.

" + } + }, + "JobConfiguration": { + "target": "com.amazonaws.glue#ConfigurationMap", + "traits": { + "smithy.api#documentation": "

A key-value map of configuration parameters for Glue jobs.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the job and session values that an admin configures in an Glue usage profile.

" + } + }, "com.amazonaws.glue#PropertyPredicate": { "type": "structure", "members": { @@ -27652,6 +28513,16 @@ "smithy.api#documentation": "

When crawling an Amazon S3 data source after the first crawl is complete, specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run. For more information, see Incremental Crawls in Glue in the developer guide.

" } }, + "com.amazonaws.glue#RedirectUri": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^(https?):\\/\\/[^\\s/$.?#].[^\\s]*$" + } + }, "com.amazonaws.glue#RedshiftSource": { "type": "structure", "members": { @@ -30107,6 +30978,12 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#SecretArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws(-(cn|us-gov|iso(-[bef])?))?:secretsmanager:.*$" + } + }, "com.amazonaws.glue#SecurityConfiguration": { "type": "structure", "members": { @@ -30411,6 +31288,12 @@ "traits": { "smithy.api#documentation": "

The number of minutes when idle before the session times out.

" } + }, + "ProfileName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of a Glue usage profile associated with the session.

" + } } }, "traits": { @@ -31786,7 +32669,7 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).

" + "smithy.api#documentation": "

The JobRun timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT status. This value overrides the timeout value set in the parent job.

\n

Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days if you have not set up a maintenance window. If you have set up a maintenance window, it will be restarted during the maintenance window after 7 days.

" } }, "MaxCapacity": { @@ -33111,6 +33994,12 @@ "traits": { "smithy.api#documentation": "

A TableIdentifier structure that describes a target table for resource linking.

" } + }, + "ViewDefinition": { + "target": "com.amazonaws.glue#ViewDefinitionInput", + "traits": { + "smithy.api#documentation": "

A structure that contains all the information that defines the view, including the dialect or dialects for the view, and the query.

" + } } }, "traits": { @@ -33765,6 +34654,43 @@ "com.amazonaws.glue#Token": { "type": "string" }, + "com.amazonaws.glue#TokenUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" + } + }, + "com.amazonaws.glue#TokenUrlParameterKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.glue#TokenUrlParameterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.glue#TokenUrlParametersMap": { + "type": "map", + "key": { + "target": "com.amazonaws.glue#TokenUrlParameterKey" + }, + "value": { + "target": "com.amazonaws.glue#TokenUrlParameterValue" + } + }, "com.amazonaws.glue#Topk": { "type": "integer", "traits": { @@ -36218,6 +37144,19 @@ "traits": { "smithy.api#documentation": "

The version ID at which to update the table contents.

" } + }, + "ViewUpdateAction": { + "target": "com.amazonaws.glue#ViewUpdateAction", + "traits": { + "smithy.api#documentation": "

The operation to be performed when updating the view.

" + } + }, + "Force": { + "target": "com.amazonaws.glue#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A flag that can be set to true to ignore the storage descriptor and subobject matching requirements.

" + } } }, "traits": { @@ -36296,6 +37235,80 @@ "smithy.api#output": {} } }, + "com.amazonaws.glue#UpdateUsageProfile": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#UpdateUsageProfileRequest" + }, + "output": { + "target": "com.amazonaws.glue#UpdateUsageProfileResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationNotSupportedException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Update a Glue usage profile.

" + } + }, + "com.amazonaws.glue#UpdateUsageProfileRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the usage profile.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.glue#DescriptionString", + "traits": { + "smithy.api#documentation": "

A description of the usage profile.

" + } + }, + "Configuration": { + "target": "com.amazonaws.glue#ProfileConfiguration", + "traits": { + "smithy.api#documentation": "

A ProfileConfiguration object specifying the job and session values for the profile.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#UpdateUsageProfileResponse": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the usage profile that was updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#UpdateUserDefinedFunction": { "type": "operation", "input": { @@ -36502,6 +37515,44 @@ "com.amazonaws.glue#UriString": { "type": "string" }, + "com.amazonaws.glue#UsageProfileDefinition": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the usage profile.

" + } + }, + "Description": { + "target": "com.amazonaws.glue#DescriptionString", + "traits": { + "smithy.api#documentation": "

A description of the usage profile.

" + } + }, + "CreatedOn": { + "target": "com.amazonaws.glue#TimestampValue", + "traits": { + "smithy.api#documentation": "

The date and time when the usage profile was created.

" + } + }, + "LastModifiedOn": { + "target": "com.amazonaws.glue#TimestampValue", + "traits": { + "smithy.api#documentation": "

The date and time when the usage profile was last modified.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a Glue usage profile.

" + } + }, + "com.amazonaws.glue#UsageProfileDefinitionList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#UsageProfileDefinition" + } + }, "com.amazonaws.glue#UserDefinedFunction": { "type": "structure", "members": { @@ -36602,6 +37653,16 @@ "target": "com.amazonaws.glue#UserDefinedFunction" } }, + "com.amazonaws.glue#UserManagedClientApplicationClientId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.glue#ValidationException": { "type": "structure", "members": { @@ -36714,6 +37775,38 @@ "smithy.api#documentation": "

A structure containing details for representations.

" } }, + "com.amazonaws.glue#ViewDefinitionInput": { + "type": "structure", + "members": { + "IsProtected": { + "target": "com.amazonaws.glue#NullableBoolean", + "traits": { + "smithy.api#documentation": "

You can set this flag as true to instruct the engine not to push user-provided operations into the logical plan of the view during query planning. However, setting this flag does not guarantee that the engine will comply. Refer to the engine's documentation to understand the guarantees provided, if any.

" + } + }, + "Definer": { + "target": "com.amazonaws.glue#ArnString", + "traits": { + "smithy.api#documentation": "

The definer of a view in SQL.

" + } + }, + "Representations": { + "target": "com.amazonaws.glue#ViewRepresentationInputList", + "traits": { + "smithy.api#documentation": "

A list of structures that contains the dialect of the view, and the query that defines the view.

" + } + }, + "SubObjects": { + "target": "com.amazonaws.glue#ViewSubObjectsList", + "traits": { + "smithy.api#documentation": "

A list of base table ARNs that make up the view.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing details for creating or updating a Glue view.

" + } + }, "com.amazonaws.glue#ViewDialect": { "type": "enum", "members": { @@ -36770,7 +37863,13 @@ "ViewExpandedText": { "target": "com.amazonaws.glue#ViewTextString", "traits": { - "smithy.api#documentation": "

The expanded SQL for the view. This SQL is used by engines while processing a query on a view. Engines may perform operations during view creation to transform ViewOriginalText to ViewExpandedText. For example:

\n
    \n
  • \n

    Fully qualify identifiers: SELECT * from table1 → SELECT * from db1.table1\n

    \n
  • \n
" + "smithy.api#documentation": "

The expanded SQL for the view. This SQL is used by engines while processing a query on a view. Engines may perform operations during view creation to transform ViewOriginalText to ViewExpandedText. For example:

\n
    \n
  • \n

    Fully qualified identifiers: SELECT * from table1 -> SELECT * from db1.table1\n

    \n
  • \n
" + } + }, + "ValidationConnection": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the connection to be used to validate the specific representation of the view.

" } }, "IsStale": { @@ -36784,6 +37883,56 @@ "smithy.api#documentation": "

A structure that contains the dialect of the view, and the query that defines the view.

" } }, + "com.amazonaws.glue#ViewRepresentationInput": { + "type": "structure", + "members": { + "Dialect": { + "target": "com.amazonaws.glue#ViewDialect", + "traits": { + "smithy.api#documentation": "

A parameter that specifies the engine type of a specific representation.

" + } + }, + "DialectVersion": { + "target": "com.amazonaws.glue#ViewDialectVersionString", + "traits": { + "smithy.api#documentation": "

A parameter that specifies the version of the engine of a specific representation.

" + } + }, + "ViewOriginalText": { + "target": "com.amazonaws.glue#ViewTextString", + "traits": { + "smithy.api#documentation": "

A string that represents the original SQL query that describes the view.

" + } + }, + "ValidationConnection": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

The name of the connection to be used to validate the specific representation of the view.

" + } + }, + "ViewExpandedText": { + "target": "com.amazonaws.glue#ViewTextString", + "traits": { + "smithy.api#documentation": "

A string that represents the SQL query that describes the view with expanded resource ARNs.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing details of a representation to update or create a Lake Formation view.

" + } + }, + "com.amazonaws.glue#ViewRepresentationInputList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#ViewRepresentationInput" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.glue#ViewRepresentationList": { "type": "list", "member": { @@ -36817,6 +37966,35 @@ } } }, + "com.amazonaws.glue#ViewUpdateAction": { + "type": "enum", + "members": { + "ADD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ADD" + } + }, + "REPLACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLACE" + } + }, + "ADD_OR_REPLACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ADD_OR_REPLACE" + } + }, + "DROP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DROP" + } + } + } + }, "com.amazonaws.glue#WorkerType": { "type": "enum", "members": { diff --git a/models/grafana.json b/models/grafana.json index f95fbc801c..2f64bbcab0 100644 --- a/models/grafana.json +++ b/models/grafana.json @@ -34,6 +34,12 @@ { "target": "com.amazonaws.grafana#Permission" }, + { + "target": "com.amazonaws.grafana#ServiceAccount" + }, + { + "target": "com.amazonaws.grafana#ServiceAccountToken" + }, { "target": "com.amazonaws.grafana#Workspace" } @@ -981,7 +987,7 @@ } ], "traits": { - "smithy.api#documentation": "

Assigns a Grafana Enterprise license to a workspace. Upgrading to Grafana Enterprise\n incurs additional fees. For more information, see Upgrade a\n workspace to Grafana Enterprise.

", + "smithy.api#documentation": "

Assigns a Grafana Enterprise license to a workspace. To upgrade, you must use\n ENTERPRISE for the licenseType, and pass in a valid\n Grafana Labs token for the grafanaToken. Upgrading to Grafana Enterprise\n incurs additional fees. For more information, see Upgrade a\n workspace to Grafana Enterprise.

", "smithy.api#http": { "code": 202, "method": "POST", @@ -1011,7 +1017,7 @@ "grafanaToken": { "target": "com.amazonaws.grafana#GrafanaToken", "traits": { - "smithy.api#documentation": "

A token from Grafana Labs that ties your Amazon Web Services account with a Grafana \n Labs account. For more information, see Register with Grafana Labs.

", + "smithy.api#documentation": "

A token from Grafana Labs that ties your Amazon Web Services account with a Grafana \n Labs account. For more information, see Link your account with Grafana Labs.

", "smithy.api#httpHeader": "Grafana-Token" } } @@ -1262,7 +1268,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Grafana API key for the workspace. This key can be used to authenticate\n requests sent to the workspace's HTTP API. See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html\n for available APIs and example requests.

", + "smithy.api#documentation": "

Creates a Grafana API key for the workspace. This key can be used to authenticate\n requests sent to the workspace's HTTP API. See https://docs.aws.amazon.com/grafana/latest/userguide/Using-Grafana-APIs.html\n for available APIs and example requests.

\n \n

In workspaces compatible with Grafana version 9 or above, use workspace service \n accounts instead of API keys. API keys will be removed in a future release.

\n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -1283,7 +1289,7 @@ "keyRole": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Specifies the permission level of the key.

\n

Valid values: VIEWER|EDITOR|ADMIN\n

", + "smithy.api#documentation": "

Specifies the permission level of the key.

\n

Valid values: ADMIN|EDITOR|VIEWER\n

", "smithy.api#required": {} } }, @@ -1440,7 +1446,7 @@ "grafanaVersion": { "target": "com.amazonaws.grafana#GrafanaVersion", "traits": { - "smithy.api#documentation": "

Specifies the version of Grafana to support in the new workspace. If not specified, \n defaults to the latest version (for example, 9.4).

\n

To get a list of supported versions, use the ListVersions\n operation.

" + "smithy.api#documentation": "

Specifies the version of Grafana to support in the new workspace. If not specified, \n defaults to the latest version (for example, 10.4).

\n

To get a list of supported versions, use the ListVersions\n operation.

" } } } @@ -1457,6 +1463,223 @@ } } }, + "com.amazonaws.grafana#CreateWorkspaceServiceAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.grafana#CreateWorkspaceServiceAccountRequest" + }, + "output": { + "target": "com.amazonaws.grafana#CreateWorkspaceServiceAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.grafana#AccessDeniedException" + }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, + { + "target": "com.amazonaws.grafana#InternalServerException" + }, + { + "target": "com.amazonaws.grafana#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.grafana#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.grafana#ThrottlingException" + }, + { + "target": "com.amazonaws.grafana#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a service account for the workspace. A service account can be used to call \n Grafana HTTP APIs, and run automated workloads. After creating the service account with\n the correct GrafanaRole for your use case, use \n CreateWorkspaceServiceAccountToken to create a token that can be used to\n authenticate and authorize Grafana HTTP API calls.

\n

You can only create service accounts for workspaces that are compatible with Grafana\n version 9 and above.

\n \n

For more information about service accounts, see Service accounts in \n the Amazon Managed Grafana User Guide.

\n

For more information about the Grafana HTTP APIs, see Using Grafana HTTP \n APIs in the Amazon Managed Grafana User Guide.

\n
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/workspaces/{workspaceId}/serviceaccounts" + } + } + }, + "com.amazonaws.grafana#CreateWorkspaceServiceAccountRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.grafana#ServiceAccountName", + "traits": { + "smithy.api#documentation": "

A name for the service account. The name must be unique within the workspace, as it\n determines the ID associated with the service account.

", + "smithy.api#required": {} + } + }, + "grafanaRole": { + "target": "com.amazonaws.grafana#Role", + "traits": { + "smithy.api#documentation": "

The permission level to use for this service account.

\n \n

For more information about the roles and the permissions each has, see User\n roles in the Amazon Managed Grafana User Guide.

\n
", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace within which to create the service account.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.grafana#CreateWorkspaceServiceAccountResponse": { + "type": "structure", + "members": { + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the service account.

", + "smithy.api#required": {} + } + }, + "grafanaRole": { + "target": "com.amazonaws.grafana#Role", + "traits": { + "smithy.api#documentation": "

The permission level given to the service account.

", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The workspace with which the service account is associated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.grafana#CreateWorkspaceServiceAccountToken": { + "type": "operation", + "input": { + "target": "com.amazonaws.grafana#CreateWorkspaceServiceAccountTokenRequest" + }, + "output": { + "target": "com.amazonaws.grafana#CreateWorkspaceServiceAccountTokenResponse" + }, + "errors": [ + { + "target": "com.amazonaws.grafana#AccessDeniedException" + }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, + { + "target": "com.amazonaws.grafana#InternalServerException" + }, + { + "target": "com.amazonaws.grafana#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.grafana#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.grafana#ThrottlingException" + }, + { + "target": "com.amazonaws.grafana#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a token that can be used to authenticate and authorize Grafana HTTP API\n operations for the given workspace service \n account. The service account acts as a user for the API operations, and\n defines the permissions that are used by the API.

\n \n

When you create the service account token, you will receive a key that is used\n when calling Grafana APIs. Do not lose this key, as it will not be retrievable\n again.

\n

If you do lose the key, you can delete the token and recreate it to receive a \n new key. This will disable the initial key.

\n
\n

Service accounts are only available for workspaces that are compatible with Grafana\n version 9 and above.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens" + } + } + }, + "com.amazonaws.grafana#CreateWorkspaceServiceAccountTokenRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.grafana#ServiceAccountTokenName", + "traits": { + "smithy.api#documentation": "

A name for the token to create.

", + "smithy.api#required": {} + } + }, + "secondsToLive": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Sets how long the token will be valid, in seconds. You can set the time up to 30 \n days in the future.

", + "smithy.api#range": { + "min": 1, + "max": 2592000 + }, + "smithy.api#required": {} + } + }, + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account for which to create a token.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace the service account resides within.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.grafana#CreateWorkspaceServiceAccountTokenResponse": { + "type": "structure", + "members": { + "serviceAccountToken": { + "target": "com.amazonaws.grafana#ServiceAccountTokenSummaryWithKey", + "traits": { + "smithy.api#documentation": "

Information about the created token, including the key. Be sure to store the key\n securely.

", + "smithy.api#required": {} + } + }, + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account where the token was created.

", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace where the token was created.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.grafana#DataSourceType": { "type": "string", "traits": { @@ -1582,7 +1805,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a Grafana API key for the workspace.

", + "smithy.api#documentation": "

Deletes a Grafana API key for the workspace.

\n \n

In workspaces compatible with Grafana version 9 or above, use workspace service \n accounts instead of API keys. API keys will be removed in a future release.

\n
", "smithy.api#http": { "code": 200, "method": "DELETE", @@ -1655,18 +1878,21 @@ } } }, - "com.amazonaws.grafana#DescribeWorkspace": { + "com.amazonaws.grafana#DeleteWorkspaceServiceAccount": { "type": "operation", "input": { - "target": "com.amazonaws.grafana#DescribeWorkspaceRequest" + "target": "com.amazonaws.grafana#DeleteWorkspaceServiceAccountRequest" }, "output": { - "target": "com.amazonaws.grafana#DescribeWorkspaceResponse" + "target": "com.amazonaws.grafana#DeleteWorkspaceServiceAccountResponse" }, "errors": [ { "target": "com.amazonaws.grafana#AccessDeniedException" }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, { "target": "com.amazonaws.grafana#InternalServerException" }, @@ -1681,27 +1907,75 @@ } ], "traits": { - "smithy.api#documentation": "

Displays information about one Amazon Managed Grafana workspace.

", + "smithy.api#documentation": "

Deletes a workspace service account from the workspace.

\n

This will delete any tokens created for the service account, as well. If the tokens\n are currently in use, they will fail to authenticate / authorize after they are \n deleted.

\n

Service accounts are only available for workspaces that are compatible with Grafana\n version 9 and above.

", "smithy.api#http": { "code": 200, - "method": "GET", - "uri": "/workspaces/{workspaceId}" + "method": "DELETE", + "uri": "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}" + } + } + }, + "com.amazonaws.grafana#DeleteWorkspaceServiceAccountRequest": { + "type": "structure", + "members": { + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } }, - "smithy.api#readonly": {} + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace where the service account resides.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.grafana#DescribeWorkspaceAuthentication": { + "com.amazonaws.grafana#DeleteWorkspaceServiceAccountResponse": { + "type": "structure", + "members": { + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account deleted.

", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace where the service account was deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.grafana#DeleteWorkspaceServiceAccountToken": { "type": "operation", "input": { - "target": "com.amazonaws.grafana#DescribeWorkspaceAuthenticationRequest" + "target": "com.amazonaws.grafana#DeleteWorkspaceServiceAccountTokenRequest" }, "output": { - "target": "com.amazonaws.grafana#DescribeWorkspaceAuthenticationResponse" + "target": "com.amazonaws.grafana#DeleteWorkspaceServiceAccountTokenResponse" }, "errors": [ { "target": "com.amazonaws.grafana#AccessDeniedException" }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, { "target": "com.amazonaws.grafana#InternalServerException" }, @@ -1716,29 +1990,162 @@ } ], "traits": { - "smithy.api#documentation": "

Displays information about the authentication methods used in one Amazon Managed Grafana\n workspace.

", + "smithy.api#documentation": "

Deletes a token for the workspace service account.

\n

This will disable the key associated with the token. If any automation is currently \n using the key, it will no longer be authenticated or authorized to perform actions with \n the Grafana HTTP APIs.

\n

Service accounts are only available for workspaces that are compatible with Grafana\n version 9 and above.

", "smithy.api#http": { "code": 200, - "method": "GET", - "uri": "/workspaces/{workspaceId}/authentication" - }, - "smithy.api#readonly": {} + "method": "DELETE", + "uri": "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens/{tokenId}" + } } }, - "com.amazonaws.grafana#DescribeWorkspaceAuthenticationRequest": { + "com.amazonaws.grafana#DeleteWorkspaceServiceAccountTokenRequest": { "type": "structure", "members": { - "workspaceId": { - "target": "com.amazonaws.grafana#WorkspaceId", + "tokenId": { + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the workspace to return authentication information about.

", + "smithy.api#documentation": "

The ID of the token to delete.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } - } - } - }, - "com.amazonaws.grafana#DescribeWorkspaceAuthenticationResponse": { + }, + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account from which to delete the token.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace from which to delete the token.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.grafana#DeleteWorkspaceServiceAccountTokenResponse": { + "type": "structure", + "members": { + "tokenId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the token that was deleted.

", + "smithy.api#required": {} + } + }, + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account where the token was deleted.

", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace where the token was deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.grafana#DescribeWorkspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.grafana#DescribeWorkspaceRequest" + }, + "output": { + "target": "com.amazonaws.grafana#DescribeWorkspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.grafana#AccessDeniedException" + }, + { + "target": "com.amazonaws.grafana#InternalServerException" + }, + { + "target": "com.amazonaws.grafana#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.grafana#ThrottlingException" + }, + { + "target": "com.amazonaws.grafana#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Displays information about one Amazon Managed Grafana workspace.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/workspaces/{workspaceId}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.grafana#DescribeWorkspaceAuthentication": { + "type": "operation", + "input": { + "target": "com.amazonaws.grafana#DescribeWorkspaceAuthenticationRequest" + }, + "output": { + "target": "com.amazonaws.grafana#DescribeWorkspaceAuthenticationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.grafana#AccessDeniedException" + }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, + { + "target": "com.amazonaws.grafana#InternalServerException" + }, + { + "target": "com.amazonaws.grafana#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.grafana#ThrottlingException" + }, + { + "target": "com.amazonaws.grafana#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Displays information about the authentication methods used in one Amazon Managed Grafana\n workspace.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/workspaces/{workspaceId}/authentication" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.grafana#DescribeWorkspaceAuthenticationRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace to return authentication information about.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.grafana#DescribeWorkspaceAuthenticationResponse": { "type": "structure", "members": { "authentication": { @@ -2322,6 +2729,237 @@ "smithy.api#output": {} } }, + "com.amazonaws.grafana#ListWorkspaceServiceAccountTokens": { + "type": "operation", + "input": { + "target": "com.amazonaws.grafana#ListWorkspaceServiceAccountTokensRequest" + }, + "output": { + "target": "com.amazonaws.grafana#ListWorkspaceServiceAccountTokensResponse" + }, + "errors": [ + { + "target": "com.amazonaws.grafana#AccessDeniedException" + }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, + { + "target": "com.amazonaws.grafana#InternalServerException" + }, + { + "target": "com.amazonaws.grafana#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.grafana#ThrottlingException" + }, + { + "target": "com.amazonaws.grafana#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of tokens for a workspace service account.

\n \n

This does not return the key for each token. You cannot access keys after they\n are created. To create a new key, delete the token and recreate it.

\n
\n

Service accounts are only available for workspaces that are compatible with Grafana\n version 9 and above.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "serviceAccountTokens" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.grafana#ListWorkspaceServiceAccountTokensRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to include in the results.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.grafana#PaginationToken", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The token for the next set of service account tokens to return. (You receive this token\n from a previous ListWorkspaceServiceAccountTokens operation.)

", + "smithy.api#httpQuery": "nextToken" + } + }, + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account for which to return tokens.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace for which to return tokens.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.grafana#ListWorkspaceServiceAccountTokensResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.grafana#PaginationToken", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The token to use when requesting the next set of service account tokens.

" + } + }, + "serviceAccountTokens": { + "target": "com.amazonaws.grafana#ServiceAccountTokenList", + "traits": { + "smithy.api#documentation": "

An array of structures containing information about the tokens.

", + "smithy.api#required": {} + } + }, + "serviceAccountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The ID of the service account where the tokens reside.

", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The ID of the workspace where the tokens reside.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.grafana#ListWorkspaceServiceAccounts": { + "type": "operation", + "input": { + "target": "com.amazonaws.grafana#ListWorkspaceServiceAccountsRequest" + }, + "output": { + "target": "com.amazonaws.grafana#ListWorkspaceServiceAccountsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.grafana#AccessDeniedException" + }, + { + "target": "com.amazonaws.grafana#ConflictException" + }, + { + "target": "com.amazonaws.grafana#InternalServerException" + }, + { + "target": "com.amazonaws.grafana#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.grafana#ThrottlingException" + }, + { + "target": "com.amazonaws.grafana#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of service accounts for a workspace.

\n

Service accounts are only available for workspaces that are compatible with Grafana\n version 9 and above.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/workspaces/{workspaceId}/serviceaccounts" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "serviceAccounts" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.grafana#ListWorkspaceServiceAccountsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of service accounts to include in the results.

", + "smithy.api#httpQuery": "maxResults", + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "nextToken": { + "target": "com.amazonaws.grafana#PaginationToken", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The token for the next set of service accounts to return. (You receive this token\n from a previous ListWorkspaceServiceAccounts operation.)

", + "smithy.api#httpQuery": "nextToken" + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The workspace for which to list service accounts.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.grafana#ListWorkspaceServiceAccountsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.grafana#PaginationToken", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The token to use when requesting the next set of service accounts.

" + } + }, + "serviceAccounts": { + "target": "com.amazonaws.grafana#ServiceAccountList", + "traits": { + "smithy.api#documentation": "

An array of structures containing information about the service accounts.

", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId", + "traits": { + "smithy.api#documentation": "

The workspace to which the service accounts are associated.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.grafana#ListWorkspaces": { "type": "operation", "input": { @@ -2757,6 +3395,200 @@ } } }, + "com.amazonaws.grafana#ServiceAccount": { + "type": "resource", + "identifiers": { + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId" + } + }, + "operations": [ + { + "target": "com.amazonaws.grafana#CreateWorkspaceServiceAccount" + }, + { + "target": "com.amazonaws.grafana#DeleteWorkspaceServiceAccount" + }, + { + "target": "com.amazonaws.grafana#ListWorkspaceServiceAccounts" + } + ], + "traits": { + "aws.api#arn": { + "template": "workspaces/{workspaceId}/serviceaccounts" + } + } + }, + "com.amazonaws.grafana#ServiceAccountList": { + "type": "list", + "member": { + "target": "com.amazonaws.grafana#ServiceAccountSummary" + } + }, + "com.amazonaws.grafana#ServiceAccountName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.grafana#ServiceAccountSummary": { + "type": "structure", + "members": { + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique ID of the service account.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the service account.

", + "smithy.api#required": {} + } + }, + "isDisabled": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Returns true if the service account is disabled. Service accounts can be disabled and\n enabled in the Amazon Managed Grafana console.

", + "smithy.api#required": {} + } + }, + "grafanaRole": { + "target": "com.amazonaws.grafana#Role", + "traits": { + "smithy.api#documentation": "

The role of the service account, which sets the permission level used when calling\n Grafana APIs.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the information about one service account.

" + } + }, + "com.amazonaws.grafana#ServiceAccountToken": { + "type": "resource", + "identifiers": { + "workspaceId": { + "target": "com.amazonaws.grafana#WorkspaceId" + }, + "serviceAccountId": { + "target": "smithy.api#String" + } + }, + "operations": [ + { + "target": "com.amazonaws.grafana#CreateWorkspaceServiceAccountToken" + }, + { + "target": "com.amazonaws.grafana#DeleteWorkspaceServiceAccountToken" + }, + { + "target": "com.amazonaws.grafana#ListWorkspaceServiceAccountTokens" + } + ], + "traits": { + "aws.api#arn": { + "template": "workspaces/{workspaceId}/serviceaccounts/{serviceAccountId}/tokens" + } + } + }, + "com.amazonaws.grafana#ServiceAccountTokenKey": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.grafana#ServiceAccountTokenList": { + "type": "list", + "member": { + "target": "com.amazonaws.grafana#ServiceAccountTokenSummary" + } + }, + "com.amazonaws.grafana#ServiceAccountTokenName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.grafana#ServiceAccountTokenSummary": { + "type": "structure", + "members": { + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique ID of the service account token.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the service account token.

", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

When the service account token was created.

", + "smithy.api#required": {} + } + }, + "expiresAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

When the service account token will expire.

", + "smithy.api#required": {} + } + }, + "lastUsedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last time the token was used to authorize a Grafana HTTP API.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the information about a service account token.

" + } + }, + "com.amazonaws.grafana#ServiceAccountTokenSummaryWithKey": { + "type": "structure", + "members": { + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique ID of the service account token.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the service account token.

", + "smithy.api#required": {} + } + }, + "key": { + "target": "com.amazonaws.grafana#ServiceAccountTokenKey", + "traits": { + "smithy.api#documentation": "

The key for the service account token. Used when making calls to the Grafana HTTP \n APIs to authenticate and authorize the requests.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the information about a service account token.

\n

This structure is returned when creating the token. It is important to store the\n key that is returned, as it is not retrievable at a later time.

\n

If you lose the key, you can delete and recreate the token, which will create a\n new key.

" + } + }, "com.amazonaws.grafana#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -3828,7 +4660,7 @@ "grafanaToken": { "target": "com.amazonaws.grafana#GrafanaToken", "traits": { - "smithy.api#documentation": "

The token that ties this workspace to a Grafana Labs account. For more information, \n see Register with Grafana Labs.

" + "smithy.api#documentation": "

The token that ties this workspace to a Grafana Labs account. For more information, \n see Link your account with Grafana Labs.

" } } }, @@ -4012,7 +4844,7 @@ "grafanaToken": { "target": "com.amazonaws.grafana#GrafanaToken", "traits": { - "smithy.api#documentation": "

The token that ties this workspace to a Grafana Labs account. For more information, \n see Register with Grafana Labs.

" + "smithy.api#documentation": "

The token that ties this workspace to a Grafana Labs account. For more information, \n see Link your account with Grafana Labs.

" } } }, diff --git a/models/greengrassv2.json b/models/greengrassv2.json index 80822331e3..3d4035e32f 100644 --- a/models/greengrassv2.json +++ b/models/greengrassv2.json @@ -657,7 +657,8 @@ "componentVersion": { "target": "com.amazonaws.greengrassv2#ComponentVersionString", "traits": { - "smithy.api#documentation": "

The version of the component.

" + "smithy.api#documentation": "

The version of the component.

", + "smithy.api#required": {} } }, "configurationUpdate": { @@ -1045,7 +1046,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a component. Components are software that run on Greengrass core devices. After you\n develop and test a component on your core device, you can use this operation to upload your\n component to IoT Greengrass. Then, you can deploy the component to other core devices.

\n

You can use this operation to do the following:

\n
    \n
  • \n

    \n Create components from recipes\n

    \n

    Create a component from a recipe, which is a file that defines the component's\n metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For\n more information, see IoT Greengrass component recipe\n reference in the IoT Greengrass V2 Developer Guide.

    \n

    To create a component from a recipe, specify inlineRecipe when you call\n this operation.

    \n
  • \n
  • \n

    \n Create components from Lambda functions\n

    \n

    Create a component from an Lambda function that runs on IoT Greengrass. This creates a recipe\n and artifacts from the Lambda function's deployment package. You can use this operation to\n migrate Lambda functions from IoT Greengrass V1 to IoT Greengrass V2.

    \n

    This function only accepts Lambda functions that use the following runtimes:

    \n
      \n
    • \n

      Python 2.7 – python2.7\n

      \n
    • \n
    • \n

      Python 3.7 – python3.7\n

      \n
    • \n
    • \n

      Python 3.8 – python3.8\n

      \n
    • \n
    • \n

      Python 3.9 – python3.9\n

      \n
    • \n
    • \n

      Java 8 – java8\n

      \n
    • \n
    • \n

      Java 11 – java11\n

      \n
    • \n
    • \n

      Node.js 10 – nodejs10.x\n

      \n
    • \n
    • \n

      Node.js 12 – nodejs12.x\n

      \n
    • \n
    • \n

      Node.js 14 – nodejs14.x\n

      \n
    • \n
    \n

    To create a component from a Lambda function, specify lambdaFunction when\n you call this operation.

    \n \n

    IoT Greengrass currently supports Lambda functions on only Linux core devices.

    \n
    \n
  • \n
", + "smithy.api#documentation": "

Creates a component. Components are software that run on Greengrass core devices. After you\n develop and test a component on your core device, you can use this operation to upload your\n component to IoT Greengrass. Then, you can deploy the component to other core devices.

\n

You can use this operation to do the following:

\n
    \n
  • \n

    \n Create components from recipes\n

    \n

    Create a component from a recipe, which is a file that defines the component's\n metadata, parameters, dependencies, lifecycle, artifacts, and platform capability. For\n more information, see IoT Greengrass component recipe\n reference in the IoT Greengrass V2 Developer Guide.

    \n

    To create a component from a recipe, specify inlineRecipe when you call\n this operation.

    \n
  • \n
  • \n

    \n Create components from Lambda functions\n

    \n

    Create a component from an Lambda function that runs on IoT Greengrass. This creates a recipe\n and artifacts from the Lambda function's deployment package. You can use this operation to\n migrate Lambda functions from IoT Greengrass V1 to IoT Greengrass V2.

    \n

    This function accepts Lambda functions in all supported versions of Python, Node.js,\n and Java runtimes. IoT Greengrass doesn't apply any additional restrictions on deprecated Lambda\n runtime versions.

    \n

    To create a component from a Lambda function, specify lambdaFunction when\n you call this operation.

    \n \n

    IoT Greengrass currently supports Lambda functions on only Linux core devices.

    \n
    \n
  • \n
", "smithy.api#http": { "method": "POST", "uri": "/greengrass/v2/createComponentVersion", @@ -2249,6 +2250,20 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "s3EndpointType": { + "target": "com.amazonaws.greengrassv2#S3EndpointType", + "traits": { + "smithy.api#documentation": "

Specifies the endpoint to use when getting Amazon S3 pre-signed URLs.

\n

All Amazon Web Services Regions except US East (N. Virginia) use REGIONAL in all cases.\n In the US East (N. Virginia) Region the default is GLOBAL, but you can change it\n to REGIONAL with this parameter.

", + "smithy.api#httpQuery": "s3EndpointType" + } + }, + "iotEndpointType": { + "target": "com.amazonaws.greengrassv2#IotEndpointType", + "traits": { + "smithy.api#documentation": "

Determines if the Amazon S3 URL returned is a FIPS pre-signed URL endpoint. \n Specify fips if you want the returned Amazon S3 pre-signed URL to point to \n an Amazon S3 FIPS endpoint. If you don't specify a value, the default is standard.

", + "smithy.api#httpHeader": "x-amz-iot-endpoint-type" + } } }, "traits": { @@ -2724,7 +2739,7 @@ "sdkId": "GreengrassV2", "arnNamespace": "greengrass", "cloudFormationName": "GreengrassV2", - "cloudTrailEventSource": "greengrassv2.amazonaws.com", + "cloudTrailEventSource": "greengrass.amazonaws.com", "endpointPrefix": "greengrass" }, "aws.auth#sigv4": { @@ -3717,7 +3732,7 @@ "lastInstallationSource": { "target": "com.amazonaws.greengrassv2#NonEmptyString", "traits": { - "smithy.api#documentation": "

The most recent deployment source that brought the component to the Greengrass core device. For\n a thing group deployment or thing deployment, the source will be the The ID of the deployment. and for\n local deployments it will be LOCAL.

\n \n

Any deployment will attempt to reinstall currently broken components on the device,\n which will update the last installation source.

\n
" + "smithy.api#documentation": "

The most recent deployment source that brought the component to the Greengrass core device. For\n a thing group deployment or thing deployment, the source will be the ID of the last deployment\n that contained the component. For local deployments it will be LOCAL.

\n \n

Any deployment will attempt to reinstall currently broken components on the device,\n which will update the last installation source.

\n
" } }, "lifecycleStatusCodes": { @@ -4115,6 +4130,23 @@ } } }, + "com.amazonaws.greengrassv2#IotEndpointType": { + "type": "enum", + "members": { + "fips": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "fips" + } + }, + "standard": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard" + } + } + } + }, "com.amazonaws.greengrassv2#IsLatestForTarget": { "type": "boolean", "traits": { @@ -4930,7 +4962,7 @@ "maxResults": { "target": "com.amazonaws.greengrassv2#DefaultMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned per paginated request.

", + "smithy.api#documentation": "

The maximum number of results to be returned per paginated request.

\n

Default: 50\n

", "smithy.api#httpQuery": "maxResults" } }, @@ -5465,6 +5497,23 @@ "smithy.api#default": 0 } }, + "com.amazonaws.greengrassv2#S3EndpointType": { + "type": "enum", + "members": { + "REGIONAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REGIONAL" + } + }, + "GLOBAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GLOBAL" + } + } + } + }, "com.amazonaws.greengrassv2#ServiceQuotaExceededException": { "type": "structure", "members": { diff --git a/models/guardduty.json b/models/guardduty.json index ae4f3341b8..b366f0f78c 100644 --- a/models/guardduty.json +++ b/models/guardduty.json @@ -2079,6 +2079,100 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#CreateMalwareProtectionPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.guardduty#CreateMalwareProtectionPlanRequest" + }, + "output": { + "target": "com.amazonaws.guardduty#CreateMalwareProtectionPlanResponse" + }, + "errors": [ + { + "target": "com.amazonaws.guardduty#AccessDeniedException" + }, + { + "target": "com.amazonaws.guardduty#BadRequestException" + }, + { + "target": "com.amazonaws.guardduty#ConflictException" + }, + { + "target": "com.amazonaws.guardduty#InternalServerErrorException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new Malware Protection plan for the protected resource.

\n

When you create a Malware Protection plan, the Amazon Web Services service terms for GuardDuty Malware\n Protection apply. For more information, see Amazon Web Services service terms for GuardDuty Malware Protection.

", + "smithy.api#http": { + "method": "POST", + "uri": "/malware-protection-plan", + "code": 200 + } + } + }, + "com.amazonaws.guardduty#CreateMalwareProtectionPlanRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.guardduty#ClientToken", + "traits": { + "smithy.api#documentation": "

The idempotency token for the create request.

", + "smithy.api#idempotencyToken": {}, + "smithy.api#jsonName": "clientToken" + } + }, + "Role": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

IAM role with permissions required to scan and add tags to the associated\n protected resource.

", + "smithy.api#jsonName": "role", + "smithy.api#required": {} + } + }, + "ProtectedResource": { + "target": "com.amazonaws.guardduty#CreateProtectedResource", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Information about the protected resource that is associated with the created \n Malware Protection plan. Presently, S3Bucket is the only supported \n protected resource.

", + "smithy.api#jsonName": "protectedResource", + "smithy.api#required": {} + } + }, + "Actions": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanActions", + "traits": { + "smithy.api#documentation": "

Information about whether the tags will be added to the S3 object after scanning.

", + "smithy.api#jsonName": "actions" + } + }, + "Tags": { + "target": "com.amazonaws.guardduty#TagMap", + "traits": { + "smithy.api#documentation": "

Tags added to the Malware Protection plan resource.

", + "smithy.api#jsonName": "tags" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.guardduty#CreateMalwareProtectionPlanResponse": { + "type": "structure", + "members": { + "MalwareProtectionPlanId": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

A unique identifier associated with the Malware Protection plan resource.

", + "smithy.api#jsonName": "malwareProtectionPlanId" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.guardduty#CreateMembers": { "type": "operation", "input": { @@ -2147,6 +2241,21 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#CreateProtectedResource": { + "type": "structure", + "members": { + "S3Bucket": { + "target": "com.amazonaws.guardduty#CreateS3BucketResource", + "traits": { + "smithy.api#documentation": "

Information about the protected S3 bucket resource.

", + "smithy.api#jsonName": "s3Bucket" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the protected resource that\n is associated with the created Malware Protection plan.\n Presently, S3Bucket is the only supported \n protected resource.

" + } + }, "com.amazonaws.guardduty#CreatePublishingDestination": { "type": "operation", "input": { @@ -2232,6 +2341,28 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#CreateS3BucketResource": { + "type": "structure", + "members": { + "BucketName": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Name of the S3 bucket.

", + "smithy.api#jsonName": "bucketName" + } + }, + "ObjectPrefixes": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanObjectPrefixesList", + "traits": { + "smithy.api#documentation": "

Information about the specified object prefixes. The S3 object will be scanned only \n if it belongs to any of the specified object prefixes.

", + "smithy.api#jsonName": "objectPrefixes" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the protected S3 bucket resource.

" + } + }, "com.amazonaws.guardduty#CreateSampleFindings": { "type": "operation", "input": { @@ -2998,6 +3129,54 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#DeleteMalwareProtectionPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.guardduty#DeleteMalwareProtectionPlanRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.guardduty#AccessDeniedException" + }, + { + "target": "com.amazonaws.guardduty#BadRequestException" + }, + { + "target": "com.amazonaws.guardduty#InternalServerErrorException" + }, + { + "target": "com.amazonaws.guardduty#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the Malware Protection plan ID associated with the Malware Protection plan resource.\n Use this API only when you no longer want to protect the resource associated with this\n Malware Protection plan ID.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/malware-protection-plan/{MalwareProtectionPlanId}", + "code": 200 + } + } + }, + "com.amazonaws.guardduty#DeleteMalwareProtectionPlanRequest": { + "type": "structure", + "members": { + "MalwareProtectionPlanId": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

A unique identifier associated with the Malware Protection plan resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#jsonName": "malwareProtectionPlanId", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.guardduty#DeleteMembers": { "type": "operation", "input": { @@ -5853,6 +6032,118 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#GetMalwareProtectionPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.guardduty#GetMalwareProtectionPlanRequest" + }, + "output": { + "target": "com.amazonaws.guardduty#GetMalwareProtectionPlanResponse" + }, + "errors": [ + { + "target": "com.amazonaws.guardduty#AccessDeniedException" + }, + { + "target": "com.amazonaws.guardduty#BadRequestException" + }, + { + "target": "com.amazonaws.guardduty#InternalServerErrorException" + }, + { + "target": "com.amazonaws.guardduty#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the Malware Protection plan details associated with a Malware Protection\n plan ID.

", + "smithy.api#http": { + "method": "GET", + "uri": "/malware-protection-plan/{MalwareProtectionPlanId}", + "code": 200 + } + } + }, + "com.amazonaws.guardduty#GetMalwareProtectionPlanRequest": { + "type": "structure", + "members": { + "MalwareProtectionPlanId": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

A unique identifier associated with the Malware Protection plan resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#jsonName": "malwareProtectionPlanId", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.guardduty#GetMalwareProtectionPlanResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the protected resource.

", + "smithy.api#jsonName": "arn" + } + }, + "Role": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

IAM role that includes the permissions required to scan and \n add tags to the associated protected resource.

", + "smithy.api#jsonName": "role" + } + }, + "ProtectedResource": { + "target": "com.amazonaws.guardduty#CreateProtectedResource", + "traits": { + "smithy.api#documentation": "

Information about the protected resource that is associated with the created \n Malware Protection plan. Presently, S3Bucket is the only supported \n protected resource.

", + "smithy.api#jsonName": "protectedResource" + } + }, + "Actions": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanActions", + "traits": { + "smithy.api#documentation": "

Information about whether the tags will be added to the S3 object after scanning.

", + "smithy.api#jsonName": "actions" + } + }, + "CreatedAt": { + "target": "com.amazonaws.guardduty#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp when the Malware Protection plan resource was created.

", + "smithy.api#jsonName": "createdAt" + } + }, + "Status": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanStatus", + "traits": { + "smithy.api#documentation": "

Malware Protection plan status.

", + "smithy.api#jsonName": "status" + } + }, + "StatusReasons": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanStatusReasonsList", + "traits": { + "smithy.api#documentation": "

Information about the issue code and message associated with the status of\n your Malware Protection plan.

", + "smithy.api#jsonName": "statusReasons" + } + }, + "Tags": { + "target": "com.amazonaws.guardduty#TagMap", + "traits": { + "smithy.api#documentation": "

Tags added to the Malware Protection plan resource.

", + "smithy.api#jsonName": "tags" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.guardduty#GetMalwareScanSettings": { "type": "operation", "input": { @@ -6488,6 +6779,9 @@ { "target": "com.amazonaws.guardduty#CreateIPSet" }, + { + "target": "com.amazonaws.guardduty#CreateMalwareProtectionPlan" + }, { "target": "com.amazonaws.guardduty#CreateMembers" }, @@ -6515,6 +6809,9 @@ { "target": "com.amazonaws.guardduty#DeleteIPSet" }, + { + "target": "com.amazonaws.guardduty#DeleteMalwareProtectionPlan" + }, { "target": "com.amazonaws.guardduty#DeleteMembers" }, @@ -6572,6 +6869,9 @@ { "target": "com.amazonaws.guardduty#GetIPSet" }, + { + "target": "com.amazonaws.guardduty#GetMalwareProtectionPlan" + }, { "target": "com.amazonaws.guardduty#GetMalwareScanSettings" }, @@ -6617,6 +6917,9 @@ { "target": "com.amazonaws.guardduty#ListIPSets" }, + { + "target": "com.amazonaws.guardduty#ListMalwareProtectionPlans" + }, { "target": "com.amazonaws.guardduty#ListMembers" }, @@ -6662,6 +6965,9 @@ { "target": "com.amazonaws.guardduty#UpdateIPSet" }, + { + "target": "com.amazonaws.guardduty#UpdateMalwareProtectionPlan" + }, { "target": "com.amazonaws.guardduty#UpdateMalwareScanSettings" }, @@ -8193,6 +8499,34 @@ } } }, + "com.amazonaws.guardduty#ItemPath": { + "type": "structure", + "members": { + "NestedItemPath": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The nested item path where the infected file was found.

", + "smithy.api#jsonName": "nestedItemPath" + } + }, + "Hash": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The hash value of the infected resource.

", + "smithy.api#jsonName": "hash" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the nested item path and hash of the protected\n resource.

" + } + }, + "com.amazonaws.guardduty#ItemPaths": { + "type": "list", + "member": { + "target": "com.amazonaws.guardduty#ItemPath" + } + }, "com.amazonaws.guardduty#KubernetesApiCallAction": { "type": "structure", "members": { @@ -9291,15 +9625,18 @@ "smithy.api#output": {} } }, - "com.amazonaws.guardduty#ListMembers": { + "com.amazonaws.guardduty#ListMalwareProtectionPlans": { "type": "operation", "input": { - "target": "com.amazonaws.guardduty#ListMembersRequest" + "target": "com.amazonaws.guardduty#ListMalwareProtectionPlansRequest" }, "output": { - "target": "com.amazonaws.guardduty#ListMembersResponse" + "target": "com.amazonaws.guardduty#ListMalwareProtectionPlansResponse" }, "errors": [ + { + "target": "com.amazonaws.guardduty#AccessDeniedException" + }, { "target": "com.amazonaws.guardduty#BadRequestException" }, @@ -9308,12 +9645,75 @@ } ], "traits": { - "smithy.api#documentation": "

Lists details about all member accounts for the current GuardDuty administrator\n account.

", + "smithy.api#documentation": "

Lists the Malware Protection plan IDs associated with the protected\n resources in your Amazon Web Services account.

", "smithy.api#http": { "method": "GET", - "uri": "/detector/{DetectorId}/member", + "uri": "/malware-protection-plan", "code": 200 - }, + } + } + }, + "com.amazonaws.guardduty#ListMalwareProtectionPlansRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

You can use this parameter when paginating results. Set the value \n of this parameter to null on your first call to the list action. \n For subsequent calls to the action, fill nextToken in the request \n with the value of NextToken from the previous response to \n continue listing data.

", + "smithy.api#httpQuery": "nextToken", + "smithy.api#jsonName": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.guardduty#ListMalwareProtectionPlansResponse": { + "type": "structure", + "members": { + "MalwareProtectionPlans": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlansSummary", + "traits": { + "smithy.api#documentation": "

A list of unique identifiers associated with each Malware Protection plan.

", + "smithy.api#jsonName": "malwareProtectionPlans" + } + }, + "NextToken": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

You can use this parameter when paginating results. Set the value \n of this parameter to null on your first call to the list action. \n For subsequent calls to the action, fill nextToken in the request \n with the value of NextToken from the previous response to \n continue listing data.

", + "smithy.api#jsonName": "nextToken" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.guardduty#ListMembers": { + "type": "operation", + "input": { + "target": "com.amazonaws.guardduty#ListMembersRequest" + }, + "output": { + "target": "com.amazonaws.guardduty#ListMembersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.guardduty#BadRequestException" + }, + { + "target": "com.amazonaws.guardduty#InternalServerErrorException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists details about all member accounts for the current GuardDuty administrator\n account.

", + "smithy.api#http": { + "method": "GET", + "uri": "/detector/{DetectorId}/member", + "code": 200 + }, "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -9850,6 +10250,158 @@ "smithy.api#documentation": "

Provides details about Malware Protection when it is enabled as a data source.

" } }, + "com.amazonaws.guardduty#MalwareProtectionPlanActions": { + "type": "structure", + "members": { + "Tagging": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanTaggingAction", + "traits": { + "smithy.api#documentation": "

Indicates whether the scanned S3 object will have tags about the scan result.

", + "smithy.api#jsonName": "tagging" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about whether the tags will be added to the S3 object after scanning.

" + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanObjectPrefixesList": { + "type": "list", + "member": { + "target": "com.amazonaws.guardduty#String" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "WARNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WARNING" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR" + } + } + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanStatusReason": { + "type": "structure", + "members": { + "Code": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Issue code.

", + "smithy.api#jsonName": "code" + } + }, + "Message": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Issue message that specifies the reason. For information\n about potential troubleshooting steps, see\n Troubleshooting Malware Protection for S3 status issues in the \n GuardDuty User Guide.

", + "smithy.api#jsonName": "message" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the issue code and message associated with the status of\n your Malware Protection plan.

" + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanStatusReasonsList": { + "type": "list", + "member": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanStatusReason" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanSummary": { + "type": "structure", + "members": { + "MalwareProtectionPlanId": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

A unique identifier associated with the Malware Protection plan.

", + "smithy.api#jsonName": "malwareProtectionPlanId" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the Malware Protection plan resource.

" + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanTaggingAction": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanTaggingActionStatus", + "traits": { + "smithy.api#documentation": "

Indicates whether or not the tags will be added.

", + "smithy.api#jsonName": "status" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about adding tags to the scanned S3 object after the scan result is available.

" + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlanTaggingActionStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.guardduty#MalwareProtectionPlansSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanSummary" + } + }, + "com.amazonaws.guardduty#MalwareScanDetails": { + "type": "structure", + "members": { + "Threats": { + "target": "com.amazonaws.guardduty#Threats", + "traits": { + "smithy.api#documentation": "

Information about the detected threats associated with the\n generated GuardDuty finding.

", + "smithy.api#jsonName": "threats" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the malware scan that generated a GuardDuty finding.

" + } + }, "com.amazonaws.guardduty#ManagementType": { "type": "enum", "members": { @@ -11710,7 +12262,7 @@ "InstanceArn": { "target": "com.amazonaws.guardduty#InstanceArn", "traits": { - "smithy.api#documentation": "

InstanceArn that was scanned in the scan entry.

", + "smithy.api#documentation": "

Instance ARN that was scanned in the scan entry.

", "smithy.api#jsonName": "instanceArn" } } @@ -11725,6 +12277,30 @@ "target": "com.amazonaws.guardduty#String" } }, + "com.amazonaws.guardduty#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The error message.

", + "smithy.api#jsonName": "message" + } + }, + "Type": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The error type.

", + "smithy.api#jsonName": "__type" + } + } + }, + "traits": { + "smithy.api#documentation": "

The requested resource can't be found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, "com.amazonaws.guardduty#ResourceType": { "type": "enum", "members": { @@ -12011,6 +12587,13 @@ "smithy.api#documentation": "

Describes the public access policies that apply to the S3 bucket.

", "smithy.api#jsonName": "publicAccess" } + }, + "S3ObjectDetails": { + "target": "com.amazonaws.guardduty#S3ObjectDetails", + "traits": { + "smithy.api#documentation": "

Information about the S3 object that was scanned.

", + "smithy.api#jsonName": "s3ObjectDetails" + } } }, "traits": { @@ -12057,6 +12640,55 @@ "smithy.api#documentation": "

Describes whether S3 data event logs will be enabled as a data source.

" } }, + "com.amazonaws.guardduty#S3ObjectDetail": { + "type": "structure", + "members": { + "ObjectArn": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the S3 object.

", + "smithy.api#jsonName": "objectArn" + } + }, + "Key": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Key of the S3 object.

", + "smithy.api#jsonName": "key" + } + }, + "ETag": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The entity tag is a hash of the S3 object. The ETag reflects changes only to the contents of \n an object, and not its metadata.

", + "smithy.api#jsonName": "eTag" + } + }, + "Hash": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Hash of the threat detected in this finding.

", + "smithy.api#jsonName": "hash" + } + }, + "VersionId": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Version ID of the object.

", + "smithy.api#jsonName": "versionId" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the S3 object that was scanned.

" + } + }, + "com.amazonaws.guardduty#S3ObjectDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.guardduty#S3ObjectDetail" + } + }, "com.amazonaws.guardduty#Scan": { "type": "structure", "members": { @@ -12316,7 +12948,7 @@ "VolumeArn": { "target": "com.amazonaws.guardduty#String", "traits": { - "smithy.api#documentation": "

EBS volume Arn details of the infected file.

", + "smithy.api#documentation": "

EBS volume ARN details of the infected file.

", "smithy.api#jsonName": "volumeArn" } }, @@ -12679,6 +13311,13 @@ "smithy.api#documentation": "

Contains information about the detected unusual behavior.

", "smithy.api#jsonName": "detection" } + }, + "MalwareScanDetails": { + "target": "com.amazonaws.guardduty#MalwareScanDetails", + "traits": { + "smithy.api#documentation": "

Returns details from the malware scan that generated a GuardDuty finding.

", + "smithy.api#jsonName": "malwareScanDetails" + } } }, "traits": { @@ -13087,6 +13726,35 @@ "target": "com.amazonaws.guardduty#Tag" } }, + "com.amazonaws.guardduty#Threat": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Name of the detected threat that caused GuardDuty to generate this finding.

", + "smithy.api#jsonName": "name" + } + }, + "Source": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

Source of the threat that generated this finding.

", + "smithy.api#jsonName": "source" + } + }, + "ItemPaths": { + "target": "com.amazonaws.guardduty#ItemPaths", + "traits": { + "smithy.api#documentation": "

Information about the nested item path and \n hash of the protected resource.

", + "smithy.api#jsonName": "itemPaths" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the detected threats associated with the\n generated finding.

" + } + }, "com.amazonaws.guardduty#ThreatDetectedByName": { "type": "structure", "members": { @@ -13276,6 +13944,12 @@ "target": "com.amazonaws.guardduty#String" } }, + "com.amazonaws.guardduty#Threats": { + "type": "list", + "member": { + "target": "com.amazonaws.guardduty#Threat" + } + }, "com.amazonaws.guardduty#ThreatsDetectedItemCount": { "type": "structure", "members": { @@ -13839,6 +14513,75 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#UpdateMalwareProtectionPlan": { + "type": "operation", + "input": { + "target": "com.amazonaws.guardduty#UpdateMalwareProtectionPlanRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.guardduty#AccessDeniedException" + }, + { + "target": "com.amazonaws.guardduty#BadRequestException" + }, + { + "target": "com.amazonaws.guardduty#InternalServerErrorException" + }, + { + "target": "com.amazonaws.guardduty#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an existing Malware Protection plan resource.

", + "smithy.api#http": { + "method": "PATCH", + "uri": "/malware-protection-plan/{MalwareProtectionPlanId}", + "code": 200 + } + } + }, + "com.amazonaws.guardduty#UpdateMalwareProtectionPlanRequest": { + "type": "structure", + "members": { + "MalwareProtectionPlanId": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

A unique identifier associated with the Malware Protection plan.

", + "smithy.api#httpLabel": {}, + "smithy.api#jsonName": "malwareProtectionPlanId", + "smithy.api#required": {} + } + }, + "Role": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

IAM role with permissions required to scan and add tags to \n the associated protected resource.

", + "smithy.api#jsonName": "role" + } + }, + "Actions": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanActions", + "traits": { + "smithy.api#documentation": "

Information about whether the tags will be added to the S3 object after scanning.

", + "smithy.api#jsonName": "actions" + } + }, + "ProtectedResource": { + "target": "com.amazonaws.guardduty#UpdateProtectedResource", + "traits": { + "smithy.api#documentation": "

Information about the protected resource that is associated \n with the created Malware Protection plan. Presently, S3Bucket\n is the only supported protected resource.

", + "smithy.api#jsonName": "protectedResource" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.guardduty#UpdateMalwareScanSettings": { "type": "operation", "input": { @@ -14070,6 +14813,21 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#UpdateProtectedResource": { + "type": "structure", + "members": { + "S3Bucket": { + "target": "com.amazonaws.guardduty#UpdateS3BucketResource", + "traits": { + "smithy.api#documentation": "

Information about the protected S3 bucket resource.

", + "smithy.api#jsonName": "s3Bucket" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the protected resource \n that is associated with the created Malware Protection plan. \n Presently, S3Bucket is the only supported protected resource.

" + } + }, "com.amazonaws.guardduty#UpdatePublishingDestination": { "type": "operation", "input": { @@ -14135,6 +14893,21 @@ "smithy.api#output": {} } }, + "com.amazonaws.guardduty#UpdateS3BucketResource": { + "type": "structure", + "members": { + "ObjectPrefixes": { + "target": "com.amazonaws.guardduty#MalwareProtectionPlanObjectPrefixesList", + "traits": { + "smithy.api#documentation": "

Information about the specified object prefixes. The S3 object will be scanned only \n if it belongs to any of the specified object prefixes.

", + "smithy.api#jsonName": "objectPrefixes" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the protected S3 bucket resource.

" + } + }, "com.amazonaws.guardduty#UpdateThreatIntelSet": { "type": "operation", "input": { @@ -14627,7 +15400,7 @@ "VolumeArn": { "target": "com.amazonaws.guardduty#String", "traits": { - "smithy.api#documentation": "

EBS volume Arn information.

", + "smithy.api#documentation": "

EBS volume ARN information.

", "smithy.api#jsonName": "volumeArn" } }, @@ -14662,14 +15435,14 @@ "SnapshotArn": { "target": "com.amazonaws.guardduty#String", "traits": { - "smithy.api#documentation": "

Snapshot Arn of the EBS volume.

", + "smithy.api#documentation": "

Snapshot ARN of the EBS volume.

", "smithy.api#jsonName": "snapshotArn" } }, "KmsKeyArn": { "target": "com.amazonaws.guardduty#String", "traits": { - "smithy.api#documentation": "

KMS key Arn used to encrypt the EBS volume.

", + "smithy.api#documentation": "

KMS key ARN used to encrypt the EBS volume.

", "smithy.api#jsonName": "kmsKeyArn" } } diff --git a/models/honeycode.json b/models/honeycode.json deleted file mode 100644 index b1c6d908a5..0000000000 --- a/models/honeycode.json +++ /dev/null @@ -1,3710 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.honeycode#AccessDeniedException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

\n You do not have sufficient access to perform this action. Check that the workbook is owned by you and your\n IAM policy allows access to the resource in the request.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.honeycode#AutomationExecutionException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The automation execution did not end successfully.

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.honeycode#AutomationExecutionTimeoutException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The automation execution timed out.

", - "smithy.api#error": "server", - "smithy.api#httpError": 504 - } - }, - "com.amazonaws.honeycode#AwsUserArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 20, - "max": 2048 - } - } - }, - "com.amazonaws.honeycode#BatchCreateTableRows": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#BatchCreateTableRowsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#BatchCreateTableRowsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The BatchCreateTableRows API allows you to create one or more rows at the end of a table in a workbook.\n The API allows you to specify the values to set in some or all of the columns in the new rows.\n

\n

\n If a column is not explicitly set in a specific row, then the column level formula specified in the table\n will be applied to the new row. If there is no column level formula but the last row of the table has a\n formula, then that formula will be copied down to the new row. If there is no column level formula and\n no formula in the last row of the table, then that column will be left blank for the new rows.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{tableId}/rows/batchcreate", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#BatchCreateTableRowsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook where the new rows are being added.

\n

\n If a workbook with the specified ID could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table where the new rows are being added.

\n

\n If a table with the specified ID could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "rowsToCreate": { - "target": "com.amazonaws.honeycode#CreateRowDataList", - "traits": { - "smithy.api#documentation": "

\n The list of rows to create at the end of the table. Each item in this list needs to have a batch item id\n to uniquely identify the element in the request and the cells to create for that row.\n You need to specify at least one item in this list.\n

\n

\n Note that if one of the column ids in any of the rows in the request does not exist in the table, then the\n request fails and no updates are made to the table.\n

", - "smithy.api#required": {} - } - }, - "clientRequestToken": { - "target": "com.amazonaws.honeycode#ClientRequestToken", - "traits": { - "smithy.api#documentation": "

\n The request token for performing the batch create operation.\n Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error\n like a failed network connection, you can retry the call with the same request token. The service ensures\n that if the first call using that request token is successfully performed, the second call will not perform\n the operation again.\n

\n

\n Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests\n spanning hours or days.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#BatchCreateTableRowsResult": { - "type": "structure", - "members": { - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The updated workbook cursor after adding the new rows at the end of the table.

", - "smithy.api#required": {} - } - }, - "createdRows": { - "target": "com.amazonaws.honeycode#CreatedRowsMap", - "traits": { - "smithy.api#documentation": "

The map of batch item id to the row id that was created for that item.

", - "smithy.api#required": {} - } - }, - "failedBatchItems": { - "target": "com.amazonaws.honeycode#FailedBatchItems", - "traits": { - "smithy.api#documentation": "

\n The list of batch items in the request that could not be added to the table. Each element in this list\n contains one item from the request that could not be added to the table along with the reason why\n that item could not be added.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#BatchDeleteTableRows": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#BatchDeleteTableRowsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#BatchDeleteTableRowsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The BatchDeleteTableRows API allows you to delete one or more rows from a table in a workbook.\n You need to specify the ids of the rows that you want to delete from the table.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{tableId}/rows/batchdelete", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#BatchDeleteTableRowsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook where the rows are being deleted.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table where the rows are being deleted.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "rowIds": { - "target": "com.amazonaws.honeycode#RowIdList", - "traits": { - "smithy.api#documentation": "

\n The list of row ids to delete from the table. You need to specify at least one row id in this list.\n

\n

\n Note that if one of the row ids provided in the request does not exist in the table, then the request fails\n and no rows are deleted from the table.\n

", - "smithy.api#required": {} - } - }, - "clientRequestToken": { - "target": "com.amazonaws.honeycode#ClientRequestToken", - "traits": { - "smithy.api#documentation": "

\n The request token for performing the delete action.\n Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error\n like a failed network connection, you can retry the call with the same request token. The service ensures\n that if the first call using that request token is successfully performed, the second call will not perform\n the action again.\n

\n

\n Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests\n spanning hours or days.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#BatchDeleteTableRowsResult": { - "type": "structure", - "members": { - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The updated workbook cursor after deleting the rows from the table.

", - "smithy.api#required": {} - } - }, - "failedBatchItems": { - "target": "com.amazonaws.honeycode#FailedBatchItems", - "traits": { - "smithy.api#documentation": "

\n The list of row ids in the request that could not be deleted from the table. Each element in this list\n contains one row id from the request that could not be deleted along with the reason why that item could not\n be deleted.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#BatchErrorMessage": { - "type": "string", - "traits": { - "smithy.api#pattern": "^(?!\\s*$).+$" - } - }, - "com.amazonaws.honeycode#BatchItemId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 64 - }, - "smithy.api#pattern": "^(?!\\s*$).+$" - } - }, - "com.amazonaws.honeycode#BatchUpdateTableRows": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#BatchUpdateTableRowsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#BatchUpdateTableRowsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The BatchUpdateTableRows API allows you to update one or more rows in a table in a workbook.\n

\n

\n You can specify the values to set in some or all of the columns in the table for the specified\n rows.\n If a column is not explicitly specified in a particular row, then that column will not be updated\n for that row. To clear out the data in a specific cell, you need to set the value as an empty string\n (\"\").\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{tableId}/rows/batchupdate", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#BatchUpdateTableRowsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook where the rows are being updated.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table where the rows are being updated.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "rowsToUpdate": { - "target": "com.amazonaws.honeycode#UpdateRowDataList", - "traits": { - "smithy.api#documentation": "

\n The list of rows to update in the table. Each item in this list needs to contain the row id to update\n along with the map of column id to cell values for each column in that row that needs to be updated.\n You need to specify at least one row in this list, and for each row, you need to specify at least one\n column to update.\n

\n

\n Note that if one of the row or column ids in the request does not exist in the table, then the request fails\n and no updates are made to the table.\n

", - "smithy.api#required": {} - } - }, - "clientRequestToken": { - "target": "com.amazonaws.honeycode#ClientRequestToken", - "traits": { - "smithy.api#documentation": "

\n The request token for performing the update action.\n Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error\n like a failed network connection, you can retry the call with the same request token. The service ensures\n that if the first call using that request token is successfully performed, the second call will not perform\n the action again.\n

\n

\n Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests\n spanning hours or days.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#BatchUpdateTableRowsResult": { - "type": "structure", - "members": { - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The updated workbook cursor after adding the new rows at the end of the table.

", - "smithy.api#required": {} - } - }, - "failedBatchItems": { - "target": "com.amazonaws.honeycode#FailedBatchItems", - "traits": { - "smithy.api#documentation": "

\n The list of batch items in the request that could not be updated in the table. Each element in this list\n contains one item from the request that could not be updated in the table along with the reason why\n that item could not be updated.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#BatchUpsertTableRows": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#BatchUpsertTableRowsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#BatchUpsertTableRowsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The BatchUpsertTableRows API allows you to upsert one or more rows in a table. The upsert\n operation takes a filter expression as input and evaluates it to find matching rows on the destination\n table. If matching rows are found, it will update the cells in the matching rows to new values specified\n in the request. If no matching rows are found, a new row is added at the end of the table and the cells in\n that row are set to the new values specified in the request.\n

\n

\n You can specify the values to set in some or all of the columns in the table for the\n matching or newly appended rows. If a column is not explicitly specified for a particular row, then that\n column will not be updated for that row. To clear out the data in a specific cell, you need to set the value\n as an empty string (\"\").\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{tableId}/rows/batchupsert", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#BatchUpsertTableRowsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook where the rows are being upserted.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table where the rows are being upserted.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "rowsToUpsert": { - "target": "com.amazonaws.honeycode#UpsertRowDataList", - "traits": { - "smithy.api#documentation": "

\n The list of rows to upsert in the table. Each item in this list needs to have a batch item id to uniquely\n identify the element in the request, a filter expression to find the rows to update for that element\n and the cell values to set for each column in the upserted rows. You need to specify\n at least one item in this list.\n

\n

\n Note that if one of the filter formulas in the request fails to evaluate because of an error or one of the\n column ids in any of the rows does not exist in the table, then the request fails\n and no updates are made to the table.\n

", - "smithy.api#required": {} - } - }, - "clientRequestToken": { - "target": "com.amazonaws.honeycode#ClientRequestToken", - "traits": { - "smithy.api#documentation": "

\n The request token for performing the update action.\n Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error\n like a failed network connection, you can retry the call with the same request token. The service ensures\n that if the first call using that request token is successfully performed, the second call will not perform\n the action again.\n

\n

\n Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests\n spanning hours or days.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#BatchUpsertTableRowsResult": { - "type": "structure", - "members": { - "rows": { - "target": "com.amazonaws.honeycode#UpsertRowsResultMap", - "traits": { - "smithy.api#documentation": "

\n A map with the batch item id as the key and the result of the upsert operation as the value. The\n result of the upsert operation specifies whether existing rows were updated or a new row was appended, along\n with the list of row ids that were affected.\n

", - "smithy.api#required": {} - } - }, - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The updated workbook cursor after updating or appending rows in the table.

", - "smithy.api#required": {} - } - }, - "failedBatchItems": { - "target": "com.amazonaws.honeycode#FailedBatchItems", - "traits": { - "smithy.api#documentation": "

\n The list of batch items in the request that could not be updated or appended in the table. Each element in\n this list contains one item from the request that could not be updated in the table along with the reason\n why that item could not be updated or appended.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#Cell": { - "type": "structure", - "members": { - "formula": { - "target": "com.amazonaws.honeycode#Formula", - "traits": { - "smithy.api#documentation": "

\n The formula contained in the cell. This field is empty if a cell does not have a formula.\n

" - } - }, - "format": { - "target": "com.amazonaws.honeycode#Format", - "traits": { - "smithy.api#documentation": "

The format of the cell. If this field is empty, then the format is either not specified in the\n workbook or the format is set to AUTO.

" - } - }, - "rawValue": { - "target": "com.amazonaws.honeycode#RawValue", - "traits": { - "smithy.api#documentation": "

\n The raw value of the data contained in the cell. The raw value depends on the format of the data in the\n cell. However the attribute in the API return value is always a string containing the raw value.\n

\n

\n Cells with format DATE, DATE_TIME or TIME have the raw value as a floating point\n number where the whole number represents the number of days since 1/1/1900 and the fractional part\n represents the fraction of the day since midnight. For example, a cell with date 11/3/2020 has the raw value\n \"44138\". A cell with the time 9:00 AM has the raw value \"0.375\" and a cell with date/time value of\n 11/3/2020 9:00 AM has the raw value \"44138.375\". Notice that even though the raw value is a number in all\n three cases, it is still represented as a string.\n

\n

\n Cells with format NUMBER, CURRENCY, PERCENTAGE and ACCOUNTING have the raw value of the data as the number\n representing the data being displayed. For example, the number 1.325 with two decimal places in the format\n will have it's raw value as \"1.325\" and formatted value as \"1.33\". A currency value for\n $10 will have the raw value as \"10\" and formatted value as \"$10.00\". A value representing 20% with two\n decimal places in the format will have its raw value as \"0.2\" and the formatted value as \"20.00%\". An\n accounting value of -$25 will have \"-25\" as the raw value and \"$ (25.00)\" as the formatted value.\n

\n

\n Cells with format TEXT will have the raw text as the raw value. For example, a cell with text \"John Smith\"\n will have \"John Smith\" as both the raw value and the formatted value.\n

\n

\n Cells with format CONTACT will have the name of the contact as a formatted value and the email address of\n the contact as the raw value. For example, a contact for John Smith will have \"John Smith\" as the\n formatted value and \"john.smith@example.com\" as the raw value.\n

\n

\n Cells with format ROWLINK (aka picklist) will have the first column of the linked row as the formatted value\n and the row id of the linked row as the raw value. For example, a cell containing a picklist to a table\n that displays task status might have \"Completed\" as the formatted value and\n \"row:dfcefaee-5b37-4355-8f28-40c3e4ff5dd4/ca432b2f-b8eb-431d-9fb5-cbe0342f9f03\" as the raw value.\n

\n

\n Cells with format ROWSET (aka multi-select or multi-record picklist) will by default have the first column\n of each of the linked rows as the formatted value in the list, and the rowset id of the linked rows as the\n raw value. For example, a cell containing a multi-select picklist to a table that contains items might have\n \"Item A\", \"Item B\" in the formatted value list and \"rows:b742c1f4-6cb0-4650-a845-35eb86fcc2bb/\n [fdea123b-8f68-474a-aa8a-5ff87aa333af,6daf41f0-a138-4eee-89da-123086d36ecf]\" as the raw value.\n

\n

\n Cells with format ATTACHMENT will have the name of the attachment as the formatted value and the attachment\n id as the raw value. For example, a cell containing an attachment named \"image.jpeg\" will have\n \"image.jpeg\" as the formatted value and \"attachment:ca432b2f-b8eb-431d-9fb5-cbe0342f9f03\" as the raw value.\n

\n

\n Cells with format AUTO or cells without any format that are auto-detected as one of the formats above will\n contain the raw and formatted values as mentioned above, based on the auto-detected formats. If there is no\n auto-detected format, the raw and formatted values will be the same as the data in the cell.\n

" - } - }, - "formattedValue": { - "target": "com.amazonaws.honeycode#FormattedValue", - "traits": { - "smithy.api#documentation": "

\n The formatted value of the cell. This is the value that you see displayed in the cell in the UI.\n

\n

\n Note that the formatted value of a cell is always represented as a string irrespective of the data that is\n stored in the cell. For example, if a cell contains a date, the formatted value of the cell is the string\n representation of the formatted date being shown in the cell in the UI. See details in the rawValue field\n below for how cells of different formats will have different raw and formatted values.\n

" - } - }, - "formattedValues": { - "target": "com.amazonaws.honeycode#FormattedValuesList", - "traits": { - "smithy.api#documentation": "

\n A list of formatted values of the cell. This field is only returned when the cell is ROWSET format\n (aka multi-select or multi-record picklist). Values in the list are always represented as strings.\n The formattedValue field will be empty if this field is returned.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that represents a single cell in a table.

", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#CellInput": { - "type": "structure", - "members": { - "fact": { - "target": "com.amazonaws.honeycode#Fact", - "traits": { - "smithy.api#documentation": "

\n Fact represents the data that is entered into a cell. This data can be free text or a formula. Formulas need\n to start with the equals (=) sign.\n

" - } - }, - "facts": { - "target": "com.amazonaws.honeycode#FactList", - "traits": { - "smithy.api#documentation": "

\n A list representing the values that are entered into a ROWSET cell. Facts list can have either only values\n or rowIDs, and rowIDs should from the same table.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n CellInput object contains the data needed to create or update cells in a table.\n

\n \n

\n CellInput object has only a facts field or a fact field, but not both. A 400 bad request will be\n thrown if both fact and facts field are present.\n

\n
" - } - }, - "com.amazonaws.honeycode#Cells": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#Cell" - } - }, - "com.amazonaws.honeycode#ClientRequestToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 32, - "max": 64 - }, - "smithy.api#pattern": "^(?!\\s*$).+$" - } - }, - "com.amazonaws.honeycode#ColumnMetadata": { - "type": "structure", - "members": { - "name": { - "target": "com.amazonaws.honeycode#Name", - "traits": { - "smithy.api#documentation": "

The name of the column.

", - "smithy.api#required": {} - } - }, - "format": { - "target": "com.amazonaws.honeycode#Format", - "traits": { - "smithy.api#documentation": "

The format of the column.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

Metadata for column in the table.

" - } - }, - "com.amazonaws.honeycode#CreateRowData": { - "type": "structure", - "members": { - "batchItemId": { - "target": "com.amazonaws.honeycode#BatchItemId", - "traits": { - "smithy.api#documentation": "

\n An external identifier that represents the single row that is being created as part of the\n BatchCreateTableRows request. This can be any string that you can use to identify the row in the request.\n The BatchCreateTableRows API puts the batch item id in the results to allow you to link data in the\n request to data in the results.\n

", - "smithy.api#required": {} - } - }, - "cellsToCreate": { - "target": "com.amazonaws.honeycode#RowDataInput", - "traits": { - "smithy.api#documentation": "

\n A map representing the cells to create in the new row. The key is the column id of the\n cell and the value is the CellInput object that represents the data to set in that cell.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Data needed to create a single row in a table as part of the BatchCreateTableRows request.\n

" - } - }, - "com.amazonaws.honeycode#CreateRowDataList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#CreateRowData" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#CreatedRowsMap": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#BatchItemId" - }, - "value": { - "target": "com.amazonaws.honeycode#RowId" - } - }, - "com.amazonaws.honeycode#DataItem": { - "type": "structure", - "members": { - "overrideFormat": { - "target": "com.amazonaws.honeycode#Format", - "traits": { - "smithy.api#documentation": "

\n The overrideFormat is optional and is specified only if a particular row of data has a different format for\n the data than the default format defined on the screen or the table.\n

" - } - }, - "rawValue": { - "target": "com.amazonaws.honeycode#RawValue", - "traits": { - "smithy.api#documentation": "

The raw value of the data. e.g. jsmith@example.com

" - } - }, - "formattedValue": { - "target": "com.amazonaws.honeycode#FormattedValue", - "traits": { - "smithy.api#documentation": "

The formatted value of the data. e.g. John Smith.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The data in a particular data cell defined on the screen.

", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#DataItems": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#DataItem" - } - }, - "com.amazonaws.honeycode#DelimitedTextDelimiter": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1 - }, - "smithy.api#pattern": "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]?$" - } - }, - "com.amazonaws.honeycode#DelimitedTextImportOptions": { - "type": "structure", - "members": { - "delimiter": { - "target": "com.amazonaws.honeycode#DelimitedTextDelimiter", - "traits": { - "smithy.api#documentation": "

The delimiter to use for separating columns in a single row of the input.

", - "smithy.api#required": {} - } - }, - "hasHeaderRow": { - "target": "com.amazonaws.honeycode#HasHeaderRow", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

Indicates whether the input file has a header row at the top containing the column names.

" - } - }, - "ignoreEmptyRows": { - "target": "com.amazonaws.honeycode#IgnoreEmptyRows", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "

A parameter to indicate whether empty rows should be ignored or be included in the import.

" - } - }, - "dataCharacterEncoding": { - "target": "com.amazonaws.honeycode#ImportDataCharacterEncoding", - "traits": { - "smithy.api#documentation": "

The encoding of the data in the input file.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n An object that contains the options relating to parsing delimited text as part of an import request.\n

" - } - }, - "com.amazonaws.honeycode#DescribeTableDataImportJob": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#DescribeTableDataImportJobRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#DescribeTableDataImportJobResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The DescribeTableDataImportJob API allows you to retrieve the status and details of a table data import job.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/workbooks/{workbookId}/tables/{tableId}/import/{jobId}", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#DescribeTableDataImportJobRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook into which data was imported.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table into which data was imported.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "jobId": { - "target": "com.amazonaws.honeycode#JobId", - "traits": { - "smithy.api#documentation": "

The ID of the job that was returned by the StartTableDataImportJob request.

\n

\n If a job with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#DescribeTableDataImportJobResult": { - "type": "structure", - "members": { - "jobStatus": { - "target": "com.amazonaws.honeycode#TableDataImportJobStatus", - "traits": { - "smithy.api#documentation": "

\n The current status of the import job.\n

", - "smithy.api#required": {} - } - }, - "message": { - "target": "com.amazonaws.honeycode#TableDataImportJobMessage", - "traits": { - "smithy.api#documentation": "

\n A message providing more details about the current status of the import job.\n

", - "smithy.api#required": {} - } - }, - "jobMetadata": { - "target": "com.amazonaws.honeycode#TableDataImportJobMetadata", - "traits": { - "smithy.api#documentation": "

\n The metadata about the job that was submitted for import.\n

", - "smithy.api#required": {} - } - }, - "errorCode": { - "target": "com.amazonaws.honeycode#ErrorCode", - "traits": { - "smithy.api#documentation": "

\n If job status is failed, error code to understand reason for the failure.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#DestinationOptions": { - "type": "structure", - "members": { - "columnMap": { - "target": "com.amazonaws.honeycode#ImportColumnMap", - "traits": { - "smithy.api#documentation": "

A map of the column id to the import properties for each column.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that contains the options relating to the destination of the import request.

" - } - }, - "com.amazonaws.honeycode#Email": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 3, - "max": 254 - }, - "smithy.api#pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#ErrorCode": { - "type": "enum", - "members": { - "AccessDenied": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACCESS_DENIED" - } - }, - "InvalidUrlError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INVALID_URL_ERROR" - } - }, - "InvalidImportOptionsError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INVALID_IMPORT_OPTIONS_ERROR" - } - }, - "InvalidTableIdError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INVALID_TABLE_ID_ERROR" - } - }, - "InvalidTableColumnIdError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INVALID_TABLE_COLUMN_ID_ERROR" - } - }, - "TableNotFoundError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TABLE_NOT_FOUND_ERROR" - } - }, - "FileEmptyError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FILE_EMPTY_ERROR" - } - }, - "InvalidFileTypeError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INVALID_FILE_TYPE_ERROR" - } - }, - "FileParsingError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FILE_PARSING_ERROR" - } - }, - "FileSizeLimitError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FILE_SIZE_LIMIT_ERROR" - } - }, - "FileNotFoundError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FILE_NOT_FOUND_ERROR" - } - }, - "UnknownError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UNKNOWN_ERROR" - } - }, - "ResourceNotFoundError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RESOURCE_NOT_FOUND_ERROR" - } - }, - "SystemLimitError": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SYSTEM_LIMIT_ERROR" - } - } - } - }, - "com.amazonaws.honeycode#ErrorMessage": { - "type": "string" - }, - "com.amazonaws.honeycode#Fact": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 8192 - }, - "smithy.api#pattern": "^[\\s\\S]*$", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#FactList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#Fact" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 220 - } - } - }, - "com.amazonaws.honeycode#FailedBatchItem": { - "type": "structure", - "members": { - "id": { - "target": "com.amazonaws.honeycode#BatchItemId", - "traits": { - "smithy.api#documentation": "

\n The id of the batch item that failed. This is the batch item id for the BatchCreateTableRows and\n BatchUpsertTableRows operations and the row id for the BatchUpdateTableRows and BatchDeleteTableRows\n operations.\n

", - "smithy.api#required": {} - } - }, - "errorMessage": { - "target": "com.amazonaws.honeycode#BatchErrorMessage", - "traits": { - "smithy.api#documentation": "

\n The error message that indicates why the batch item failed.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n A single item in a batch that failed to perform the intended action because of an error preventing it from\n succeeding.\n

" - } - }, - "com.amazonaws.honeycode#FailedBatchItems": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#FailedBatchItem" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#Filter": { - "type": "structure", - "members": { - "formula": { - "target": "com.amazonaws.honeycode#Formula", - "traits": { - "smithy.api#documentation": "

\n A formula representing a filter function that returns zero or more matching rows from a table. Valid\n formulas in this field return a list of rows from a table. The most common ways of writing a formula to\n return a list of rows are to use the FindRow() or Filter() functions. Any other formula that returns zero or\n more rows is also acceptable. For example, you can use a formula that points to a cell that contains a\n filter function.\n

", - "smithy.api#required": {} - } - }, - "contextRowId": { - "target": "com.amazonaws.honeycode#RowId", - "traits": { - "smithy.api#documentation": "

\n The optional contextRowId attribute can be used to specify the row id of the context row if the filter\n formula contains unqualified references to table columns and needs a context row to evaluate them\n successfully.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n An object that represents a filter formula along with the id of the context row under which the filter\n function needs to evaluate.\n

" - } - }, - "com.amazonaws.honeycode#Format": { - "type": "enum", - "members": { - "Auto": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AUTO" - } - }, - "Number": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NUMBER" - } - }, - "Currency": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CURRENCY" - } - }, - "Date": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DATE" - } - }, - "Time": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TIME" - } - }, - "DateTime": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DATE_TIME" - } - }, - "Percentage": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PERCENTAGE" - } - }, - "Text": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TEXT" - } - }, - "Accounting": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACCOUNTING" - } - }, - "Contact": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CONTACT" - } - }, - "Rowlink": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ROWLINK" - } - }, - "Rowset": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ROWSET" - } - } - } - }, - "com.amazonaws.honeycode#FormattedValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 8192 - }, - "smithy.api#pattern": "^[\\s\\S]*$" - } - }, - "com.amazonaws.honeycode#FormattedValuesList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#FormattedValue" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 220 - } - } - }, - "com.amazonaws.honeycode#Formula": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 8192 - }, - "smithy.api#pattern": "^=", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#GetScreenData": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#GetScreenDataRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#GetScreenDataResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The GetScreenData API allows retrieval of data from a screen in a Honeycode app.\n The API allows setting local variables in the screen to filter, sort or otherwise affect what will be\n displayed on the screen.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/screendata", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#GetScreenDataRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook that contains the screen.

", - "smithy.api#required": {} - } - }, - "appId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the app that contains the screen.

", - "smithy.api#required": {} - } - }, - "screenId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the screen.

", - "smithy.api#required": {} - } - }, - "variables": { - "target": "com.amazonaws.honeycode#VariableValueMap", - "traits": { - "smithy.api#documentation": "

\n Variables are optional and are needed only if the screen requires them to render correctly. Variables are\n specified as a map where the key is the name of the variable as defined on the screen. The value is an\n object which currently has only one property, rawValue, which holds the value of the variable to be passed\n to the screen.\n

" - } - }, - "maxResults": { - "target": "com.amazonaws.honeycode#MaxResults", - "traits": { - "smithy.api#documentation": "

\n The number of results to be returned on a single page.\n Specify a number between 1 and 100. The maximum value is 100.\n

\n

\n This parameter is optional. If you don't specify this parameter, the default page size is 100.\n

" - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n This parameter is optional. If a nextToken is not specified, the API returns the first page of data.\n

\n

\n Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API\n will throw ValidationException.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#GetScreenDataResult": { - "type": "structure", - "members": { - "results": { - "target": "com.amazonaws.honeycode#ResultSetMap", - "traits": { - "smithy.api#documentation": "

A map of all the rows on the screen keyed by block name.

", - "smithy.api#required": {} - } - }, - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Indicates the cursor of the workbook at which the data returned by this workbook is read. Workbook cursor\n keeps increasing with every update and the increments are not sequential.\n

", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n Provides the pagination token to load the next page if there are more results matching the request. If a\n pagination token is not present in the response, it means that all data matching the query has been loaded.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#HasHeaderRow": { - "type": "boolean", - "traits": { - "smithy.api#default": false - } - }, - "com.amazonaws.honeycode#IgnoreEmptyRows": { - "type": "boolean", - "traits": { - "smithy.api#default": false - } - }, - "com.amazonaws.honeycode#ImportColumnMap": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#ResourceId" - }, - "value": { - "target": "com.amazonaws.honeycode#SourceDataColumnProperties" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#ImportDataCharacterEncoding": { - "type": "enum", - "members": { - "UTF_8": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UTF-8" - } - }, - "US_ASCII": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "US-ASCII" - } - }, - "ISO_8859_1": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ISO-8859-1" - } - }, - "UTF_16BE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UTF-16BE" - } - }, - "UTF_16LE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UTF-16LE" - } - }, - "UTF_16": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UTF-16" - } - } - } - }, - "com.amazonaws.honeycode#ImportDataSource": { - "type": "structure", - "members": { - "dataSourceConfig": { - "target": "com.amazonaws.honeycode#ImportDataSourceConfig", - "traits": { - "smithy.api#documentation": "

The configuration parameters for the data source of the import

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that has details about the source of the data that was submitted for import.

" - } - }, - "com.amazonaws.honeycode#ImportDataSourceConfig": { - "type": "structure", - "members": { - "dataSourceUrl": { - "target": "com.amazonaws.honeycode#SecureURL", - "traits": { - "smithy.api#documentation": "

\n The URL from which source data will be downloaded for the import request.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

\n An object that contains the configuration parameters for the data source of an import request.\n

" - } - }, - "com.amazonaws.honeycode#ImportJobSubmitter": { - "type": "structure", - "members": { - "email": { - "target": "com.amazonaws.honeycode#Email", - "traits": { - "smithy.api#documentation": "

The email id of the submitter of the import job, if available.

" - } - }, - "userArn": { - "target": "com.amazonaws.honeycode#AwsUserArn", - "traits": { - "smithy.api#documentation": "

The AWS user ARN of the submitter of the import job, if available.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that contains the attributes of the submitter of the import job.

" - } - }, - "com.amazonaws.honeycode#ImportOptions": { - "type": "structure", - "members": { - "destinationOptions": { - "target": "com.amazonaws.honeycode#DestinationOptions", - "traits": { - "smithy.api#documentation": "

Options relating to the destination of the import request.

" - } - }, - "delimitedTextOptions": { - "target": "com.amazonaws.honeycode#DelimitedTextImportOptions", - "traits": { - "smithy.api#documentation": "

Options relating to parsing delimited text. Required if dataFormat is DELIMITED_TEXT.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that contains the options specified by the submitter of the import request.

" - } - }, - "com.amazonaws.honeycode#ImportSourceDataFormat": { - "type": "enum", - "members": { - "DelimitedText": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELIMITED_TEXT" - } - } - } - }, - "com.amazonaws.honeycode#InternalServerException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

There were unexpected errors from the server.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.honeycode#InvokeScreenAutomation": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#InvokeScreenAutomationRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#InvokeScreenAutomationResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#AutomationExecutionException" - }, - { - "target": "com.amazonaws.honeycode#AutomationExecutionTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The InvokeScreenAutomation API allows invoking an action defined in a screen in a Honeycode app.\n The API allows setting local variables, which can then be used in the automation being invoked.\n This allows automating the Honeycode app interactions to write, update or delete data in the workbook.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/apps/{appId}/screens/{screenId}/automations/{screenAutomationId}", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#InvokeScreenAutomationRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook that contains the screen automation.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "appId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the app that contains the screen automation.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "screenId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the screen that contains the screen automation.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "screenAutomationId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the automation action to be performed.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "variables": { - "target": "com.amazonaws.honeycode#VariableValueMap", - "traits": { - "smithy.api#documentation": "

\n Variables are specified as a map where the key is the name of the variable as defined on the screen. The value is an\n object which currently has only one property, rawValue, which holds the value of the variable to be passed\n to the screen. Any variables defined in a screen are required to be passed in the call.\n

" - } - }, - "rowId": { - "target": "com.amazonaws.honeycode#RowId", - "traits": { - "smithy.api#documentation": "

\n The row ID for the automation if the automation is defined inside a block with source or list.\n

" - } - }, - "clientRequestToken": { - "target": "com.amazonaws.honeycode#ClientRequestToken", - "traits": { - "smithy.api#documentation": "

\n The request token for performing the automation action.\n Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error\n like a failed network connection, you can retry the call with the same request token. The service ensures\n that if the first call using that request token is successfully performed, the second call will return the\n response of the previous call rather than performing the action again.\n

\n

\n Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests\n spanning hours or days.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#InvokeScreenAutomationResult": { - "type": "structure", - "members": { - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

The updated workbook cursor after performing the automation action.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#JobId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - }, - "smithy.api#pattern": "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" - } - }, - "com.amazonaws.honeycode#ListTableColumns": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#ListTableColumnsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#ListTableColumnsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The ListTableColumns API allows you to retrieve a list of all the columns in a table in a workbook.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/workbooks/{workbookId}/tables/{tableId}/columns", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "tableColumns" - } - } - }, - "com.amazonaws.honeycode#ListTableColumnsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook that contains the table whose columns are being retrieved.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table whose columns are being retrieved.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n This parameter is optional. If a nextToken is not specified, the API returns the first page of data.\n

\n

\n Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API\n will throw ValidationException.\n

", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#ListTableColumnsResult": { - "type": "structure", - "members": { - "tableColumns": { - "target": "com.amazonaws.honeycode#TableColumns", - "traits": { - "smithy.api#documentation": "

\n The list of columns in the table.\n

", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n Provides the pagination token to load the next page if there are more results matching the request. If a\n pagination token is not present in the response, it means that all data matching the request has been\n loaded.\n

" - } - }, - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor\n keeps increasing with every update and the increments are not sequential.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#ListTableRows": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#ListTableRowsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#ListTableRowsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The ListTableRows API allows you to retrieve a list of all the rows in a table in a workbook.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{tableId}/rows/list", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "rows", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.honeycode#ListTableRowsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook that contains the table whose rows are being retrieved.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table whose rows are being retrieved.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "rowIds": { - "target": "com.amazonaws.honeycode#RowIdList", - "traits": { - "smithy.api#documentation": "

\n This parameter is optional. If one or more row ids are specified in this list, then only the specified\n row ids are returned in the result. If no row ids are specified here, then all the rows in the table are\n returned.\n

" - } - }, - "maxResults": { - "target": "com.amazonaws.honeycode#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of rows to return in each page of the results.

" - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n This parameter is optional. If a nextToken is not specified, the API returns the first page of data.\n

\n

\n Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API\n will throw ValidationException.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#ListTableRowsResult": { - "type": "structure", - "members": { - "columnIds": { - "target": "com.amazonaws.honeycode#ResourceIds", - "traits": { - "smithy.api#documentation": "

\n The list of columns in the table whose row data is returned in the result.\n

", - "smithy.api#required": {} - } - }, - "rows": { - "target": "com.amazonaws.honeycode#TableRows", - "traits": { - "smithy.api#documentation": "

\n The list of rows in the table. Note that this result is paginated, so this list contains a maximum of 100\n rows.\n

", - "smithy.api#required": {} - } - }, - "rowIdsNotFound": { - "target": "com.amazonaws.honeycode#RowIdList", - "traits": { - "smithy.api#documentation": "

\n The list of row ids included in the request that were not found in the table.\n

" - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n Provides the pagination token to load the next page if there are more results matching the request. If a\n pagination token is not present in the response, it means that all data matching the request has been\n loaded.\n

" - } - }, - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor\n keeps increasing with every update and the increments are not sequential.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#ListTables": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#ListTablesRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#ListTablesResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The ListTables API allows you to retrieve a list of all the tables in a workbook.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/workbooks/{workbookId}/tables", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "tables", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.honeycode#ListTablesRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook whose tables are being retrieved.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "maxResults": { - "target": "com.amazonaws.honeycode#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of tables to return in each page of the results.

", - "smithy.api#httpQuery": "maxResults" - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n This parameter is optional. If a nextToken is not specified, the API returns the first page of data.\n

\n

\n Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API\n will throw ValidationException.\n

", - "smithy.api#httpQuery": "nextToken" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#ListTablesResult": { - "type": "structure", - "members": { - "tables": { - "target": "com.amazonaws.honeycode#Tables", - "traits": { - "smithy.api#documentation": "

\n The list of tables in the workbook.\n

", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n Provides the pagination token to load the next page if there are more results matching the request. If a\n pagination token is not present in the response, it means that all data matching the request has been\n loaded.\n

" - } - }, - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor\n keeps increasing with every update and the increments are not sequential.\n

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#ListTagsForResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#ListTagsForResourceRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#ListTagsForResourceResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The ListTagsForResource API allows you to return a resource's tags.\n

", - "smithy.api#http": { - "method": "GET", - "uri": "/tags/{resourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#ListTagsForResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.honeycode#ResourceArn", - "traits": { - "smithy.api#documentation": "

The resource's Amazon Resource Name (ARN).

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#ListTagsForResourceResult": { - "type": "structure", - "members": { - "tags": { - "target": "com.amazonaws.honeycode#TagsMap", - "traits": { - "smithy.api#documentation": "

The resource's tags.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#Name": { - "type": "string", - "traits": { - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#PaginationToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1024 - }, - "smithy.api#pattern": "^(?!\\s*$).+$" - } - }, - "com.amazonaws.honeycode#QueryTableRows": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#QueryTableRowsRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#QueryTableRowsResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The QueryTableRows API allows you to use a filter formula to query for specific rows in a table.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{tableId}/rows/query", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "rows", - "pageSize": "maxResults" - } - } - }, - "com.amazonaws.honeycode#QueryTableRowsRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook whose table rows are being queried.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table whose rows are being queried.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "filterFormula": { - "target": "com.amazonaws.honeycode#Filter", - "traits": { - "smithy.api#documentation": "

An object that represents a filter formula along with the id of the context row under which the filter\n function needs to evaluate.

", - "smithy.api#required": {} - } - }, - "maxResults": { - "target": "com.amazonaws.honeycode#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of rows to return in each page of the results.

" - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n This parameter is optional. If a nextToken is not specified, the API returns the first page of data.\n

\n

\n Pagination tokens expire after 1 hour. If you use a token that was returned more than an hour back, the API\n will throw ValidationException.\n

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#QueryTableRowsResult": { - "type": "structure", - "members": { - "columnIds": { - "target": "com.amazonaws.honeycode#ResourceIds", - "traits": { - "smithy.api#documentation": "

\n The list of columns in the table whose row data is returned in the result.\n

", - "smithy.api#required": {} - } - }, - "rows": { - "target": "com.amazonaws.honeycode#TableRows", - "traits": { - "smithy.api#documentation": "

\n The list of rows in the table that match the query filter.\n

", - "smithy.api#required": {} - } - }, - "nextToken": { - "target": "com.amazonaws.honeycode#PaginationToken", - "traits": { - "smithy.api#documentation": "

\n Provides the pagination token to load the next page if there are more results matching the request. If a\n pagination token is not present in the response, it means that all data matching the request has been\n loaded.\n

" - } - }, - "workbookCursor": { - "target": "com.amazonaws.honeycode#WorkbookCursor", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "

\n Indicates the cursor of the workbook at which the data returned by this request is read. Workbook cursor\n keeps increasing with every update and the increments are not sequential.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#RawValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 32767 - }, - "smithy.api#pattern": "^[\\s\\S]*$" - } - }, - "com.amazonaws.honeycode#RequestTimeoutException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The request timed out.

", - "smithy.api#error": "server", - "smithy.api#httpError": 504 - } - }, - "com.amazonaws.honeycode#ResourceArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - }, - "smithy.api#pattern": "^arn:aws:honeycode:.+:[0-9]{12}:.+:.+$" - } - }, - "com.amazonaws.honeycode#ResourceId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 36, - "max": 36 - }, - "smithy.api#pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" - } - }, - "com.amazonaws.honeycode#ResourceIds": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#ResourceId" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#ResourceNotFoundException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

A Workbook, Table, App, Screen or Screen Automation was not found with the given ID.

", - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.honeycode#ResultHeader": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#ColumnMetadata" - } - }, - "com.amazonaws.honeycode#ResultRow": { - "type": "structure", - "members": { - "rowId": { - "target": "com.amazonaws.honeycode#RowId", - "traits": { - "smithy.api#documentation": "

The ID for a particular row.

" - } - }, - "dataItems": { - "target": "com.amazonaws.honeycode#DataItems", - "traits": { - "smithy.api#documentation": "

List of all the data cells in a row.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

A single row in the ResultSet.

" - } - }, - "com.amazonaws.honeycode#ResultRows": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#ResultRow" - } - }, - "com.amazonaws.honeycode#ResultSet": { - "type": "structure", - "members": { - "headers": { - "target": "com.amazonaws.honeycode#ResultHeader", - "traits": { - "smithy.api#documentation": "

\n List of headers for all the data cells in the block. The header identifies the name and default format of\n the data cell. Data cells appear in the same order in all rows as defined in the header. The names and\n formats are not repeated in the rows. If a particular row does not have a value for a data cell, a blank\n value is used.\n

\n

\n For example, a task list that displays the task name, due date and assigned person might have headers\n [ { \"name\": \"Task Name\"}, {\"name\": \"Due Date\", \"format\": \"DATE\"}, {\"name\": \"Assigned\", \"format\": \"CONTACT\"} ].\n Every row in the result will have the task name as the first item, due date as the second item and assigned\n person as the third item. If a particular task does not have a due date, that row will still have a blank\n value in the second element and the assigned person will still be in the third element.\n

", - "smithy.api#required": {} - } - }, - "rows": { - "target": "com.amazonaws.honeycode#ResultRows", - "traits": { - "smithy.api#documentation": "

\n List of rows returned by the request. Each row has a row Id and a list of data cells in that row. The data\n cells will be present in the same order as they are defined in the header.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n ResultSet contains the results of the request for a single block or list defined on the screen.\n

" - } - }, - "com.amazonaws.honeycode#ResultSetMap": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#Name" - }, - "value": { - "target": "com.amazonaws.honeycode#ResultSet" - } - }, - "com.amazonaws.honeycode#RowDataInput": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#ResourceId" - }, - "value": { - "target": "com.amazonaws.honeycode#CellInput" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#RowId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 77, - "max": 77 - }, - "smithy.api#pattern": "^row:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\\/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" - } - }, - "com.amazonaws.honeycode#RowIdList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#RowId" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#SecureURL": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 8000 - }, - "smithy.api#pattern": "^https:\\/\\/[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#ServiceQuotaExceededException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n The request caused service quota to be breached.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 402 - } - }, - "com.amazonaws.honeycode#ServiceUnavailableException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

Remote service is unreachable.

", - "smithy.api#error": "server", - "smithy.api#httpError": 503 - } - }, - "com.amazonaws.honeycode#SheetsPublicApiService": { - "type": "service", - "version": "2020-03-01", - "operations": [ - { - "target": "com.amazonaws.honeycode#BatchCreateTableRows" - }, - { - "target": "com.amazonaws.honeycode#BatchDeleteTableRows" - }, - { - "target": "com.amazonaws.honeycode#BatchUpdateTableRows" - }, - { - "target": "com.amazonaws.honeycode#BatchUpsertTableRows" - }, - { - "target": "com.amazonaws.honeycode#DescribeTableDataImportJob" - }, - { - "target": "com.amazonaws.honeycode#GetScreenData" - }, - { - "target": "com.amazonaws.honeycode#InvokeScreenAutomation" - }, - { - "target": "com.amazonaws.honeycode#ListTableColumns" - }, - { - "target": "com.amazonaws.honeycode#ListTableRows" - }, - { - "target": "com.amazonaws.honeycode#ListTables" - }, - { - "target": "com.amazonaws.honeycode#ListTagsForResource" - }, - { - "target": "com.amazonaws.honeycode#QueryTableRows" - }, - { - "target": "com.amazonaws.honeycode#StartTableDataImportJob" - }, - { - "target": "com.amazonaws.honeycode#TagResource" - }, - { - "target": "com.amazonaws.honeycode#UntagResource" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "Honeycode", - "arnNamespace": "honeycode", - "cloudFormationName": "Honeycode", - "cloudTrailEventSource": "honeycode.amazonaws.com", - "endpointPrefix": "honeycode" - }, - "aws.auth#sigv4": { - "name": "honeycode" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

\n Amazon Honeycode is a fully managed service that allows you to quickly build mobile and web apps for teams—without\n programming. Build Honeycode apps for managing almost anything, like projects, customers, operations, approvals,\n resources, and even your team.\n

", - "smithy.api#title": "Amazon Honeycode", - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://honeycode-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://honeycode-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS is enabled but this 
partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://honeycode.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://honeycode.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://honeycode.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.cn-north-1.amazonaws.com.cn" - 
} - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://honeycode.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - 
"documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - }, - "com.amazonaws.honeycode#SourceDataColumnIndex": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1 - } - } - }, - "com.amazonaws.honeycode#SourceDataColumnProperties": { - "type": "structure", - "members": { - "columnIndex": { - "target": "com.amazonaws.honeycode#SourceDataColumnIndex", - "traits": { - "smithy.api#documentation": "

The index of the column in the input file.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that contains the properties for importing data to a specific column in a table.

" - } - }, - "com.amazonaws.honeycode#StartTableDataImportJob": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#StartTableDataImportJobRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#StartTableDataImportJobResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The StartTableDataImportJob API allows you to start an import job on a table. This API will only return\n the id of the job that was started. To find out the status of the import request, you need to call the\n DescribeTableDataImportJob API.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/workbooks/{workbookId}/tables/{destinationTableId}/import", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#StartTableDataImportJobRequest": { - "type": "structure", - "members": { - "workbookId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the workbook where the rows are being imported.

\n

\n If a workbook with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "dataSource": { - "target": "com.amazonaws.honeycode#ImportDataSource", - "traits": { - "smithy.api#documentation": "

\n The source of the data that is being imported. The size of source must be no larger than 100 MB.\n Source must have no more than 100,000 cells and no more than 1,000 rows.\n

", - "smithy.api#required": {} - } - }, - "dataFormat": { - "target": "com.amazonaws.honeycode#ImportSourceDataFormat", - "traits": { - "smithy.api#documentation": "

\n The format of the data that is being imported. Currently the only option supported is \"DELIMITED_TEXT\".\n

", - "smithy.api#required": {} - } - }, - "destinationTableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The ID of the table where the rows are being imported.

\n

\n If a table with the specified id could not be found, this API throws ResourceNotFoundException.\n

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "importOptions": { - "target": "com.amazonaws.honeycode#ImportOptions", - "traits": { - "smithy.api#documentation": "

\n The options for customizing this import request.\n

", - "smithy.api#required": {} - } - }, - "clientRequestToken": { - "target": "com.amazonaws.honeycode#ClientRequestToken", - "traits": { - "smithy.api#documentation": "

\n The request token for performing the update action.\n Request tokens help to identify duplicate requests. If a call times out or fails due to a transient error\n like a failed network connection, you can retry the call with the same request token. The service ensures\n that if the first call using that request token is successfully performed, the second call will not perform\n the action again.\n

\n

\n Note that request tokens are valid only for a few minutes. You cannot use request tokens to dedupe requests\n spanning hours or days.\n
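The request-token behaviour described above is the usual idempotency pattern: generate one token, then reuse it for every retry of the same logical request within that short validity window. A minimal, hypothetical sketch of that retry loop (generic Swift, not part of the generated Honeycode client, which this change removes):

```swift
import Foundation

// Sketch of the clientRequestToken retry pattern described above: one token is
// generated up front and reused for every attempt, so the service performs the
// import at most once even if a transient failure forces a retry.
func startImportWithRetry(
    maxAttempts: Int = 3,
    _ start: (String) async throws -> String   // stands in for the actual API call; returns a job id
) async throws -> String {
    precondition(maxAttempts > 0)
    let token = UUID().uuidString               // valid only for a few minutes
    var lastError: Error = CancellationError()
    for _ in 0..<maxAttempts {
        do { return try await start(token) }
        catch { lastError = error }             // e.g. a timed-out connection; retry with the same token
    }
    throw lastError                             // every retry with the same token failed
}
```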

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#StartTableDataImportJobResult": { - "type": "structure", - "members": { - "jobId": { - "target": "com.amazonaws.honeycode#JobId", - "traits": { - "smithy.api#documentation": "

\n The id that is assigned to this import job. Future requests to find out the status of this import job\n need to send this id in the appropriate parameter in the request.\n

", - "smithy.api#required": {} - } - }, - "jobStatus": { - "target": "com.amazonaws.honeycode#TableDataImportJobStatus", - "traits": { - "smithy.api#documentation": "

\n The status of the import job immediately after submitting the request.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#Table": { - "type": "structure", - "members": { - "tableId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The id of the table.

" - } - }, - "tableName": { - "target": "com.amazonaws.honeycode#TableName", - "traits": { - "smithy.api#documentation": "

The name of the table.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object representing the properties of a table in a workbook.

" - } - }, - "com.amazonaws.honeycode#TableColumn": { - "type": "structure", - "members": { - "tableColumnId": { - "target": "com.amazonaws.honeycode#ResourceId", - "traits": { - "smithy.api#documentation": "

The id of the column in the table.

" - } - }, - "tableColumnName": { - "target": "com.amazonaws.honeycode#TableColumnName", - "traits": { - "smithy.api#documentation": "

The name of the column in the table.

" - } - }, - "format": { - "target": "com.amazonaws.honeycode#Format", - "traits": { - "smithy.api#documentation": "

\n The column level format that is applied in the table. An empty value in this field means that the\n column format is the default value 'AUTO'.\n

" - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that contains attributes about a single column in a table.

" - } - }, - "com.amazonaws.honeycode#TableColumnName": { - "type": "string" - }, - "com.amazonaws.honeycode#TableColumns": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#TableColumn" - } - }, - "com.amazonaws.honeycode#TableDataImportJobMessage": { - "type": "string" - }, - "com.amazonaws.honeycode#TableDataImportJobMetadata": { - "type": "structure", - "members": { - "submitter": { - "target": "com.amazonaws.honeycode#ImportJobSubmitter", - "traits": { - "smithy.api#documentation": "

Details about the submitter of the import request.

", - "smithy.api#required": {} - } - }, - "submitTime": { - "target": "com.amazonaws.honeycode#TimestampInMillis", - "traits": { - "smithy.api#documentation": "

The timestamp when the job was submitted for import.

", - "smithy.api#required": {} - } - }, - "importOptions": { - "target": "com.amazonaws.honeycode#ImportOptions", - "traits": { - "smithy.api#documentation": "

The options that was specified at the time of submitting the import request.

", - "smithy.api#required": {} - } - }, - "dataSource": { - "target": "com.amazonaws.honeycode#ImportDataSource", - "traits": { - "smithy.api#documentation": "

The source of the data that was submitted for import.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

The metadata associated with the table data import job that was submitted.

" - } - }, - "com.amazonaws.honeycode#TableDataImportJobStatus": { - "type": "enum", - "members": { - "Submitted": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SUBMITTED" - } - }, - "InProgress": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IN_PROGRESS" - } - }, - "Completed": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "COMPLETED" - } - }, - "Failed": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED" - } - } - } - }, - "com.amazonaws.honeycode#TableName": { - "type": "string" - }, - "com.amazonaws.honeycode#TableRow": { - "type": "structure", - "members": { - "rowId": { - "target": "com.amazonaws.honeycode#RowId", - "traits": { - "smithy.api#documentation": "

The id of the row in the table.

", - "smithy.api#required": {} - } - }, - "cells": { - "target": "com.amazonaws.honeycode#Cells", - "traits": { - "smithy.api#documentation": "

A list of cells in the table row. The cells appear in the same order as the columns of the table.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

An object that contains attributes about a single row in a table.

" - } - }, - "com.amazonaws.honeycode#TableRows": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#TableRow" - } - }, - "com.amazonaws.honeycode#Tables": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#Table" - } - }, - "com.amazonaws.honeycode#TagKey": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - }, - "smithy.api#pattern": "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" - } - }, - "com.amazonaws.honeycode#TagKeysList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#TagKey" - }, - "traits": { - "smithy.api#documentation": "

A list of tag keys

", - "smithy.api#length": { - "min": 0, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#TagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#TagResourceRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#TagResourceResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The TagResource API allows you to add tags to an ARN-able resource. Supported resources include workbook, table,\n screen, and screen-automation.\n

", - "smithy.api#http": { - "method": "POST", - "uri": "/tags/{resourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#TagResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.honeycode#ResourceArn", - "traits": { - "smithy.api#documentation": "

The resource's Amazon Resource Name (ARN).

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tags": { - "target": "com.amazonaws.honeycode#TagsMap", - "traits": { - "smithy.api#documentation": "

A list of tags to apply to the resource.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#TagResourceResult": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#TagValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - }, - "smithy.api#pattern": "^[^\\n\\r\\x00\\x08\\x0B\\x0C\\x0E\\x1F]*$" - } - }, - "com.amazonaws.honeycode#TagsMap": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#TagKey" - }, - "value": { - "target": "com.amazonaws.honeycode#TagValue" - }, - "traits": { - "smithy.api#documentation": "

A string to string map representing tags

", - "smithy.api#length": { - "min": 0, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#ThrottlingException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

TPS (transactions per second) rate limit reached.

", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.honeycode#TimestampInMillis": { - "type": "timestamp" - }, - "com.amazonaws.honeycode#UntagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.honeycode#UntagResourceRequest" - }, - "output": { - "target": "com.amazonaws.honeycode#UntagResourceResult" - }, - "errors": [ - { - "target": "com.amazonaws.honeycode#AccessDeniedException" - }, - { - "target": "com.amazonaws.honeycode#InternalServerException" - }, - { - "target": "com.amazonaws.honeycode#RequestTimeoutException" - }, - { - "target": "com.amazonaws.honeycode#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.honeycode#ServiceUnavailableException" - }, - { - "target": "com.amazonaws.honeycode#ThrottlingException" - }, - { - "target": "com.amazonaws.honeycode#ValidationException" - } - ], - "traits": { - "smithy.api#documentation": "

\n The UntagResource API allows you to remove tags from an ARN-able resource. Supported resources include workbook, table,\n screen, and screen-automation.\n

", - "smithy.api#http": { - "method": "DELETE", - "uri": "/tags/{resourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.honeycode#UntagResourceRequest": { - "type": "structure", - "members": { - "resourceArn": { - "target": "com.amazonaws.honeycode#ResourceArn", - "traits": { - "smithy.api#documentation": "

The resource's Amazon Resource Name (ARN).

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "tagKeys": { - "target": "com.amazonaws.honeycode#TagKeysList", - "traits": { - "smithy.api#documentation": "

A list of tag keys to remove from the resource.

", - "smithy.api#httpQuery": "tagKeys", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.honeycode#UntagResourceResult": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.honeycode#UpdateRowData": { - "type": "structure", - "members": { - "rowId": { - "target": "com.amazonaws.honeycode#RowId", - "traits": { - "smithy.api#documentation": "

\n The id of the row that needs to be updated.\n

", - "smithy.api#required": {} - } - }, - "cellsToUpdate": { - "target": "com.amazonaws.honeycode#RowDataInput", - "traits": { - "smithy.api#documentation": "

\n A map representing the cells to update in the given row. The key is the column id of the\n cell and the value is the CellInput object that represents the data to set in that cell.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Data needed to update a single row in a table as part of the BatchUpdateTableRows request.\n

" - } - }, - "com.amazonaws.honeycode#UpdateRowDataList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#UpdateRowData" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } - } - }, - "com.amazonaws.honeycode#UpsertAction": { - "type": "enum", - "members": { - "Updated": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATED" - } - }, - "Appended": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "APPENDED" - } - } - } - }, - "com.amazonaws.honeycode#UpsertRowData": { - "type": "structure", - "members": { - "batchItemId": { - "target": "com.amazonaws.honeycode#BatchItemId", - "traits": { - "smithy.api#documentation": "

\n An external identifier that represents a single item in the request that is being upserted as part of the\n BatchUpsertTableRows request. This can be any string that you can use to identify the item in the request.\n The BatchUpsertTableRows API puts the batch item id in the results to allow you to link data in the\n request to data in the results.\n

", - "smithy.api#required": {} - } - }, - "filter": { - "target": "com.amazonaws.honeycode#Filter", - "traits": { - "smithy.api#documentation": "

\n The filter formula to use to find existing matching rows to update. The formula needs to return zero or more\n rows. If the formula returns 0 rows, then a new row will be appended in the target table. If the formula\n returns one or more rows, then the returned rows will be updated.\n

\n

\n Note that the filter formula needs to return rows from the target table for the upsert operation to succeed.\n If the filter formula has a syntax error or it doesn't evaluate to zero or more rows in the target table\n for any one item in the input list, then the entire BatchUpsertTableRows request fails and no updates are\n made to the table.\n

", - "smithy.api#required": {} - } - }, - "cellsToUpdate": { - "target": "com.amazonaws.honeycode#RowDataInput", - "traits": { - "smithy.api#documentation": "

\n A map representing the cells to update for the matching rows or an appended row. The key is the column id\n of the cell and the value is the CellInput object that represents the data to set in that cell.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Data needed to upsert rows in a table as part of a single item in the BatchUpsertTableRows request.\n

" - } - }, - "com.amazonaws.honeycode#UpsertRowDataList": { - "type": "list", - "member": { - "target": "com.amazonaws.honeycode#UpsertRowData" - } - }, - "com.amazonaws.honeycode#UpsertRowsResult": { - "type": "structure", - "members": { - "rowIds": { - "target": "com.amazonaws.honeycode#RowIdList", - "traits": { - "smithy.api#documentation": "

\n The list of row ids that were changed as part of an upsert row operation. If the upsert resulted in an\n update, this list could potentially contain multiple rows that matched the filter and hence got updated.\n If the upsert resulted in an append, this list would only have the single row that was appended.\n

", - "smithy.api#required": {} - } - }, - "upsertAction": { - "target": "com.amazonaws.honeycode#UpsertAction", - "traits": { - "smithy.api#documentation": "

\n The result of the upsert action.\n

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n An object that represents the result of a single upsert row request.\n

" - } - }, - "com.amazonaws.honeycode#UpsertRowsResultMap": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#BatchItemId" - }, - "value": { - "target": "com.amazonaws.honeycode#UpsertRowsResult" - } - }, - "com.amazonaws.honeycode#ValidationException": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.honeycode#ErrorMessage", - "traits": { - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

\n Request is invalid. The message in the response contains details on why the request is invalid.\n

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.honeycode#VariableName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 255 - }, - "smithy.api#pattern": "^(?!\\s*$).+$", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#VariableValue": { - "type": "structure", - "members": { - "rawValue": { - "target": "com.amazonaws.honeycode#RawValue", - "traits": { - "smithy.api#documentation": "

Raw value of the variable.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

The input variables to the app to be used by the InvokeScreenAutomation action request.

", - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#VariableValueMap": { - "type": "map", - "key": { - "target": "com.amazonaws.honeycode#VariableName" - }, - "value": { - "target": "com.amazonaws.honeycode#VariableValue" - }, - "traits": { - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.honeycode#WorkbookCursor": { - "type": "long", - "traits": { - "smithy.api#default": 0 - } - } - } -} diff --git a/models/imagebuilder.json b/models/imagebuilder.json index 81ac653883..fead5ea44b 100644 --- a/models/imagebuilder.json +++ b/models/imagebuilder.json @@ -547,7 +547,7 @@ "com.amazonaws.imagebuilder#ComponentBuildVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" } }, "com.amazonaws.imagebuilder#ComponentConfiguration": { @@ -926,13 +926,13 @@ "com.amazonaws.imagebuilder#ComponentVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" } }, "com.amazonaws.imagebuilder#ComponentVersionArnOrBuildVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):component/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" } }, "com.amazonaws.imagebuilder#ComponentVersionList": { @@ -5455,13 +5455,13 @@ "com.amazonaws.imagebuilder#ImageBuildVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" } }, "com.amazonaws.imagebuilder#ImageBuilderArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):(?:image-recipe|container-recipe|infrastructure-configuration|distribution-configuration|component|image|image-pipeline|lifecycle-policy|workflow\\/(?:build|test|distribution))/[a-z0-9-_]+(?:/(?:(?:x|[0-9]+)\\.(?:x|[0-9]+)\\.(?:x|[0-9]+))(?:/[0-9]+)?)?$" } }, "com.amazonaws.imagebuilder#ImagePackage": { @@ -6424,13 +6424,13 @@ "com.amazonaws.imagebuilder#ImageVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" } }, "com.amazonaws.imagebuilder#ImageVersionArnOrBuildVersionArn": { "type": "string", 
"traits": { - "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws):image/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" + "smithy.api#pattern": "^arn:aws[^:]*:imagebuilder:[^:]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):image/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" } }, "com.amazonaws.imagebuilder#ImageVersionList": { @@ -12676,7 +12676,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" + "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+$" } }, "com.amazonaws.imagebuilder#WorkflowConfiguration": { @@ -12882,7 +12882,7 @@ "com.amazonaws.imagebuilder#WorkflowNameArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/x\\.x\\.x$" + "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/x\\.x\\.x$" } }, "com.amazonaws.imagebuilder#WorkflowParameter": { @@ -13464,13 +13464,13 @@ "com.amazonaws.imagebuilder#WorkflowVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" + "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/[0-9]+\\.[0-9]+\\.[0-9]+$" } }, "com.amazonaws.imagebuilder#WorkflowVersionArnOrBuildVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" + "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:(?:([0-9]+|x)\\.([0-9]+|x)\\.([0-9]+|x))|(?:[0-9]+\\.[0-9]+\\.[0-9]+/[0-9]+))$" } }, "com.amazonaws.imagebuilder#WorkflowVersionList": { @@ -13482,7 +13482,7 @@ "com.amazonaws.imagebuilder#WorkflowWildcardVersionArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:[0-9]+|x)\\.(?:[0-9]+|x)\\.(?:[0-9]+|x)$" + "smithy.api#pattern": "^arn:aws(?:-[a-z]+)*:imagebuilder:[a-z]{2,}(?:-[a-z]+)+-[0-9]+:(?:[0-9]{12}|aws(?:-[a-z-]+)?):workflow/(build|test|distribution)/[a-z0-9-_]+/(?:[0-9]+|x)\\.(?:[0-9]+|x)\\.(?:[0-9]+|x)$" } }, "com.amazonaws.imagebuilder#imagebuilder": { diff --git a/models/iot-wireless.json b/models/iot-wireless.json index e703a56407..92d58ae883 100644 --- a/models/iot-wireless.json +++ b/models/iot-wireless.json @@ -4479,6 +4479,12 @@ "traits": { "smithy.api#enumValue": "Successful" } + }, + "Device_exist_in_conflict_fuota_task": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Device_exist_in_conflict_fuota_task" + } } }, "traits": { @@ -10159,6 
+10165,18 @@ "smithy.api#enumValue": "DeviceSNR" } }, + "DeviceRoamingRSSI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeviceRoamingRSSI" + } + }, + "DeviceRoamingSNR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeviceRoamingSNR" + } + }, "DeviceUplinkCount": { "target": "smithy.api#Unit", "traits": { diff --git a/models/iotfleetwise.json b/models/iotfleetwise.json index d45c4923c0..f0bd3f5e59 100644 --- a/models/iotfleetwise.json +++ b/models/iotfleetwise.json @@ -847,7 +847,7 @@ "signalCatalogArn": { "target": "com.amazonaws.iotfleetwise#arn", "traits": { - "smithy.api#documentation": "

(Optional) The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign.\n

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign.\n

", "smithy.api#required": {} } }, @@ -5560,6 +5560,20 @@ "smithy.api#httpQuery": "modelManifestArn" } }, + "attributeNames": { + "target": "com.amazonaws.iotfleetwise#attributeNamesList", + "traits": { + "smithy.api#documentation": "

The fully qualified names of the attributes. For example, the fully qualified name of an attribute might be Vehicle.Body.Engine.Type.

", + "smithy.api#httpQuery": "attributeNames" + } + }, + "attributeValues": { + "target": "com.amazonaws.iotfleetwise#attributeValuesList", + "traits": { + "smithy.api#documentation": "

Static information about a vehicle attribute value in string format. For example:

\n

\n \"1.3 L R2\"\n
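The new attributeNames and attributeValues query parameters let ListVehicles filter on up to five static attribute name/value pairs. A hedged sketch of such a call against a regenerated Soto client; the module, initializer, and response member names are assumptions based on Soto's usual code generation for this model:

```swift
import SotoIoTFleetWise  // assumed module name for the regenerated client

// Hypothetical sketch: list vehicles whose Vehicle.Body.Engine.Type attribute is "1.3 L R2",
// using the new attributeNames/attributeValues filters added in this change.
func findVehiclesByEngineType(fleetWise: IoTFleetWise) async throws {
    let request = IoTFleetWise.ListVehiclesRequest(
        attributeNames: ["Vehicle.Body.Engine.Type"],  // paired positionally with attributeValues
        attributeValues: ["1.3 L R2"]
    )
    let response = try await fleetWise.listVehicles(request)
    print(response.vehicleSummaries?.count ?? 0)       // assumed response member name
}
```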

", + "smithy.api#httpQuery": "attributeValues" + } + }, "nextToken": { "target": "com.amazonaws.iotfleetwise#nextToken", "traits": { @@ -8912,9 +8926,33 @@ "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" } }, + "com.amazonaws.iotfleetwise#attributeNamesList": { + "type": "list", + "member": { + "target": "com.amazonaws.iotfleetwise#attributeName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, "com.amazonaws.iotfleetwise#attributeValue": { "type": "string" }, + "com.amazonaws.iotfleetwise#attributeValuesList": { + "type": "list", + "member": { + "target": "com.amazonaws.iotfleetwise#attributeValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, "com.amazonaws.iotfleetwise#attributesMap": { "type": "map", "key": { diff --git a/models/iottwinmaker.json b/models/iottwinmaker.json index 020fd0f200..5e190b05ff 100644 --- a/models/iottwinmaker.json +++ b/models/iottwinmaker.json @@ -6782,6 +6782,10 @@ { "name": "CREATE", "value": "CREATE" + }, + { + "name": "RESET_VALUE", + "value": "RESET_VALUE" } ] } diff --git a/models/ivs-realtime.json b/models/ivs-realtime.json index 8f144a2e2c..234a738b0e 100644 --- a/models/ivs-realtime.json +++ b/models/ivs-realtime.json @@ -36,6 +36,9 @@ { "target": "com.amazonaws.ivsrealtime#DeleteEncoderConfiguration" }, + { + "target": "com.amazonaws.ivsrealtime#DeletePublicKey" + }, { "target": "com.amazonaws.ivsrealtime#DeleteStage" }, @@ -54,6 +57,9 @@ { "target": "com.amazonaws.ivsrealtime#GetParticipant" }, + { + "target": "com.amazonaws.ivsrealtime#GetPublicKey" + }, { "target": "com.amazonaws.ivsrealtime#GetStage" }, @@ -63,6 +69,9 @@ { "target": "com.amazonaws.ivsrealtime#GetStorageConfiguration" }, + { + "target": "com.amazonaws.ivsrealtime#ImportPublicKey" + }, { "target": "com.amazonaws.ivsrealtime#ListCompositions" }, @@ -75,6 +84,9 @@ { "target": "com.amazonaws.ivsrealtime#ListParticipants" }, + { + "target": "com.amazonaws.ivsrealtime#ListPublicKeys" + }, { "target": "com.amazonaws.ivsrealtime#ListStages" }, @@ -134,7 +146,7 @@ "date" ] }, - "smithy.api#documentation": "

\n Introduction\n

\n

The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP \n\t API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, \n\t including errors.\n

\n

Terminology:

\n
    \n
  • \n

    A stage is a virtual space where participants can exchange video in real time.

    \n
  • \n
  • \n

    A participant token is a token that authenticates a participant when they join a stage.

    \n
  • \n
  • \n

    A participant object represents participants (people) in the stage and\n contains information about them. When a token is created, it includes a participant ID;\n when a participant uses that token to join a stage, the participant is associated with\n that participant ID. There is a 1:1 mapping between participant tokens and\n participants.

    \n
  • \n
  • \n

    Server-side composition: The composition process composites participants\n of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels).\n Composition endpoints support this process.

    \n
  • \n
  • \n

    Server-side composition: A composition controls the look of the outputs,\n including how participants are positioned in the video.

    \n
  • \n
\n

\n Resources\n

\n

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS Real-Time Streaming):

\n
    \n
  • \n

    \n Stage — A stage is a virtual space where participants can exchange video in real time.

    \n
  • \n
\n

\n Tagging\n

\n

A tag is a metadata label that you assign to an AWS resource. A tag\n comprises a key and a value, both set by you. For\n example, you might set a tag as topic:nature to label a particular video\n category. See Tagging AWS Resources for more information, including restrictions that apply to\n tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific\n constraints beyond what is documented there.

\n

Tags can help you identify and organize your AWS resources. For example, you can use the\n same tag for different resources to indicate that they are related. You can also use tags to\n manage access (see Access Tags).

\n

The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and\n ListTagsForResource. The following resource supports tagging: Stage.

\n

At most 50 tags can be applied to a resource.

\n

\n Stages Endpoints\n

\n
    \n
  • \n

    \n CreateParticipantToken — Creates an additional token for a specified stage. This can be done after stage creation or when tokens expire.

    \n
  • \n
  • \n

    \n CreateStage — Creates a new stage (and optionally participant tokens).

    \n
  • \n
  • \n

    \n DeleteStage — Shuts down and deletes the specified stage (disconnecting all participants).

    \n
  • \n
  • \n

    \n DisconnectParticipant — Disconnects a specified participant and revokes the participant permanently from a specified stage.

    \n
  • \n
  • \n

    \n GetParticipant — Gets information about the specified\n participant token.

    \n
  • \n
  • \n

    \n GetStage — Gets information for the specified stage.

    \n
  • \n
  • \n

    \n GetStageSession — Gets information for the specified stage\n session.

    \n
  • \n
  • \n

    \n ListParticipantEvents — Lists events for a specified\n participant that occurred during a specified stage session.

    \n
  • \n
  • \n

    \n ListParticipants — Lists all participants in a specified stage\n session.

    \n
  • \n
  • \n

    \n ListStages — Gets summary information about all stages in your account, in the AWS region where the API request is processed.

    \n
  • \n
  • \n

    \n ListStageSessions — Gets all sessions for a specified stage.

    \n
  • \n
  • \n

    \n UpdateStage — Updates a stage’s configuration.

    \n
  • \n
\n

\n Composition Endpoints\n

\n
    \n
  • \n

    \n GetComposition — Gets information about the specified\n Composition resource.

    \n
  • \n
  • \n

    \n ListCompositions — Gets summary information about all\n Compositions in your account, in the AWS region where the API request is processed.

    \n
  • \n
  • \n

    \n StartComposition — Starts a Composition from a stage based on\n the configuration provided in the request.

    \n
  • \n
  • \n

    \n StopComposition — Stops and deletes a Composition resource.\n Any broadcast from the Composition resource is stopped.

    \n
  • \n
\n

\n EncoderConfiguration Endpoints\n

\n
    \n
  • \n

    \n CreateEncoderConfiguration — Creates an EncoderConfiguration object.

    \n
  • \n
  • \n

    \n DeleteEncoderConfiguration — Deletes an EncoderConfiguration\n resource. Ensures that no Compositions are using this template; otherwise, returns an\n error.

    \n
  • \n
  • \n

    \n GetEncoderConfiguration — Gets information about the specified\n EncoderConfiguration resource.

    \n
  • \n
  • \n

    \n ListEncoderConfigurations — Gets summary information about all\n EncoderConfigurations in your account, in the AWS region where the API request is\n processed.

    \n
  • \n
\n

\n StorageConfiguration Endpoints\n

\n \n

\n Tags Endpoints\n

\n
    \n
  • \n

    \n ListTagsForResource — Gets information about AWS tags for the\n specified ARN.

    \n
  • \n
  • \n

    \n TagResource — Adds or updates tags for the AWS resource with\n the specified ARN.

    \n
  • \n
  • \n

    \n UntagResource — Removes tags from the resource with the\n specified ARN.

    \n
  • \n
", + "smithy.api#documentation": "

The Amazon Interactive Video Service (IVS) real-time API is REST compatible, using a standard HTTP \n\t API and an AWS EventBridge event stream for responses. JSON is used for both requests and responses, \n\t including errors.\n

\n

\n Key Concepts\n

\n
    \n
  • \n

    \n Stage — A virtual space where participants can exchange video in real time.

    \n
  • \n
  • \n

    \n Participant token — A token that authenticates a participant when they join a stage.

    \n
  • \n
  • \n

    \n Participant object — Represents participants (people) in the stage and\n contains information about them. When a token is created, it includes a participant ID;\n when a participant uses that token to join a stage, the participant is associated with\n that participant ID. There is a 1:1 mapping between participant tokens and\n participants.

    \n
  • \n
\n

For server-side composition:

\n
    \n
  • \n

    \n Composition process — Composites participants\n of a stage into a single video and forwards it to a set of outputs (e.g., IVS channels).\n Composition endpoints support this process.

    \n
  • \n
  • \n

    \n Composition — Controls the look of the outputs,\n including how participants are positioned in the video.

    \n
  • \n
\n

For more information about your IVS live stream, also see Getting Started with Amazon IVS Real-Time Streaming.

\n

\n Tagging\n

\n

A tag is a metadata label that you assign to an AWS resource. A tag\n comprises a key and a value, both set by you. For\n example, you might set a tag as topic:nature to label a particular video\n category. See Tagging AWS Resources for more information, including restrictions that apply to\n tags and \"Tag naming limits and requirements\"; Amazon IVS stages has no service-specific\n constraints beyond what is documented there.

\n

Tags can help you identify and organize your AWS resources. For example, you can use the\n same tag for different resources to indicate that they are related. You can also use tags to\n manage access (see Access Tags).

\n

The Amazon IVS real-time API has these tag-related endpoints: TagResource, UntagResource, and\n ListTagsForResource. The following resource supports tagging: Stage.

\n

At most 50 tags can be applied to a resource.

", "smithy.api#title": "Amazon Interactive Video Service RealTime", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -812,6 +824,37 @@ "smithy.api#pattern": "^[a-zA-Z0-9-_]*$" } }, + "com.amazonaws.ivsrealtime#AutoParticipantRecordingConfiguration": { + "type": "structure", + "members": { + "storageConfigurationArn": { + "target": "com.amazonaws.ivsrealtime#AutoParticipantRecordingStorageConfigurationArn", + "traits": { + "smithy.api#documentation": "

ARN of the StorageConfiguration resource to use for individual participant recording. Default: \"\" (empty string, no storage configuration is specified). Individual participant recording cannot be started unless a storage configuration is specified when the Stage is created or updated.

", + "smithy.api#required": {} + } + }, + "mediaTypes": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingMediaTypeList", + "traits": { + "smithy.api#documentation": "

Types of media to be recorded. Default: AUDIO_VIDEO.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Object specifying a configuration for individual participant recording.

" + } + }, + "com.amazonaws.ivsrealtime#AutoParticipantRecordingStorageConfigurationArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:storage-configuration/[a-zA-Z0-9-]+$$" + } + }, "com.amazonaws.ivsrealtime#Bitrate": { "type": "integer", "traits": { @@ -1245,6 +1288,12 @@ "traits": { "smithy.api#documentation": "

Tags attached to the resource. Array of maps, each of the form string:string\n (key:value). See Tagging AWS\n Resources for details, including restrictions that apply to tags and \"Tag naming\n limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented\n there.

" } + }, + "autoParticipantRecordingConfiguration": { + "target": "com.amazonaws.ivsrealtime#AutoParticipantRecordingConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration object for individual participant recording, to attach to the new stage.
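With this addition, individual participant recording can be configured at stage creation by passing the new autoParticipantRecordingConfiguration member. A hedged sketch against a regenerated Soto client; type and member names are assumptions based on Soto's usual code generation for the shapes above:

```swift
import SotoIVSRealTime  // assumed module name for the regenerated client

// Hypothetical sketch: create a stage with individual participant recording enabled.
// storageConfigurationArn is required; an empty string would leave recording disabled.
func createRecordedStage(ivs: IVSRealTime, storageConfigurationArn: String) async throws {
    let recording = IVSRealTime.AutoParticipantRecordingConfiguration(
        mediaTypes: [.audioVideo],                       // AUDIO_VIDEO is also the default
        storageConfigurationArn: storageConfigurationArn
    )
    let request = IVSRealTime.CreateStageRequest(
        autoParticipantRecordingConfiguration: recording,
        name: "my-recorded-stage"
    )
    let response = try await ivs.createStage(request)
    print(response.stage?.arn ?? "")                     // assumed response member names
}
```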

" + } } } }, @@ -1403,6 +1452,61 @@ "smithy.api#output": {} } }, + "com.amazonaws.ivsrealtime#DeletePublicKey": { + "type": "operation", + "input": { + "target": "com.amazonaws.ivsrealtime#DeletePublicKeyRequest" + }, + "output": { + "target": "com.amazonaws.ivsrealtime#DeletePublicKeyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ivsrealtime#AccessDeniedException" + }, + { + "target": "com.amazonaws.ivsrealtime#ConflictException" + }, + { + "target": "com.amazonaws.ivsrealtime#PendingVerification" + }, + { + "target": "com.amazonaws.ivsrealtime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ivsrealtime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified public key used to sign stage participant tokens. \n\t This invalidates future participant tokens generated using the key pair’s private key.\n

", + "smithy.api#http": { + "method": "POST", + "uri": "/DeletePublicKey" + } + } + }, + "com.amazonaws.ivsrealtime#DeletePublicKeyRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.ivsrealtime#PublicKeyArn", + "traits": { + "smithy.api#documentation": "

ARN of the public key to be deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ivsrealtime#DeletePublicKeyResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ivsrealtime#DeleteStage": { "type": "operation", "input": { @@ -2195,6 +2299,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.ivsrealtime#GetPublicKey": { + "type": "operation", + "input": { + "target": "com.amazonaws.ivsrealtime#GetPublicKeyRequest" + }, + "output": { + "target": "com.amazonaws.ivsrealtime#GetPublicKeyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ivsrealtime#AccessDeniedException" + }, + { + "target": "com.amazonaws.ivsrealtime#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ivsrealtime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information for the specified public key.

", + "smithy.api#http": { + "method": "POST", + "uri": "/GetPublicKey" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ivsrealtime#GetPublicKeyRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.ivsrealtime#PublicKeyArn", + "traits": { + "smithy.api#documentation": "

ARN of the public key for which the information is to be retrieved.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ivsrealtime#GetPublicKeyResponse": { + "type": "structure", + "members": { + "publicKey": { + "target": "com.amazonaws.ivsrealtime#PublicKey", + "traits": { + "smithy.api#documentation": "

The public key that is returned.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ivsrealtime#GetStage": { "type": "operation", "input": { @@ -2382,7 +2543,7 @@ "featuredParticipantAttribute": { "target": "com.amazonaws.ivsrealtime#AttributeKey", "traits": { - "smithy.api#documentation": "

This attribute name identifies the featured slot. A participant with this attribute set\n to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured\n slot.

" + "smithy.api#documentation": "

This attribute name identifies the featured slot. A participant with this attribute set\n to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured\n slot. Default: \"\" (no featured participant).

" } }, "omitStoppedVideo": { @@ -2395,13 +2556,13 @@ "videoAspectRatio": { "target": "com.amazonaws.ivsrealtime#VideoAspectRatio", "traits": { - "smithy.api#documentation": "

Sets the non-featured participant display mode. Default: VIDEO.

" + "smithy.api#documentation": "

Sets the non-featured participant display mode, to control the aspect ratio of video tiles. VIDEO is 16:9, SQUARE is 1:1, and PORTRAIT is 3:4. Default: VIDEO.

" } }, "videoFillMode": { "target": "com.amazonaws.ivsrealtime#VideoFillMode", "traits": { - "smithy.api#documentation": "

Defines how video fits within the participant tile. When not set, \n\t videoFillMode defaults to COVER fill mode for participants in the grid \n\t and to CONTAIN fill mode for featured participants.

" + "smithy.api#documentation": "

Defines how video content fits within the participant tile: FILL (stretched), COVER (cropped),\n or CONTAIN (letterboxed). When not set, \n videoFillMode defaults to COVER fill mode for participants in the grid \n and to CONTAIN fill mode for featured participants.

" } }, "gridGap": { @@ -2434,6 +2595,81 @@ } } }, + "com.amazonaws.ivsrealtime#ImportPublicKey": { + "type": "operation", + "input": { + "target": "com.amazonaws.ivsrealtime#ImportPublicKeyRequest" + }, + "output": { + "target": "com.amazonaws.ivsrealtime#ImportPublicKeyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ivsrealtime#AccessDeniedException" + }, + { + "target": "com.amazonaws.ivsrealtime#ConflictException" + }, + { + "target": "com.amazonaws.ivsrealtime#PendingVerification" + }, + { + "target": "com.amazonaws.ivsrealtime#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.ivsrealtime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Imports a public key to be used for signing stage participant tokens.

", + "smithy.api#http": { + "method": "POST", + "uri": "/ImportPublicKey" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ivsrealtime#ImportPublicKeyRequest": { + "type": "structure", + "members": { + "publicKeyMaterial": { + "target": "com.amazonaws.ivsrealtime#PublicKeyMaterial", + "traits": { + "smithy.api#documentation": "

The content of the public key to be imported.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.ivsrealtime#PublicKeyName", + "traits": { + "smithy.api#documentation": "

Name of the public key to be imported.

" + } + }, + "tags": { + "target": "com.amazonaws.ivsrealtime#Tags", + "traits": { + "smithy.api#documentation": "

Tags attached to the resource. Array of maps, each of the form string:string\n (key:value). See Tagging AWS\n Resources for details, including restrictions that apply to tags and \"Tag naming\n limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented\n there.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ivsrealtime#ImportPublicKeyResponse": { + "type": "structure", + "members": { + "publicKey": { + "target": "com.amazonaws.ivsrealtime#PublicKey", + "traits": { + "smithy.api#documentation": "

The public key that was imported.
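Together with GetPublicKey, DeletePublicKey, and ListPublicKeys, this operation manages the keys used to verify participant tokens that callers sign themselves. A hedged sketch of importing a PEM-encoded key with a regenerated Soto client; names are assumptions based on Soto's usual code generation for the shapes in this change:

```swift
// Hypothetical sketch: import a PEM public key so IVS can verify participant tokens
// signed with the corresponding private key. Assumes `ivs` is an IVSRealTime client
// from a regenerated SotoIVSRealTime module.
func importSigningKey(ivs: IVSRealTime, pem: String) async throws -> String? {
    let request = IVSRealTime.ImportPublicKeyRequest(
        name: "token-signing-key",
        publicKeyMaterial: pem   // "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----"
    )
    let response = try await ivs.importPublicKey(request)
    return response.publicKey?.arn   // keep this ARN for GetPublicKey/DeletePublicKey later
}
```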

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ivsrealtime#InternalServerException": { "type": "structure", "members": { @@ -2784,20 +3020,20 @@ "filterByUserId": { "target": "com.amazonaws.ivsrealtime#UserId", "traits": { - "smithy.api#documentation": "

Filters the response list to match the specified user ID. Only one of\n filterByUserId, filterByPublished, or\n filterByState can be provided per request. A userId is a\n customer-assigned name to help identify the token; this can be used to link a participant\n to a user in the customer’s own systems.

" + "smithy.api#documentation": "

Filters the response list to match the specified user ID. \n Only one of filterByUserId, filterByPublished, \n filterByState, or filterByRecordingState can be provided per request.\n\t A userId is a\n customer-assigned name to help identify the token; this can be used to link a participant\n to a user in the customer’s own systems.

" } }, "filterByPublished": { "target": "com.amazonaws.ivsrealtime#Published", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Filters the response list to only show participants who published during the stage\n session. Only one of filterByUserId, filterByPublished, or\n filterByState can be provided per request.

" + "smithy.api#documentation": "

Filters the response list to only show participants who published during the stage session.\n Only one of filterByUserId, filterByPublished, \n filterByState, or filterByRecordingState can be provided per request.

" } }, "filterByState": { "target": "com.amazonaws.ivsrealtime#ParticipantState", "traits": { - "smithy.api#documentation": "

Filters the response list to only show participants in the specified state. Only one of\n filterByUserId, filterByPublished, or\n filterByState can be provided per request.

" + "smithy.api#documentation": "

Filters the response list to only show participants in the specified state. \n Only one of filterByUserId, filterByPublished, \n filterByState, or filterByRecordingState can be provided per request.

" } }, "nextToken": { @@ -2811,6 +3047,12 @@ "traits": { "smithy.api#documentation": "

Maximum number of results to return. Default: 50.

" } + }, + "filterByRecordingState": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingFilterByRecordingState", + "traits": { + "smithy.api#documentation": "

Filters the response list to only show participants with the specified recording state.\n Only one of filterByUserId, filterByPublished, \n filterByState, or filterByRecordingState can be provided per request.
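A hedged sketch of the new recording-state filter with a regenerated Soto client (names assumed from Soto's usual code generation; only one filterBy* parameter may be set per request, as noted above):

```swift
// Hypothetical sketch: list only the participants whose individual recording failed
// during a given stage session.
func listFailedRecordings(
    ivs: IVSRealTime, stageArn: String, sessionId: String
) async throws -> [IVSRealTime.ParticipantSummary] {
    let request = IVSRealTime.ListParticipantsRequest(
        filterByRecordingState: .failed,   // new filter added in this change
        sessionId: sessionId,
        stageArn: stageArn
    )
    return try await ivs.listParticipants(request).participants
}
```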

" + } } }, "traits": { @@ -2838,6 +3080,78 @@ "smithy.api#output": {} } }, + "com.amazonaws.ivsrealtime#ListPublicKeys": { + "type": "operation", + "input": { + "target": "com.amazonaws.ivsrealtime#ListPublicKeysRequest" + }, + "output": { + "target": "com.amazonaws.ivsrealtime#ListPublicKeysResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ivsrealtime#AccessDeniedException" + }, + { + "target": "com.amazonaws.ivsrealtime#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets summary information about all public keys in your account, in the AWS region where the API request is processed.

", + "smithy.api#http": { + "method": "POST", + "uri": "/ListPublicKeys" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "publicKeys" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ivsrealtime#ListPublicKeysRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.ivsrealtime#PaginationToken", + "traits": { + "smithy.api#documentation": "

The first public key to retrieve. This is used for pagination; see the nextToken response field.

" + } + }, + "maxResults": { + "target": "com.amazonaws.ivsrealtime#MaxPublicKeyResults", + "traits": { + "smithy.api#documentation": "

Maximum number of results to return. Default: 50.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ivsrealtime#ListPublicKeysResponse": { + "type": "structure", + "members": { + "publicKeys": { + "target": "com.amazonaws.ivsrealtime#PublicKeyList", + "traits": { + "smithy.api#documentation": "

List of the matching public keys (summary information only).

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.ivsrealtime#PaginationToken", + "traits": { + "smithy.api#documentation": "

If there are more public keys than maxResults, use nextToken in the request to get the next set.
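A hedged sketch of the nextToken loop described above, against a regenerated Soto client (names assumed from Soto's usual code generation):

```swift
// Hypothetical sketch: collect every public key summary by chasing nextToken.
func allPublicKeys(ivs: IVSRealTime) async throws -> [IVSRealTime.PublicKeySummary] {
    var summaries: [IVSRealTime.PublicKeySummary] = []
    var nextToken: String? = nil
    repeat {
        let response = try await ivs.listPublicKeys(.init(maxResults: 50, nextToken: nextToken))
        summaries.append(contentsOf: response.publicKeys)  // required member per the shape above
        nextToken = response.nextToken
    } while nextToken != nil
    return summaries
}
```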

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ivsrealtime#ListStageSessions": { "type": "operation", "input": { @@ -3153,6 +3467,15 @@ } } }, + "com.amazonaws.ivsrealtime#MaxPublicKeyResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, "com.amazonaws.ivsrealtime#MaxStageResults": { "type": "integer", "traits": { @@ -3271,6 +3594,24 @@ "traits": { "smithy.api#documentation": "

The participant’s SDK version.

" } + }, + "recordingS3BucketName": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingS3BucketName", + "traits": { + "smithy.api#documentation": "

Name of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string) if recording is not enabled.

" + } + }, + "recordingS3Prefix": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingS3Prefix", + "traits": { + "smithy.api#documentation": "

S3 prefix of the S3 bucket where the participant is being recorded, if individual participant recording is enabled, or \"\" (empty string) if recording is not enabled.

" + } + }, + "recordingState": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingState", + "traits": { + "smithy.api#documentation": "

The participant’s recording state.

" + } } }, "traits": { @@ -3312,6 +3653,111 @@ "target": "com.amazonaws.ivsrealtime#ParticipantSummary" } }, + "com.amazonaws.ivsrealtime#ParticipantRecordingFilterByRecordingState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "STARTING", + "name": "STARTING" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "STOPPING", + "name": "STOPPING" + }, + { + "value": "STOPPED", + "name": "STOPPED" + }, + { + "value": "FAILED", + "name": "FAILED" + } + ] + } + }, + "com.amazonaws.ivsrealtime#ParticipantRecordingMediaType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AUDIO_VIDEO", + "name": "AUDIO_VIDEO" + }, + { + "value": "AUDIO_ONLY", + "name": "AUDIO_ONLY" + } + ] + } + }, + "com.amazonaws.ivsrealtime#ParticipantRecordingMediaTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingMediaType" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.ivsrealtime#ParticipantRecordingS3BucketName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9-.]*$" + } + }, + "com.amazonaws.ivsrealtime#ParticipantRecordingS3Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-]*$" + } + }, + "com.amazonaws.ivsrealtime#ParticipantRecordingState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "STARTING", + "name": "STARTING" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "STOPPING", + "name": "STOPPING" + }, + { + "value": "STOPPED", + "name": "STOPPED" + }, + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "DISABLED", + "name": "DISABLED" + } + ] + } + }, "com.amazonaws.ivsrealtime#ParticipantState": { "type": "string", "traits": { @@ -3360,6 +3806,12 @@ "smithy.api#default": false, "smithy.api#documentation": "

Whether the participant ever published to the stage session.

" } + }, + "recordingState": { + "target": "com.amazonaws.ivsrealtime#ParticipantRecordingState", + "traits": { + "smithy.api#documentation": "

The participant’s recording state.

" + } } }, "traits": { @@ -3574,7 +4026,7 @@ "featuredParticipantAttribute": { "target": "com.amazonaws.ivsrealtime#AttributeKey", "traits": { - "smithy.api#documentation": "

This attribute name identifies the featured slot. A participant with this attribute set\n to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured\n slot.

" + "smithy.api#documentation": "

This attribute name identifies the featured slot. A participant with this attribute set\n to \"true\" (as a string value) in ParticipantTokenConfiguration is placed in the featured\n slot. Default: \"\" (no featured participant).

" } }, "omitStoppedVideo": { @@ -3587,7 +4039,7 @@ "videoFillMode": { "target": "com.amazonaws.ivsrealtime#VideoFillMode", "traits": { - "smithy.api#documentation": "

Defines how video fits within the participant tile. Default: COVER.\n

" + "smithy.api#documentation": "

Defines how video content fits within the participant tile: FILL (stretched), \n\t COVER (cropped), or CONTAIN (letterboxed). Default: COVER.

" } }, "gridGap": { @@ -3600,13 +4052,13 @@ "pipParticipantAttribute": { "target": "com.amazonaws.ivsrealtime#AttributeKey", "traits": { - "smithy.api#documentation": "

Identifies the PiP slot. A participant with this attribute set\n to \"true\" (as a string value) in ParticipantTokenConfiguration \n\t\t is placed in the PiP slot.

" + "smithy.api#documentation": "

Specifies the participant for the PiP window. A participant with this attribute set\n to \"true\" (as a string value) in ParticipantTokenConfiguration \n\t is placed in the PiP slot. Default: \"\" (no PiP participant).

" } }, "pipBehavior": { "target": "com.amazonaws.ivsrealtime#PipBehavior", "traits": { - "smithy.api#documentation": "

Defines PiP behavior when all participants have left. Default: STATIC.

" + "smithy.api#documentation": "

Defines PiP behavior when all participants have left: STATIC (maintains original position/size) or DYNAMIC (expands to full composition). Default: STATIC.

" } }, "pipOffset": { @@ -3693,6 +4145,105 @@ } } }, + "com.amazonaws.ivsrealtime#PublicKey": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.ivsrealtime#PublicKeyArn", + "traits": { + "smithy.api#documentation": "

Public key ARN.

" + } + }, + "name": { + "target": "com.amazonaws.ivsrealtime#PublicKeyName", + "traits": { + "smithy.api#documentation": "

Public key name.

" + } + }, + "publicKeyMaterial": { + "target": "com.amazonaws.ivsrealtime#PublicKeyMaterial", + "traits": { + "smithy.api#documentation": "

Public key material.

" + } + }, + "fingerprint": { + "target": "com.amazonaws.ivsrealtime#PublicKeyFingerprint", + "traits": { + "smithy.api#documentation": "

The public key fingerprint, a short string used to identify or verify the full public key.

" + } + }, + "tags": { + "target": "com.amazonaws.ivsrealtime#Tags", + "traits": { + "smithy.api#documentation": "

Tags attached to the resource. Array of maps, each of the form string:string\n (key:value). See Tagging AWS\n Resources for details, including restrictions that apply to tags and \"Tag naming\n limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented\n there.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Object specifying a public key used to sign stage participant tokens.
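The PublicKeyMaterial string defined just below carries a smithy.api#pattern trait requiring PEM-encoded public key material. A minimal sketch, using plain Foundation rather than anything from the generated SDK, that checks a candidate string against that same pattern before sending it to the service; the sample key body is truncated and purely illustrative:

```swift
import Foundation

// Mirrors the smithy.api#pattern trait on ivsrealtime#PublicKeyMaterial.
let publicKeyMaterialPattern =
    "-----BEGIN PUBLIC KEY-----\\r?\\n([a-zA-Z0-9+/=\\r\\n]+)\\r?\\n-----END PUBLIC KEY-----(\\r?\\n)?"

/// Returns true if `pem` contains PEM public-key material in the shape the model expects.
func looksLikePublicKeyMaterial(_ pem: String) -> Bool {
    pem.range(of: publicKeyMaterialPattern, options: .regularExpression) != nil
}

// Truncated, purely illustrative public key in SubjectPublicKeyInfo PEM form.
let sample = """
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE
-----END PUBLIC KEY-----
"""
print(looksLikePublicKeyMaterial(sample)) // true
```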

" + } + }, + "com.amazonaws.ivsrealtime#PublicKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^arn:aws:ivs:[a-z0-9-]+:[0-9]+:public-key/[a-zA-Z0-9-]+$" + } + }, + "com.amazonaws.ivsrealtime#PublicKeyFingerprint": { + "type": "string" + }, + "com.amazonaws.ivsrealtime#PublicKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.ivsrealtime#PublicKeySummary" + } + }, + "com.amazonaws.ivsrealtime#PublicKeyMaterial": { + "type": "string", + "traits": { + "smithy.api#pattern": "-----BEGIN PUBLIC KEY-----\\r?\\n([a-zA-Z0-9+/=\\r\\n]+)\\r?\\n-----END PUBLIC KEY-----(\\r?\\n)?" + } + }, + "com.amazonaws.ivsrealtime#PublicKeyName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-_]*$" + } + }, + "com.amazonaws.ivsrealtime#PublicKeySummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.ivsrealtime#PublicKeyArn", + "traits": { + "smithy.api#documentation": "

Public key ARN.

" + } + }, + "name": { + "target": "com.amazonaws.ivsrealtime#PublicKeyName", + "traits": { + "smithy.api#documentation": "

Public key name.

" + } + }, + "tags": { + "target": "com.amazonaws.ivsrealtime#Tags", + "traits": { + "smithy.api#documentation": "

Tags attached to the resource. Array of maps, each of the form string:string\n (key:value). See Tagging AWS\n Resources for details, including restrictions that apply to tags and \"Tag naming\n limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented\n there.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about a public key.

" + } + }, "com.amazonaws.ivsrealtime#Published": { "type": "boolean", "traits": { @@ -3861,6 +4412,18 @@ "traits": { "smithy.api#documentation": "

Tags attached to the resource. Array of maps, each of the form string:string\n (key:value). See Tagging AWS\n Resources for details, including restrictions that apply to tags and \"Tag naming\n limits and requirements\"; Amazon IVS has no constraints on tags beyond what is documented\n there.

" } + }, + "autoParticipantRecordingConfiguration": { + "target": "com.amazonaws.ivsrealtime#AutoParticipantRecordingConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration object for individual participant recording, attached to the stage.

" + } + }, + "endpoints": { + "target": "com.amazonaws.ivsrealtime#StageEndpoints", + "traits": { + "smithy.api#documentation": "

Summary information about various endpoints for a stage.

" + } } }, "traits": { @@ -3877,6 +4440,35 @@ "smithy.api#pattern": "^arn:aws:ivs:[a-z0-9-]+:[0-9]+:stage/[a-zA-Z0-9-]+$" } }, + "com.amazonaws.ivsrealtime#StageEndpoint": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, + "com.amazonaws.ivsrealtime#StageEndpoints": { + "type": "structure", + "members": { + "events": { + "target": "com.amazonaws.ivsrealtime#StageEndpoint", + "traits": { + "smithy.api#documentation": "

Events endpoint.

" + } + }, + "whip": { + "target": "com.amazonaws.ivsrealtime#StageEndpoint", + "traits": { + "smithy.api#documentation": "

WHIP endpoint.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about various endpoints for a stage.

" + } + }, "com.amazonaws.ivsrealtime#StageName": { "type": "string", "traits": { @@ -4446,6 +5038,12 @@ "traits": { "smithy.api#documentation": "

Name of the stage to be updated.

" } + }, + "autoParticipantRecordingConfiguration": { + "target": "com.amazonaws.ivsrealtime#AutoParticipantRecordingConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration object for individual participant recording, to attach to the stage. Note that this cannot be updated while recording is active.

" + } } } }, diff --git a/models/kafka.json b/models/kafka.json index f5c5da8471..32392e8209 100644 --- a/models/kafka.json +++ b/models/kafka.json @@ -260,6 +260,28 @@ "smithy.api#documentation": "

The distribution of broker nodes across Availability Zones. This is an optional parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. You can also explicitly set this parameter to the value DEFAULT. No other values are currently allowed.

\n

Amazon MSK distributes the broker nodes evenly across the Availability Zones that correspond to the subnets you provide when you create the cluster.

" } }, + "com.amazonaws.kafka#BrokerCountUpdateInfo": { + "type": "structure", + "members": { + "CreatedBrokerIds": { + "target": "com.amazonaws.kafka#__listOf__double", + "traits": { + "smithy.api#documentation": "

Kafka Broker IDs of brokers being created.

", + "smithy.api#jsonName": "createdBrokerIds" + } + }, + "DeletedBrokerIds": { + "target": "com.amazonaws.kafka#__listOf__double", + "traits": { + "smithy.api#documentation": "

Kafka Broker IDs of brokers being deleted.

", + "smithy.api#jsonName": "deletedBrokerIds" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information regarding UpdateBrokerCount.

" + } + }, "com.amazonaws.kafka#BrokerEBSVolumeInfo": { "type": "structure", "members": { @@ -1501,6 +1523,21 @@ "smithy.api#documentation": "

Details about consumer group replication.

" } }, + "com.amazonaws.kafka#ControllerNodeInfo": { + "type": "structure", + "members": { + "Endpoints": { + "target": "com.amazonaws.kafka#__listOf__string", + "traits": { + "smithy.api#documentation": "

Endpoints for accessing the Controller.

", + "smithy.api#jsonName": "endpoints" + } + } + }, + "traits": { + "smithy.api#documentation": "

Controller node information.

" + } + }, "com.amazonaws.kafka#CreateCluster": { "type": "operation", "input": { @@ -6375,6 +6412,13 @@ "smithy.api#documentation": "

This controls storage mode for supported storage tiers.

", "smithy.api#jsonName": "storageMode" } + }, + "BrokerCountUpdateInfo": { + "target": "com.amazonaws.kafka#BrokerCountUpdateInfo", + "traits": { + "smithy.api#documentation": "

Describes brokers being changed during a broker count update.

", + "smithy.api#jsonName": "brokerCountUpdateInfo" + } } }, "traits": { @@ -6432,6 +6476,13 @@ "smithy.api#jsonName": "brokerNodeInfo" } }, + "ControllerNodeInfo": { + "target": "com.amazonaws.kafka#ControllerNodeInfo", + "traits": { + "smithy.api#documentation": "

Information about the controller node.

", + "smithy.api#jsonName": "controllerNodeInfo" + } + }, "InstanceType": { "target": "com.amazonaws.kafka#__string", "traits": { @@ -9533,6 +9584,12 @@ "target": "com.amazonaws.kafka#VpcConnection" } }, + "com.amazonaws.kafka#__listOf__double": { + "type": "list", + "member": { + "target": "com.amazonaws.kafka#__double" + } + }, "com.amazonaws.kafka#__listOf__string": { "type": "list", "member": { diff --git a/models/kinesis-analytics-v2.json b/models/kinesis-analytics-v2.json index 5c488f7aa7..92689a0549 100644 --- a/models/kinesis-analytics-v2.json +++ b/models/kinesis-analytics-v2.json @@ -115,6 +115,12 @@ "traits": { "smithy.api#documentation": "

The descriptions of the current CloudWatch logging options for the SQL-based Kinesis Data Analytics application.

" } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking AddApplicationCloudWatchLoggingOption request" + } } }, "traits": { @@ -415,7 +421,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds a reference data source to an existing SQL-based Kinesis Data Analytics application.

\n

Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) and creates an\n in-application table within your application. In the request, you provide the source (S3\n bucket name and object key name), name of the in-application table to create, and the\n necessary mapping information that describes how data in an Amazon S3 object maps to columns\n in the resulting in-application table.

" + "smithy.api#documentation": "

Adds a reference data source to an existing SQL-based Kinesis Data Analytics application.

\n

Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) and creates an\n in-application table within your application. In the request, you provide the source (S3\n bucket name and object key name), name of the in-application table to create, and the\n necessary mapping information that describes how data in an Amazon S3 object maps to columns\n in the resulting in-application table.

" } }, "com.amazonaws.kinesisanalyticsv2#AddApplicationReferenceDataSourceRequest": { @@ -556,6 +562,12 @@ "traits": { "smithy.api#documentation": "

The parameters of the new VPC configuration.

" } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking AddApplicationVpcConfiguration request" + } } }, "traits": { @@ -657,6 +669,9 @@ "smithy.api#documentation": "

Describes whether snapshots are enabled for a Managed Service for Apache Flink application.

" } }, + "ApplicationSystemRollbackConfiguration": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationSystemRollbackConfiguration" + }, "VpcConfigurations": { "target": "com.amazonaws.kinesisanalyticsv2#VpcConfigurations", "traits": { @@ -713,6 +728,9 @@ "smithy.api#documentation": "

Describes whether snapshots are enabled for a Managed Service for Apache Flink application.

" } }, + "ApplicationSystemRollbackConfigurationDescription": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationSystemRollbackConfigurationDescription" + }, "VpcConfigurationDescriptions": { "target": "com.amazonaws.kinesisanalyticsv2#VpcConfigurationDescriptions", "traits": { @@ -763,6 +781,9 @@ "smithy.api#documentation": "

Describes whether snapshots are enabled for a Managed Service for Apache Flink application.

" } }, + "ApplicationSystemRollbackConfigurationUpdate": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationSystemRollbackConfigurationUpdate" + }, "VpcConfigurationUpdates": { "target": "com.amazonaws.kinesisanalyticsv2#VpcConfigurationUpdates", "traits": { @@ -881,6 +902,12 @@ "smithy.api#documentation": "

If you reverted the application using RollbackApplication,\n the application version when RollbackApplication was called.

" } }, + "ApplicationVersionCreateTimestamp": { + "target": "com.amazonaws.kinesisanalyticsv2#Timestamp", + "traits": { + "smithy.api#documentation": "The current timestamp when the application version was created." + } + }, "ConditionalToken": { "target": "com.amazonaws.kinesisanalyticsv2#ConditionalToken", "traits": { @@ -981,6 +1008,7 @@ "com.amazonaws.kinesisanalyticsv2#ApplicationName": { "type": "string", "traits": { + "smithy.api#documentation": "The name of the application", "smithy.api#length": { "min": 1, "max": 128 @@ -988,6 +1016,84 @@ "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" } }, + "com.amazonaws.kinesisanalyticsv2#ApplicationOperationInfo": { + "type": "structure", + "members": { + "Operation": { + "target": "com.amazonaws.kinesisanalyticsv2#Operation" + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId" + }, + "StartTime": { + "target": "com.amazonaws.kinesisanalyticsv2#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp at which the operation was created" + } + }, + "EndTime": { + "target": "com.amazonaws.kinesisanalyticsv2#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp at which the operation finished for the application" + } + }, + "OperationStatus": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationStatus" + } + }, + "traits": { + "smithy.api#documentation": "Provides a description of the operation, such as the type and status of operation" + } + }, + "com.amazonaws.kinesisanalyticsv2#ApplicationOperationInfoDetails": { + "type": "structure", + "members": { + "Operation": { + "target": "com.amazonaws.kinesisanalyticsv2#Operation", + "traits": { + "smithy.api#required": {} + } + }, + "StartTime": { + "target": "com.amazonaws.kinesisanalyticsv2#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp at which the operation was created", + "smithy.api#required": {} + } + }, + "EndTime": { + "target": "com.amazonaws.kinesisanalyticsv2#Timestamp", + "traits": { + "smithy.api#documentation": "The timestamp at which the operation finished for the application", + "smithy.api#required": {} + } + }, + "OperationStatus": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationStatus", + "traits": { + "smithy.api#required": {} + } + }, + "ApplicationVersionChangeDetails": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationVersionChangeDetails" + }, + "OperationFailureDetails": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationFailureDetails" + } + }, + "traits": { + "smithy.api#documentation": "Provides a description of the operation, such as the operation-type and status" + } + }, + "com.amazonaws.kinesisanalyticsv2#ApplicationOperationInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationOperationInfo" + }, + "traits": { + "smithy.api#documentation": "List of ApplicationOperationInfo for an application" + } + }, "com.amazonaws.kinesisanalyticsv2#ApplicationRestoreConfiguration": { "type": "structure", "members": { @@ -1203,6 +1309,73 @@ "smithy.api#documentation": "

Provides application summary information, including the application Amazon Resource Name (ARN), name, and status.

" } }, + "com.amazonaws.kinesisanalyticsv2#ApplicationSystemRollbackConfiguration": { + "type": "structure", + "members": { + "RollbackEnabled": { + "target": "com.amazonaws.kinesisanalyticsv2#BooleanObject", + "traits": { + "smithy.api#documentation": "Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Describes system rollback configuration for a Managed Service for Apache Flink application" + } + }, + "com.amazonaws.kinesisanalyticsv2#ApplicationSystemRollbackConfigurationDescription": { + "type": "structure", + "members": { + "RollbackEnabled": { + "target": "com.amazonaws.kinesisanalyticsv2#BooleanObject", + "traits": { + "smithy.api#documentation": "Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Describes system rollback configuration for a Managed Service for Apache Flink application" + } + }, + "com.amazonaws.kinesisanalyticsv2#ApplicationSystemRollbackConfigurationUpdate": { + "type": "structure", + "members": { + "RollbackEnabledUpdate": { + "target": "com.amazonaws.kinesisanalyticsv2#BooleanObject", + "traits": { + "smithy.api#documentation": "Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Describes system rollback configuration for a Managed Service for Apache Flink application" + } + }, + "com.amazonaws.kinesisanalyticsv2#ApplicationVersionChangeDetails": { + "type": "structure", + "members": { + "ApplicationVersionUpdatedFrom": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationVersionId", + "traits": { + "smithy.api#documentation": "The operation was performed on this version of the application", + "smithy.api#required": {} + } + }, + "ApplicationVersionUpdatedTo": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationVersionId", + "traits": { + "smithy.api#documentation": "The operation execution resulted in the transition to the following version of the application", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about the application version changes due to an operation" + } + }, "com.amazonaws.kinesisanalyticsv2#ApplicationVersionId": { "type": "long", "traits": { @@ -1308,7 +1481,7 @@ } }, "traits": { - "smithy.api#documentation": "

For a SQL-based Kinesis Data Analytics application, provides additional mapping information when the record\n format uses delimiters, such as CSV. For example, the following sample records use CSV format,\n where the records use the '\\n' as the row delimiter and a comma (\",\") as\n the column delimiter:

\n \n

\n \"name1\", \"address1\"\n

\n

\n \"name2\", \"address2\"\n

" + "smithy.api#documentation": "

For a SQL-based Kinesis Data Analytics application, provides additional mapping information when the record\n format uses delimiters, such as CSV. For example, the following sample records use CSV format,\n where the records use the '\\n' as the row delimiter and a comma (\",\") as\n the column delimiter:

\n

\n \"name1\", \"address1\"\n

\n

\n \"name2\", \"address2\"\n

" } }, "com.amazonaws.kinesisanalyticsv2#CatalogConfiguration": { @@ -1771,7 +1944,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates and returns a URL that you can use to connect to \n an application's extension.

\n

The IAM role or user used to call this API defines the permissions to access the\n extension. After the presigned URL is created, no additional permission is required to access\n this URL. IAM authorization policies for this API are also enforced for every HTTP request\n that attempts to connect to the extension.

\n

You control the amount of time that the URL will be valid using the SessionExpirationDurationInSeconds\n parameter. If you do not provide this parameter, the returned URL is valid for twelve hours.

\n \n

The URL that you get from a call to CreateApplicationPresignedUrl must be used within 3 minutes\n to be valid. \n If you first try to use the URL after the 3-minute limit expires, the service returns an HTTP 403 Forbidden error.

\n
" + "smithy.api#documentation": "

Creates and returns a URL that you can use to connect to \n an application's extension.

\n

The IAM role or user used to call this API defines the permissions to access the\n extension. After the presigned URL is created, no additional permission is required to access\n this URL. IAM authorization policies for this API are also enforced for every HTTP request\n that attempts to connect to the extension.

\n

You control the amount of time that the URL will be valid using the SessionExpirationDurationInSeconds\n parameter. If you do not provide this parameter, the returned URL is valid for twelve hours.

\n \n

The URL that you get from a call to CreateApplicationPresignedUrl must be used within 3 minutes\n to be valid. \n If you first try to use the URL after the 3-minute limit expires, the service returns an HTTP 403 Forbidden error.

\n
" } }, "com.amazonaws.kinesisanalyticsv2#CreateApplicationPresignedUrlRequest": { @@ -2153,6 +2326,12 @@ "traits": { "smithy.api#documentation": "

The descriptions of the remaining CloudWatch logging options for the application.

" } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking DeleteApplicationCloudWatchLoggingOption request" + } } }, "traits": { @@ -2567,6 +2746,12 @@ "traits": { "smithy.api#documentation": "

The updated version ID of the application.

" } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking DeleteApplicationVpcConfiguration request" + } } }, "traits": { @@ -2640,6 +2825,62 @@ "smithy.api#documentation": "

Returns information about a specific Managed Service for Apache Flink application.

\n

If you want to retrieve a list of all applications in your account,\n use the ListApplications operation.

" } }, + "com.amazonaws.kinesisanalyticsv2#DescribeApplicationOperation": { + "type": "operation", + "input": { + "target": "com.amazonaws.kinesisanalyticsv2#DescribeApplicationOperationRequest" + }, + "output": { + "target": "com.amazonaws.kinesisanalyticsv2#DescribeApplicationOperationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kinesisanalyticsv2#InvalidArgumentException" + }, + { + "target": "com.amazonaws.kinesisanalyticsv2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.kinesisanalyticsv2#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns information about a specific operation performed on a Managed Service for Apache Flink application" + } + }, + "com.amazonaws.kinesisanalyticsv2#DescribeApplicationOperationRequest": { + "type": "structure", + "members": { + "ApplicationName": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationName", + "traits": { + "smithy.api#required": {} + } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Request for information about a specific operation performed on a Managed Service for Apache Flink application", + "smithy.api#input": {} + } + }, + "com.amazonaws.kinesisanalyticsv2#DescribeApplicationOperationResponse": { + "type": "structure", + "members": { + "ApplicationOperationInfoDetails": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationOperationInfoDetails" + } + }, + "traits": { + "smithy.api#documentation": "Provides details of the operation corresponding to the operation-ID on a Managed Service for Apache Flink application", + "smithy.api#output": {} + } + }, "com.amazonaws.kinesisanalyticsv2#DescribeApplicationRequest": { "type": "structure", "members": { @@ -2954,9 +3195,30 @@ "smithy.api#documentation": "

Describes updates to the execution property groups for a Managed Service for Apache Flink application or a Studio notebook.

" } }, + "com.amazonaws.kinesisanalyticsv2#ErrorInfo": { + "type": "structure", + "members": { + "ErrorString": { + "target": "com.amazonaws.kinesisanalyticsv2#ErrorString" + } + }, + "traits": { + "smithy.api#documentation": "Provides a description of the operation failure error" + } + }, "com.amazonaws.kinesisanalyticsv2#ErrorMessage": { "type": "string" }, + "com.amazonaws.kinesisanalyticsv2#ErrorString": { + "type": "string", + "traits": { + "smithy.api#documentation": "Error message resulting in failure of the operation", + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, "com.amazonaws.kinesisanalyticsv2#FileKey": { "type": "string", "traits": { @@ -3446,7 +3708,7 @@ "InputStartingPosition": { "target": "com.amazonaws.kinesisanalyticsv2#InputStartingPosition", "traits": { - "smithy.api#documentation": "

The starting position on the stream.

\n
    \n
  • \n

    \n NOW - Start reading just after the most recent record in the stream, and\n start at the request timestamp that the customer issued.

    \n \n \n
  • \n
  • \n

    \n TRIM_HORIZON - Start reading at the last untrimmed record in the stream, \n which is the oldest record available in the stream. This option is not available \n for an Amazon Kinesis Data Firehose delivery stream.

    \n
  • \n
  • \n

    \n LAST_STOPPED_POINT - Resume reading from where the application last stopped reading.

    \n
  • \n
" + "smithy.api#documentation": "

The starting position on the stream.
  • NOW - Start reading just after the most recent record in the stream, and start at the request timestamp that the customer issued.
  • TRIM_HORIZON - Start reading at the last untrimmed record in the stream, which is the oldest record available in the stream. This option is not available for an Amazon Kinesis Data Firehose delivery stream.
  • LAST_STOPPED_POINT - Resume reading from where the application last stopped reading.
" } } }, @@ -3636,6 +3898,9 @@ { "target": "com.amazonaws.kinesisanalyticsv2#DescribeApplication" }, + { + "target": "com.amazonaws.kinesisanalyticsv2#DescribeApplicationOperation" + }, { "target": "com.amazonaws.kinesisanalyticsv2#DescribeApplicationSnapshot" }, @@ -3645,6 +3910,9 @@ { "target": "com.amazonaws.kinesisanalyticsv2#DiscoverInputSchema" }, + { + "target": "com.amazonaws.kinesisanalyticsv2#ListApplicationOperations" + }, { "target": "com.amazonaws.kinesisanalyticsv2#ListApplications" }, @@ -3684,7 +3952,7 @@ "sdkId": "Kinesis Analytics V2", "arnNamespace": "kinesisanalytics", "cloudFormationName": "KinesisAnalyticsV2", - "cloudTrailEventSource": "kinesisanalyticsv2.amazonaws.com", + "cloudTrailEventSource": "kinesisanalytics.amazonaws.com", "docId": "kinesisanalyticsv2-2018-05-23", "endpointPrefix": "kinesisanalytics" }, @@ -4880,6 +5148,87 @@ "smithy.api#error": "client" } }, + "com.amazonaws.kinesisanalyticsv2#ListApplicationOperations": { + "type": "operation", + "input": { + "target": "com.amazonaws.kinesisanalyticsv2#ListApplicationOperationsRequest" + }, + "output": { + "target": "com.amazonaws.kinesisanalyticsv2#ListApplicationOperationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kinesisanalyticsv2#InvalidArgumentException" + }, + { + "target": "com.amazonaws.kinesisanalyticsv2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.kinesisanalyticsv2#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "Lists information about operations performed on a Managed Service for Apache Flink application", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "ApplicationOperationInfoList", + "pageSize": "Limit" + } + } + }, + "com.amazonaws.kinesisanalyticsv2#ListApplicationOperationsInputLimit": { + "type": "integer", + "traits": { + "smithy.api#documentation": "Limit on the number of records returned in the response", + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.kinesisanalyticsv2#ListApplicationOperationsRequest": { + "type": "structure", + "members": { + "ApplicationName": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationName", + "traits": { + "smithy.api#required": {} + } + }, + "Limit": { + "target": "com.amazonaws.kinesisanalyticsv2#ListApplicationOperationsInputLimit" + }, + "NextToken": { + "target": "com.amazonaws.kinesisanalyticsv2#NextToken" + }, + "Operation": { + "target": "com.amazonaws.kinesisanalyticsv2#Operation" + }, + "OperationStatus": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationStatus" + } + }, + "traits": { + "smithy.api#documentation": "Request to list operations performed on an application", + "smithy.api#input": {} + } + }, + "com.amazonaws.kinesisanalyticsv2#ListApplicationOperationsResponse": { + "type": "structure", + "members": { + "ApplicationOperationInfoList": { + "target": "com.amazonaws.kinesisanalyticsv2#ApplicationOperationInfoList" + }, + "NextToken": { + "target": "com.amazonaws.kinesisanalyticsv2#NextToken" + } + }, + "traits": { + "smithy.api#documentation": "Response with the list of operations for an application", + "smithy.api#output": {} + } + }, "com.amazonaws.kinesisanalyticsv2#ListApplicationSnapshots": { "type": "operation", "input": { @@ -4897,7 +5246,13 @@ } ], "traits": { - "smithy.api#documentation": "

Lists information about the current application snapshots.

" + "smithy.api#documentation": "

Lists information about the current application snapshots.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "SnapshotSummaries", + "pageSize": "Limit" + } } }, "com.amazonaws.kinesisanalyticsv2#ListApplicationSnapshotsRequest": { @@ -4967,7 +5322,13 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration \n associated with each version.

\n \n

To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation.

\n \n

This operation is supported only for Managed Service for Apache Flink.

\n
" + "smithy.api#documentation": "

Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration \n associated with each version.

\n

To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation.

\n \n

This operation is supported only for Managed Service for Apache Flink.

\n
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "ApplicationVersionSummaries", + "pageSize": "Limit" + } } }, "com.amazonaws.kinesisanalyticsv2#ListApplicationVersionsInputLimit": { @@ -5040,7 +5401,13 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of Managed Service for Apache Flink applications in your account. For each\n application, the response includes the application name, Amazon Resource Name (ARN), and\n status.

\n

If you want detailed information about a specific application, use \n DescribeApplication.

" + "smithy.api#documentation": "

Returns a list of Managed Service for Apache Flink applications in your account. For each\n application, the response includes the application name, Amazon Resource Name (ARN), and\n status.

\n

If you want detailed information about a specific application, use \n DescribeApplication.
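This release also attaches smithy.api#paginated traits to ListApplications, ListApplicationSnapshots, ListApplicationVersions, and the new ListApplicationOperations, so code generators can emit paginators. Where a generated paginator is not available, following NextToken by hand works the same way. A minimal sketch with the actual service call abstracted behind a closure; the closure and its tuple result are illustrative, not generated client API:

```swift
// Illustrative page shape: one batch of items plus the NextToken for the following page.
typealias Page<Item> = (items: [Item], nextToken: String?)

/// Drains a paginated List* operation by following NextToken until it is nil.
/// `fetchPage` stands in for the real client call (for example, a ListApplications request),
/// taking the token to resume from and returning the next page.
func collectAllPages<Item>(
    fetchPage: (String?) async throws -> Page<Item>
) async throws -> [Item] {
    var results: [Item] = []
    var token: String? = nil
    repeat {
        let page = try await fetchPage(token)
        results.append(contentsOf: page.items)
        token = page.nextToken
    } while token != nil
    return results
}
```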

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "ApplicationSummaries", + "pageSize": "Limit" + } } }, "com.amazonaws.kinesisanalyticsv2#ListApplicationsInputLimit": { @@ -5391,6 +5758,7 @@ "com.amazonaws.kinesisanalyticsv2#NextToken": { "type": "string", "traits": { + "smithy.api#documentation": "If a previous command returned a pagination token, pass it into this value to retrieve the next set of results", "smithy.api#length": { "min": 1, "max": 512 @@ -5406,6 +5774,75 @@ } } }, + "com.amazonaws.kinesisanalyticsv2#Operation": { + "type": "string", + "traits": { + "smithy.api#documentation": "Type of operation performed on an application", + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.kinesisanalyticsv2#OperationFailureDetails": { + "type": "structure", + "members": { + "RollbackOperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Provides the operation ID of a system-rollback operation executed due to failure in the current operation" + } + }, + "ErrorInfo": { + "target": "com.amazonaws.kinesisanalyticsv2#ErrorInfo" + } + }, + "traits": { + "smithy.api#documentation": "Provides a description of the operation failure" + } + }, + "com.amazonaws.kinesisanalyticsv2#OperationId": { + "type": "string", + "traits": { + "smithy.api#documentation": "Identifier of the Operation", + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.kinesisanalyticsv2#OperationStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLED" + } + }, + "SUCCESSFUL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESSFUL" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + }, + "traits": { + "smithy.api#documentation": "Status of the operation performed on an application" + } + }, "com.amazonaws.kinesisanalyticsv2#Output": { "type": "structure", "members": { @@ -5443,7 +5880,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a SQL-based Kinesis Data Analytics application's output configuration,\n in which you identify an in-application stream and a destination where you want the\n in-application stream data to be written. The destination can be a Kinesis data stream or a\n Kinesis Data Firehose delivery stream.

\n \n

" + "smithy.api#documentation": "

Describes a SQL-based Kinesis Data Analytics application's output configuration,\n in which you identify an in-application stream and a destination where you want the\n in-application stream data to be written. The destination can be a Kinesis data stream or a\n Kinesis Data Firehose delivery stream.

\n

" } }, "com.amazonaws.kinesisanalyticsv2#OutputDescription": { @@ -6115,7 +6552,7 @@ } ], "traits": { - "smithy.api#documentation": "

Reverts the application to the previous running version. You can\n roll back an application if you suspect it is stuck in a transient status.

\n

You can roll back an application only if it is in the UPDATING \n or AUTOSCALING status.

\n

When you rollback an application, it loads state data from the last successful snapshot.\n If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request.

\n

This action is not supported for Managed Service for Apache Flink for SQL applications.

" + "smithy.api#documentation": "

Reverts the application to the previous running version. You can roll back an\n application if you suspect it is stuck in a transient status or in the running status.

\n

You can roll back an application only if it is in the UPDATING,\n AUTOSCALING, or RUNNING statuses.

\n

When you roll back an application, it loads state data from the last successful snapshot.\n If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request.
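RollbackApplication, StartApplication, StopApplication, and UpdateApplication responses in this model now carry an OperationId, which can be passed to the new DescribeApplicationOperation call and polled until the operation reaches a terminal OperationStatus. A minimal polling sketch; the enum mirrors the OperationStatus values in this model, and the fetchStatus closure stands in for the real DescribeApplicationOperation call rather than spelling out generated client method names:

```swift
// Mirrors kinesisanalyticsv2#OperationStatus from this model (illustrative type, not generated).
enum OperationStatus: String {
    case inProgress = "IN_PROGRESS"
    case cancelled  = "CANCELLED"
    case successful = "SUCCESSFUL"
    case failed     = "FAILED"
}

/// Polls an application operation until it leaves IN_PROGRESS and returns the terminal status.
/// `fetchStatus` stands in for a DescribeApplicationOperation call for a given OperationId.
func waitForOperation(
    pollInterval: Duration = .seconds(5),
    fetchStatus: () async throws -> OperationStatus
) async throws -> OperationStatus {
    while true {
        let status = try await fetchStatus()
        if status != .inProgress { return status }
        try await Task.sleep(for: pollInterval)
    }
}
```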

" } }, "com.amazonaws.kinesisanalyticsv2#RollbackApplicationRequest": { @@ -6148,6 +6585,12 @@ "traits": { "smithy.api#required": {} } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking RollbackApplication request" + } } }, "traits": { @@ -6465,7 +6908,7 @@ } }, "traits": { - "smithy.api#documentation": "

For a SQL-based Kinesis Data Analytics application, identifies the Amazon S3\n bucket and object that contains the reference data.

\n \n

A SQL-based Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication\n operation to trigger reloading of data into your application.

" + "smithy.api#documentation": "

For a SQL-based Kinesis Data Analytics application, identifies the Amazon S3\n bucket and object that contains the reference data.

\n

A SQL-based Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication\n operation to trigger reloading of data into your application.

" } }, "com.amazonaws.kinesisanalyticsv2#S3ReferenceDataSourceDescription": { @@ -6825,7 +7268,14 @@ }, "com.amazonaws.kinesisanalyticsv2#StartApplicationResponse": { "type": "structure", - "members": {}, + "members": { + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking StartApplication request" + } + } + }, "traits": { "smithy.api#output": {} } @@ -6859,7 +7309,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops the application from processing data. You can stop\n an application only if it is in the running status, unless you set the Force \n parameter to true.

\n

You can use the DescribeApplication operation to find the application status.\n

\n

Managed Service for Apache Flink takes a snapshot when the application is stopped, unless Force is set \n to true.

" + "smithy.api#documentation": "

Stops the application from processing data. You can stop\n an application only if it is in the running status, unless you set the Force \n parameter to true.

\n

You can use the DescribeApplication operation to find the application status.\n

\n

Managed Service for Apache Flink takes a snapshot when the application is stopped, unless Force is set \n to true.

" } }, "com.amazonaws.kinesisanalyticsv2#StopApplicationRequest": { @@ -6875,7 +7325,7 @@ "Force": { "target": "com.amazonaws.kinesisanalyticsv2#BooleanObject", "traits": { - "smithy.api#documentation": "

Set to true to force the application to stop. If you set Force\n to true, Managed Service for Apache Flink stops the application without taking a snapshot. \n

\n \n

Force-stopping your application may lead to data loss or duplication.\n To prevent data loss or duplicate processing of data during application restarts, \n we recommend you to take frequent snapshots of your application.

\n
\n

You can only force stop a Managed Service for Apache Flink application. You can't force stop a SQL-based Kinesis Data Analytics application.

\n

The application must be in the\n STARTING, UPDATING, STOPPING, AUTOSCALING, or\n RUNNING status.

" + "smithy.api#documentation": "

Set to true to force the application to stop. If you set Force\n to true, Managed Service for Apache Flink stops the application without taking a snapshot. \n

\n \n

Force-stopping your application may lead to data loss or duplication.\n To prevent data loss or duplicate processing of data during application restarts, \n we recommend that you take frequent snapshots of your application.

\n
\n

You can only force stop a Managed Service for Apache Flink application. You can't force stop a SQL-based Kinesis Data Analytics application.

\n

The application must be in the\n STARTING, UPDATING, STOPPING, AUTOSCALING, or\n RUNNING status.

" } } }, @@ -6885,7 +7335,14 @@ }, "com.amazonaws.kinesisanalyticsv2#StopApplicationResponse": { "type": "structure", - "members": {}, + "members": { + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking StopApplication request" + } + } + }, "traits": { "smithy.api#output": {} } @@ -7302,7 +7759,7 @@ "RuntimeEnvironmentUpdate": { "target": "com.amazonaws.kinesisanalyticsv2#RuntimeEnvironment", "traits": { - "smithy.api#documentation": "

Updates the Managed Service for Apache Flink runtime environment used to run your code. To avoid issues you must:

\n
    \n
  • \n

    Ensure your new jar and dependencies are compatible with the new runtime selected.

    \n
  • \n
  • \n

    Ensure your new code's state is compatible with the snapshot from which your application will start

    \n
  • \n
" + "smithy.api#documentation": "

Updates the Managed Service for Apache Flink runtime environment used to run your code. To avoid issues you must:
  • Ensure your new jar and dependencies are compatible with the new runtime selected.
  • Ensure your new code's state is compatible with the snapshot from which your application will start.
" } } }, @@ -7319,6 +7776,12 @@ "smithy.api#documentation": "

Describes application updates.

", "smithy.api#required": {} } + }, + "OperationId": { + "target": "com.amazonaws.kinesisanalyticsv2#OperationId", + "traits": { + "smithy.api#documentation": "Operation ID for tracking UpdateApplication request" + } } }, "traits": { diff --git a/models/kms.json b/models/kms.json index e6e77c9ae7..e391dae4bf 100644 --- a/models/kms.json +++ b/models/kms.json @@ -64,6 +64,12 @@ "traits": { "smithy.api#enumValue": "RSA_AES_KEY_WRAP_SHA_256" } + }, + "SM2PKE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SM2PKE" + } } } }, @@ -980,7 +986,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.\n You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services\n services let you use KMS keys that you create and manage to protect your service\n resources.

\n

A KMS key is a logical representation of a cryptographic key. In addition to the key\n material used in cryptographic operations, a KMS key includes metadata, such as the key ID,\n key policy, creation date, description, and key state. For details, see Managing keys in the\n Key Management Service Developer Guide\n

\n

Use the parameters of CreateKey to specify the type of KMS key, the source of\n its key material, its key policy, description, tags, and other properties.

\n \n

KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

\n
\n

To create different types of KMS keys, use the following guidance:

\n
\n
Symmetric encryption KMS key
\n
\n

By default, CreateKey creates a symmetric encryption KMS key with key\n material that KMS generates. This is the basic and most widely used type of KMS key, and\n provides the best performance.

\n

To create a symmetric encryption KMS key, you don't need to specify any parameters.\n The default value for KeySpec, SYMMETRIC_DEFAULT, the default\n value for KeyUsage, ENCRYPT_DECRYPT, and the default value for\n Origin, AWS_KMS, create a symmetric encryption KMS key with\n KMS key material.

\n

If you need a key for basic encryption and decryption or you are creating a KMS key\n to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key.\n The key material in a symmetric encryption key never leaves KMS unencrypted. You can\n use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but\n they are typically used to generate data keys and data keys pairs. For details, see\n GenerateDataKey and GenerateDataKeyPair.

\n

\n
\n
Asymmetric KMS keys
\n
\n

To create an asymmetric KMS key, use the KeySpec parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.

\n

Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an\n SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves\n KMS unencrypted. However, you can use the GetPublicKey operation to\n download the public key so it can be used outside of KMS. KMS keys with RSA or SM2 key\n pairs can be used to encrypt or decrypt data or sign and verify messages (but not both).\n KMS keys with ECC key pairs can be used only to sign and verify messages. For\n information about asymmetric KMS keys, see Asymmetric KMS keys in the\n Key Management Service Developer Guide.

\n

\n
\n
HMAC KMS key
\n
\n

To create an HMAC KMS key, set the KeySpec parameter to a key spec\n value for HMAC KMS keys. Then set the KeyUsage parameter to\n GENERATE_VERIFY_MAC. You must set the key usage even though\n GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.

\n

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

\n

\n
\n
Multi-Region primary keys
\n
Imported key material
\n
\n

To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion parameter with a value of True. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.

\n

You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.

\n

This operation supports multi-Region keys, an KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

\n

\n
\n
\n

To import your own key material into a KMS key, begin by creating a KMS key with no\n key material. To do this, use the Origin parameter of\n CreateKey with a value of EXTERNAL. Next, use GetParametersForImport operation to get a public key and import token. Use\n the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For\n step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .

\n

You can import key material into KMS keys of all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't import key material into a KMS key in a custom key store.

\n

To create a multi-Region primary key with imported key material, use the\n Origin parameter of CreateKey with a value of\n EXTERNAL and the MultiRegion parameter with a value of\n True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into\n multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

\n

\n
\n
Custom key store
\n
\n

A custom key store lets you protect your Amazon Web Services resources using keys in a backing key\n store that you own and manage. When you request a cryptographic operation with a KMS key\n in a custom key store, the operation is performed in the backing key store using its\n cryptographic keys.

\n

KMS supports CloudHSM key stores backed by an CloudHSM cluster and external key stores backed by an\n external key manager outside of Amazon Web Services. When you create a KMS key in an CloudHSM key store,\n KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS\n key. When you create a KMS key in an external key store, you specify an existing\n encryption key in the external key manager.

\n \n

Some external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.

\n
\n

Before you create a KMS key in a custom key store, the ConnectionState\n of the key store must be CONNECTED. To connect the custom key store, use\n the ConnectCustomKeyStore operation. To find the\n ConnectionState, use the DescribeCustomKeyStores\n operation.

\n

To create a KMS key in a custom key store, use the CustomKeyStoreId.\n Use the default KeySpec value, SYMMETRIC_DEFAULT, and the\n default KeyUsage value, ENCRYPT_DECRYPT to create a symmetric\n encryption key. No other key type is supported in a custom key store.

\n

To create a KMS key in an CloudHSM key store, use the\n Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM\n cluster that is associated with the custom key store must have at least two active HSMs\n in different Availability Zones in the Amazon Web Services Region.

\n

To create a KMS key in an external key store, use the\n Origin parameter with a value of EXTERNAL_KEY_STORE and an\n XksKeyId parameter that identifies an existing external key.

\n \n

Some external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.

\n
\n
\n
\n

\n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.

\n

\n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.

\n

\n Related operations:\n

\n \n

\n Eventual consistency: The KMS API follows an eventual consistency model. \n For more information, see KMS eventual consistency.

" + "smithy.api#documentation": "

Creates a unique customer managed KMS key in your Amazon Web Services account and Region.\n You can use a KMS key in cryptographic operations, such as encryption and signing. Some Amazon Web Services\n services let you use KMS keys that you create and manage to protect your service\n resources.

\n

A KMS key is a logical representation of a cryptographic key. In addition to the key\n material used in cryptographic operations, a KMS key includes metadata, such as the key ID,\n key policy, creation date, description, and key state. For details, see Managing keys in the\n Key Management Service Developer Guide\n

\n

Use the parameters of CreateKey to specify the type of KMS key, the source of\n its key material, its key policy, description, tags, and other properties.

\n \n

KMS has replaced the term customer master key (CMK) with KMS key and KMS key. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.

\n
\n

To create different types of KMS keys, use the following guidance:

\n
\n
Symmetric encryption KMS key
\n
\n

By default, CreateKey creates a symmetric encryption KMS key with key\n material that KMS generates. This is the basic and most widely used type of KMS key, and\n provides the best performance.

\n

To create a symmetric encryption KMS key, you don't need to specify any parameters.\n The default value for KeySpec, SYMMETRIC_DEFAULT, the default\n value for KeyUsage, ENCRYPT_DECRYPT, and the default value for\n Origin, AWS_KMS, create a symmetric encryption KMS key with\n KMS key material.

\n

If you need a key for basic encryption and decryption or you are creating a KMS key\n to protect your resources in an Amazon Web Services service, create a symmetric encryption KMS key.\n The key material in a symmetric encryption key never leaves KMS unencrypted. You can\n use a symmetric encryption KMS key to encrypt and decrypt data up to 4,096 bytes, but\n they are typically used to generate data keys and data key pairs. For details, see\n GenerateDataKey and GenerateDataKeyPair.

\n

\n
\n
Asymmetric KMS keys
\n
\n

To create an asymmetric KMS key, use the KeySpec parameter to specify\n the type of key material in the KMS key. Then, use the KeyUsage parameter\n to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.\n You can't change these properties after the KMS key is created.

\n

Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair, or an\n SM2 key pair (China Regions only). The private key in an asymmetric KMS key never leaves\n KMS unencrypted. However, you can use the GetPublicKey operation to\n download the public key so it can be used outside of KMS. Each KMS key can have only one key usage. KMS keys with RSA key\n pairs can be used to encrypt and decrypt data or sign and verify messages (but not both).\n KMS keys with NIST-recommended ECC key pairs can be used to sign and verify messages or \n derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1\n can be used only to sign and verify messages. KMS keys with SM2 key pairs (China Regions only)\n can be used to either encrypt and decrypt data, sign and verify messages, or derive shared secrets (you must choose one key usage type). For\n information about asymmetric KMS keys, see Asymmetric KMS keys in the\n Key Management Service Developer Guide.

\n

\n
\n
HMAC KMS key
\n
\n

To create an HMAC KMS key, set the KeySpec parameter to a key spec\n value for HMAC KMS keys. Then set the KeyUsage parameter to\n GENERATE_VERIFY_MAC. You must set the key usage even though\n GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS keys.\n You can't change these properties after the KMS key is created.

\n

HMAC KMS keys are symmetric keys that never leave KMS unencrypted. You can use\n HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to 4096 bytes.

\n

\n
\n
Multi-Region primary keys
\n
Imported key material
\n
\n

To create a multi-Region primary key in the local Amazon Web Services Region,\n use the MultiRegion parameter with a value of True. To create\n a multi-Region replica key, that is, a KMS key with the same key ID\n and key material as a primary key, but in a different Amazon Web Services Region, use the ReplicateKey operation. To change a replica key to a primary key, and its\n primary key to a replica key, use the UpdatePrimaryRegion\n operation.

\n

You can create multi-Region KMS keys for all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't create multi-Region keys in a custom key store.

\n

This operation supports multi-Region keys, a KMS feature that lets you create multiple\n interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key\n material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt\n it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

\n

\n
\n
\n

To import your own key material into a KMS key, begin by creating a KMS key with no\n key material. To do this, use the Origin parameter of\n CreateKey with a value of EXTERNAL. Next, use the GetParametersForImport operation to get a public key and import token. Use\n the wrapping public key to encrypt your key material. Then, use ImportKeyMaterial with your import token to import the key material. For\n step-by-step instructions, see Importing Key Material in the \n Key Management Service Developer Guide\n .

\n

You can import key material into KMS keys of all supported KMS key types: symmetric\n encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric\n signing KMS keys. You can also create multi-Region keys with imported key material.\n However, you can't import key material into a KMS key in a custom key store.

\n

To create a multi-Region primary key with imported key material, use the\n Origin parameter of CreateKey with a value of\n EXTERNAL and the MultiRegion parameter with a value of\n True. To create replicas of the multi-Region primary key, use the ReplicateKey operation. For instructions, see Importing key material into\n multi-Region keys. For more information about multi-Region keys, see Multi-Region keys in KMS in the Key Management Service Developer Guide.

\n

\n
\n
Custom key store
\n
\n

A custom key store lets you protect your Amazon Web Services resources using keys in a backing key\n store that you own and manage. When you request a cryptographic operation with a KMS key\n in a custom key store, the operation is performed in the backing key store using its\n cryptographic keys.

\n

KMS supports CloudHSM key stores backed by a CloudHSM cluster and external key stores backed by an\n external key manager outside of Amazon Web Services. When you create a KMS key in a CloudHSM key store,\n KMS generates an encryption key in the CloudHSM cluster and associates it with the KMS\n key. When you create a KMS key in an external key store, you specify an existing\n encryption key in the external key manager.

\n \n

Some external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.

\n
\n

Before you create a KMS key in a custom key store, the ConnectionState\n of the key store must be CONNECTED. To connect the custom key store, use\n the ConnectCustomKeyStore operation. To find the\n ConnectionState, use the DescribeCustomKeyStores\n operation.

\n

To create a KMS key in a custom key store, use the CustomKeyStoreId.\n Use the default KeySpec value, SYMMETRIC_DEFAULT, and the\n default KeyUsage value, ENCRYPT_DECRYPT, to create a symmetric\n encryption key. No other key type is supported in a custom key store.

\n

To create a KMS key in a CloudHSM key store, use the\n Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM\n cluster that is associated with the custom key store must have at least two active HSMs\n in different Availability Zones in the Amazon Web Services Region.

\n

To create a KMS key in an external key store, use the\n Origin parameter with a value of EXTERNAL_KEY_STORE and an\n XksKeyId parameter that identifies an existing external key.

\n \n

Some external key managers provide a simpler method for creating a KMS key in an\n external key store. For details, see your external key manager documentation.

\n
\n
\n
\n

\n Cross-account use: No. You cannot use this operation to\n create a KMS key in a different Amazon Web Services account.

\n

\n Required permissions: kms:CreateKey (IAM policy). To use the\n Tags parameter, kms:TagResource (IAM policy). For examples and information about related\n permissions, see Allow a user to create\n KMS keys in the Key Management Service Developer Guide.

\n

\n Related operations:\n

\n \n

\n Eventual consistency: The KMS API follows an eventual consistency model. \n For more information, see KMS eventual consistency.
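Once these model changes are regenerated into SotoKMS, creating a key for the new KEY_AGREEMENT usage should look roughly like the sketch below. The member labels and enum case names (`keySpec: .eccNistP256`, `keyUsage: .keyAgreement`) follow Soto's usual camel-casing of the model values but are assumptions until the Swift shapes are generated.

```swift
import SotoKMS

// Sketch only: member and enum case names are assumed from the updated model.
func createKeyAgreementKey(client: AWSClient) async throws -> String? {
    let kms = KMS(client: client, region: .useast1)
    let response = try await kms.createKey(KMS.CreateKeyRequest(
        description: "ECDH key pair for deriving shared secrets",  // hypothetical description
        keySpec: .eccNistP256,     // NIST-recommended curve; required for key agreement keys
        keyUsage: .keyAgreement    // new key usage added in this model update
    ))
    return response.keyMetadata?.keyId  // needed later for DeriveSharedSecret
}
```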

" } }, "com.amazonaws.kms#CreateKeyRequest": { @@ -1001,7 +1007,7 @@ "KeyUsage": { "target": "com.amazonaws.kms#KeyUsageType", "traits": { - "smithy.api#documentation": "

Determines the cryptographic operations for which you can use the KMS key. The default value is\n ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric\n encryption KMS key; otherwise, it is required. You can't change the KeyUsage\n value after the KMS key is created.

\n

Select only one valid value.

\n
    \n
  • \n

    For symmetric encryption KMS keys, omit the parameter or specify\n ENCRYPT_DECRYPT.

    \n
  • \n
  • \n

    For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

    \n
  • \n
  • \n

    For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT or\n SIGN_VERIFY.

    \n
  • \n
  • \n

    For asymmetric KMS keys with ECC key material, specify\n SIGN_VERIFY.

    \n
  • \n
  • \n

    For asymmetric KMS keys with SM2 key material (China Regions only), specify\n ENCRYPT_DECRYPT or SIGN_VERIFY.

    \n
  • \n
" + "smithy.api#documentation": "

Determines the cryptographic operations for which you can use the KMS key. The default value is\n ENCRYPT_DECRYPT. This parameter is optional when you are creating a symmetric\n encryption KMS key; otherwise, it is required. You can't change the KeyUsage\n value after the KMS key is created.

\n

Select only one valid value.

\n
    \n
  • \n

    For symmetric encryption KMS keys, omit the parameter or specify\n ENCRYPT_DECRYPT.

    \n
  • \n
  • \n

    For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.

    \n
  • \n
  • \n

    For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT or\n SIGN_VERIFY.

    \n
  • \n
  • \n

    For asymmetric KMS keys with NIST-recommended elliptic curve key pairs, specify\n SIGN_VERIFY or KEY_AGREEMENT.

    \n
  • \n
  • \n

    For asymmetric KMS keys with ECC_SECG_P256K1 key pairs, specify\n SIGN_VERIFY.

    \n
  • \n
  • \n

    For asymmetric KMS keys with SM2 key pairs (China Regions only), specify\n ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.

    \n
  • \n
" } }, "CustomerMasterKeySpec": { @@ -1016,7 +1022,7 @@ "KeySpec": { "target": "com.amazonaws.kms#KeySpec", "traits": { - "smithy.api#documentation": "

Specifies the type of KMS key to create. The default value,\n SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for\n encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key\n that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the \n Key Management Service Developer Guide\n .

\n

The KeySpec determines whether the KMS key contains a symmetric key or an\n asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't\n change the KeySpec after the KMS key is created. To further restrict the\n algorithms that can be used with the KMS key, use a condition key in its key policy or IAM\n policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the \n Key Management Service Developer Guide\n .

\n \n

\n Amazon Web Services services that\n are integrated with KMS use symmetric encryption KMS keys to protect your data.\n These services do not support asymmetric KMS keys or HMAC KMS keys.

\n
\n

KMS supports the following key specs for KMS keys:

\n
    \n
  • \n

    Symmetric encryption key (default)

    \n
      \n
    • \n

      \n SYMMETRIC_DEFAULT\n

      \n
    • \n
    \n
  • \n
  • \n

    HMAC keys (symmetric)

    \n
      \n
    • \n

      \n HMAC_224\n

      \n
    • \n
    • \n

      \n HMAC_256\n

      \n
    • \n
    • \n

      \n HMAC_384\n

      \n
    • \n
    • \n

      \n HMAC_512\n

      \n
    • \n
    \n
  • \n
  • \n

    Asymmetric RSA key pairs

    \n
      \n
    • \n

      \n RSA_2048\n

      \n
    • \n
    • \n

      \n RSA_3072\n

      \n
    • \n
    • \n

      \n RSA_4096\n

      \n
    • \n
    \n
  • \n
  • \n

    Asymmetric NIST-recommended elliptic curve key pairs

    \n
      \n
    • \n

      \n ECC_NIST_P256 (secp256r1)

      \n
    • \n
    • \n

      \n ECC_NIST_P384 (secp384r1)

      \n
    • \n
    • \n

      \n ECC_NIST_P521 (secp521r1)

      \n
    • \n
    \n
  • \n
  • \n

    Other asymmetric elliptic curve key pairs

    \n
      \n
    • \n

      \n ECC_SECG_P256K1 (secp256k1), commonly used for\n cryptocurrencies.

      \n
    • \n
    \n
  • \n
  • \n

    SM2 key pairs (China Regions only)

    \n
      \n
    • \n

      \n SM2\n

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Specifies the type of KMS key to create. The default value,\n SYMMETRIC_DEFAULT, creates a KMS key with a 256-bit AES-GCM key that is used for\n encryption and decryption, except in China Regions, where it creates a 128-bit symmetric key\n that uses SM4 encryption. For help choosing a key spec for your KMS key, see Choosing a KMS key type in the \n Key Management Service Developer Guide\n .

\n

The KeySpec determines whether the KMS key contains a symmetric key or an\n asymmetric key pair. It also determines the algorithms that the KMS key supports. You can't\n change the KeySpec after the KMS key is created. To further restrict the\n algorithms that can be used with the KMS key, use a condition key in its key policy or IAM\n policy. For more information, see kms:EncryptionAlgorithm, kms:MacAlgorithm or kms:Signing Algorithm in the \n Key Management Service Developer Guide\n .

\n \n

\n Amazon Web Services services that\n are integrated with KMS use symmetric encryption KMS keys to protect your data.\n These services do not support asymmetric KMS keys or HMAC KMS keys.

\n
\n

KMS supports the following key specs for KMS keys:

\n
    \n
  • \n

    Symmetric encryption key (default)

    \n
      \n
    • \n

      \n SYMMETRIC_DEFAULT\n

      \n
    • \n
    \n
  • \n
  • \n

    HMAC keys (symmetric)

    \n
      \n
    • \n

      \n HMAC_224\n

      \n
    • \n
    • \n

      \n HMAC_256\n

      \n
    • \n
    • \n

      \n HMAC_384\n

      \n
    • \n
    • \n

      \n HMAC_512\n

      \n
    • \n
    \n
  • \n
  • \n

    Asymmetric RSA key pairs (encryption and decryption -or- signing and verification)

    \n
      \n
    • \n

      \n RSA_2048\n

      \n
    • \n
    • \n

      \n RSA_3072\n

      \n
    • \n
    • \n

      \n RSA_4096\n

      \n
    • \n
    \n
  • \n
  • \n

    Asymmetric NIST-recommended elliptic curve key pairs (signing and verification -or- deriving shared secrets)

    \n
      \n
    • \n

      \n ECC_NIST_P256 (secp256r1)

      \n
    • \n
    • \n

      \n ECC_NIST_P384 (secp384r1)

      \n
    • \n
    • \n

      \n ECC_NIST_P521 (secp521r1)

      \n
    • \n
    \n
  • \n
  • \n

    Other asymmetric elliptic curve key pairs (signing and verification)

    \n
      \n
    • \n

      \n ECC_SECG_P256K1 (secp256k1), commonly used for\n cryptocurrencies.

      \n
    • \n
    \n
  • \n
  • \n

    SM2 key pairs (encryption and decryption -or- signing and verification -or- deriving shared secrets)

    \n
      \n
    • \n

      \n SM2 (China Regions only)

      \n
    • \n
    \n
  • \n
" } }, "Origin": { @@ -1747,6 +1753,149 @@ "smithy.api#httpError": 503 } }, + "com.amazonaws.kms#DeriveSharedSecret": { + "type": "operation", + "input": { + "target": "com.amazonaws.kms#DeriveSharedSecretRequest" + }, + "output": { + "target": "com.amazonaws.kms#DeriveSharedSecretResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kms#DependencyTimeoutException" + }, + { + "target": "com.amazonaws.kms#DisabledException" + }, + { + "target": "com.amazonaws.kms#DryRunOperationException" + }, + { + "target": "com.amazonaws.kms#InvalidGrantTokenException" + }, + { + "target": "com.amazonaws.kms#InvalidKeyUsageException" + }, + { + "target": "com.amazonaws.kms#KeyUnavailableException" + }, + { + "target": "com.amazonaws.kms#KMSInternalException" + }, + { + "target": "com.amazonaws.kms#KMSInvalidStateException" + }, + { + "target": "com.amazonaws.kms#NotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Derives a shared secret using a key agreement algorithm.

\n \n

You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) \n KMS key pair with a KeyUsage value of KEY_AGREEMENT to call DeriveSharedSecret.

\n
\n

DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive (ECDH) to \n establish a key agreement between two peers by deriving a shared secret from their elliptic curve\n public-private key pairs. You can use the raw shared secret that DeriveSharedSecret returns to derive\n a symmetric key that can encrypt and decrypt data that is sent between the two peers, or that can \n generate and verify HMACs. KMS recommends that you follow NIST recommendations for key derivation when using the raw shared secret to derive a \n symmetric key.

\n

The following workflow demonstrates how to establish key agreement over an insecure communication \n channel using DeriveSharedSecret.

\n
    \n
  1. \n

    \n Alice calls CreateKey to create an asymmetric \n KMS key pair with a KeyUsage value of KEY_AGREEMENT.

    \n

    The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key spec.

    \n
  2. \n
  3. \n

    \n Bob creates an elliptic curve key pair.

    \n

    Bob can call CreateKey to create an asymmetric KMS key\n pair or generate a key pair outside of KMS. Bob's key pair must use the same NIST-recommended elliptic curve (ECC) \n or SM2 (China Regions only) curve as Alice.

    \n
  4. \n
  5. \n

    Alice and Bob exchange their public keys \n through an insecure communication channel (like the internet).

    \n

    Use GetPublicKey to download the public key of your asymmetric KMS key pair.

    \n \n

    KMS strongly recommends verifying that the public key you receive came from the expected \n party before using it to derive a shared secret.

    \n
    \n
  6. \n
  7. \n

    \n Alice calls DeriveSharedSecret.

    \n

    KMS uses the private key from the KMS key pair generated in Step 1,\n Bob's public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive to derive the \n shared secret. The private key in your KMS key pair never leaves KMS unencrypted. DeriveSharedSecret \n returns the raw shared secret.

    \n
  8. \n
  9. \n

    \n Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman \n Primitive to calculate the same raw secret using his private key and Alice's public key.

    \n
  10. \n
\n

To derive a shared secret you must provide a key agreement algorithm, the private key of the caller's asymmetric NIST-recommended \n elliptic curve or SM2 (China Regions only) KMS key pair, and the public key from your peer's NIST-recommended elliptic curve \n or SM2 (China Regions only) key pair. The public key can be from another asymmetric KMS key pair or from a key pair generated outside \n of KMS, but both key pairs must be on the same elliptic curve.

\n

The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

\n

\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

\n

\n Required permissions: kms:DeriveSharedSecret (key policy)

\n

\n Related operations:\n

\n \n

\n Eventual consistency: The KMS API follows an eventual consistency model. \n For more information, see KMS eventual consistency.
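As a sketch of the Alice-side call from Swift once SotoKMS picks up this operation, the snippet below derives the shared secret and then runs it through a KDF as the documentation recommends (here HKDF from swift-crypto). The generated method name, the blob wrapper used for PublicKey, and the `info` label are assumptions, not confirmed API.

```swift
import Crypto
import SotoKMS

// Sketch only: member names mirror the model above; the generated Swift API may differ.
func deriveSymmetricKey(client: AWSClient, keyId: String, peerPublicKeyDER: Data) async throws -> SymmetricKey {
    let kms = KMS(client: client, region: .useast1)
    let response = try await kms.deriveSharedSecret(KMS.DeriveSharedSecretRequest(
        keyAgreementAlgorithm: .ecdh,        // ECDH is the only valid value
        keyId: keyId,                        // KMS key with KeyUsage == KEY_AGREEMENT
        publicKey: .data(peerPublicKeyDER)   // peer's DER-encoded (SPKI) public key, same curve
    ))
    // Do not use the raw shared secret directly; derive a working key from it.
    guard let rawSecret = response.sharedSecret?.decoded() else {
        throw DeriveError.missingSharedSecret
    }
    return HKDF<SHA256>.deriveKey(
        inputKeyMaterial: SymmetricKey(data: Data(rawSecret)),
        info: Data("example-ecdh-aes-256-gcm".utf8),  // hypothetical context label agreed with the peer
        outputByteCount: 32                           // 256-bit symmetric key
    )
}

enum DeriveError: Error { case missingSharedSecret }
```

Bob performs the same derivation with his own private key and Alice's public key, so both sides end up with the same symmetric key without the secret ever crossing the channel.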

", + "smithy.api#examples": [ + { + "title": "To derive a shared secret", + "documentation": "The following example derives a shared secret using a key agreement algorithm.", + "input": { + "KeyId": "1234abcd-12ab-34cd-56ef-1234567890ab", + "KeyAgreementAlgorithm": "ECDH", + "PublicKey": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvH3Yj0wbkLEpUl95Cv1cJVjsVNSjwGq3tCLnzXfhVwVvmzGN8pYj3U8nKwgouaHbBWNJYjP5VutbbkKS4Kv4GojwZBJyHN17kmxo8yTjRmjR15SKIQ8cqRA2uaERMLnpztIXdZp232PQPbWGxDyXYJ0aJ5EFSag" + }, + "output": { + "KeyId": "1234abcd-12ab-34cd-56ef-1234567890ab", + "SharedSecret": "MEYCIQCKZLWyTk5runarx6XiAkU9gv3lbwPO/pHa+DXFehzdDwIhANwpsIV2g/9SPWLLsF6p/hiSskuIXMTRwqrMdVKWTMHG", + "KeyAgreementAlgorithm": "ECDH", + "KeyOrigin": "AWS_KMS" + } + } + ] + } + }, + "com.amazonaws.kms#DeriveSharedSecretRequest": { + "type": "structure", + "members": { + "KeyId": { + "target": "com.amazonaws.kms#KeyIdType", + "traits": { + "smithy.api#documentation": "

Identifies an asymmetric NIST-recommended ECC or SM2 (China Regions only) KMS key. KMS \n uses the private key in the specified key pair to derive the shared secret. The key usage of\n the KMS key must be KEY_AGREEMENT. To find the \n KeyUsage of a KMS key, use the DescribeKey operation.

\n

To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". To specify a KMS key in a different Amazon Web Services account, you must use the key ARN or alias ARN.

\n

For example:

\n
    \n
  • \n

    Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Alias name: alias/ExampleAlias\n

    \n
  • \n
  • \n

    Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias\n

    \n
  • \n
\n

To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias ARN, use ListAliases.

", + "smithy.api#required": {} + } + }, + "KeyAgreementAlgorithm": { + "target": "com.amazonaws.kms#KeyAgreementAlgorithmSpec", + "traits": { + "smithy.api#documentation": "

Specifies the key agreement algorithm used to derive the shared secret. The only valid value is ECDH.

", + "smithy.api#required": {} + } + }, + "PublicKey": { + "target": "com.amazonaws.kms#PublicKeyType", + "traits": { + "smithy.api#documentation": "

Specifies the public key in your peer's NIST-recommended elliptic curve (ECC) or SM2 (China Regions only) key pair.

\n

The public key must be a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280.

\n

\n GetPublicKey returns the public key of an asymmetric KMS key pair in the required DER-encoded format.

\n \n

If you use Amazon Web Services CLI version 1, \n you must provide the DER-encoded X.509 public key in a file. Otherwise, the Amazon Web Services CLI Base64-encodes the public key a \n second time, resulting in a ValidationException.

\n
\n

You can specify the public key as binary data in a file using fileb (fileb://) or\n in-line using a Base64 encoded string.

", + "smithy.api#required": {} + } + }, + "GrantTokens": { + "target": "com.amazonaws.kms#GrantTokenList", + "traits": { + "smithy.api#documentation": "

A list of grant tokens.

\n

Use a grant token when your permission to call this operation comes from a new grant that has not yet achieved eventual consistency. For more information, see Grant token and Using a grant token in the\n Key Management Service Developer Guide.

" + } + }, + "DryRun": { + "target": "com.amazonaws.kms#NullableBooleanType", + "traits": { + "smithy.api#documentation": "

Checks if your request will succeed. DryRun is an optional parameter.

\n

To learn more about how to use this parameter, see Testing your KMS API calls in the Key Management Service Developer Guide.

" + } + }, + "Recipient": { + "target": "com.amazonaws.kms#RecipientInfo", + "traits": { + "smithy.api#documentation": "

A signed attestation document from\n an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The\n only valid encryption algorithm is RSAES_OAEP_SHA_256.

\n

This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To call \n DeriveSharedSecret for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK to generate the attestation \n document and then use the Recipient parameter from any Amazon Web Services SDK to provide the attestation \n document for the enclave.

\n

When you use this parameter, instead of returning a plaintext copy of the shared secret,\n KMS encrypts the plaintext shared secret under the public key in the attestation\n document, and returns the resulting ciphertext in the CiphertextForRecipient\n field in the response. This ciphertext can be decrypted only with the private key in the\n enclave. The CiphertextBlob field in the response contains the encrypted shared \n secret derived from the KMS key specified by the KeyId parameter and public key\n specified by the PublicKey parameter. The SharedSecret field in \n the response is null or empty.

\n

For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.kms#DeriveSharedSecretResponse": { + "type": "structure", + "members": { + "KeyId": { + "target": "com.amazonaws.kms#KeyIdType", + "traits": { + "smithy.api#documentation": "

Identifies the KMS key used to derive the shared secret.

" + } + }, + "SharedSecret": { + "target": "com.amazonaws.kms#PlaintextType", + "traits": { + "smithy.api#documentation": "

The raw secret derived from the specified key agreement algorithm, private key in the\n asymmetric KMS key, and your peer's public key.

\n

If the response includes the CiphertextForRecipient field, the SharedSecret field is null or\n empty.

" + } + }, + "CiphertextForRecipient": { + "target": "com.amazonaws.kms#CiphertextType", + "traits": { + "smithy.api#documentation": "

The plaintext shared secret encrypted with the public key in the attestation document.

\n

This field is included in the response only when the Recipient parameter in\n the request includes a valid attestation document from an Amazon Web Services Nitro enclave.\n For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" + } + }, + "KeyAgreementAlgorithm": { + "target": "com.amazonaws.kms#KeyAgreementAlgorithmSpec", + "traits": { + "smithy.api#documentation": "

Identifies the key agreement algorithm used to derive the shared secret.

" + } + }, + "KeyOrigin": { + "target": "com.amazonaws.kms#OriginType", + "traits": { + "smithy.api#documentation": "

The source of the key material for the specified KMS key.

\n

When this value is AWS_KMS, KMS created the key material. When this value is EXTERNAL, \n the key material was imported or the KMS key doesn't have any key material.

\n

The only valid values for DeriveSharedSecret are AWS_KMS and EXTERNAL. DeriveSharedSecret \n does not support KMS keys with a KeyOrigin value of AWS_CLOUDHSM or \n EXTERNAL_KEY_STORE.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.kms#DescribeCustomKeyStores": { "type": "operation", "input": { @@ -2782,7 +2931,7 @@ "Recipient": { "target": "com.amazonaws.kms#RecipientInfo", "traits": { - "smithy.api#documentation": "

A signed attestation document from\n an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The\n only valid encryption algorithm is RSAES_OAEP_SHA_256.

\n

This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To include this\n parameter, use the Amazon Web Services Nitro Enclaves SDK or any Amazon Web Services SDK.

\n

When you use this parameter, instead of returning a plaintext copy of the private data\n key, KMS encrypts the plaintext private data key under the public key in the attestation\n document, and returns the resulting ciphertext in the CiphertextForRecipient\n field in the response. This ciphertext can be decrypted only with the private key in the\n enclave. The CiphertextBlob field in the response contains a copy of the private\n data key encrypted under the KMS key specified by the KeyId parameter. The\n PrivateKeyPlaintext field in the response is null or empty.

\n

For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" + "smithy.api#documentation": "

A signed attestation document from\n an Amazon Web Services Nitro enclave and the encryption algorithm to use with the enclave's public key. The\n only valid encryption algorithm is RSAES_OAEP_SHA_256.

\n

This parameter only supports attestation documents for Amazon Web Services Nitro Enclaves. To call \n DeriveSharedSecret for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK to generate the attestation \n document and then use the Recipient parameter from any Amazon Web Services SDK to provide the attestation \n document for the enclave.

\n

When you use this parameter, instead of returning a plaintext copy of the private data\n key, KMS encrypts the plaintext private data key under the public key in the attestation\n document, and returns the resulting ciphertext in the CiphertextForRecipient\n field in the response. This ciphertext can be decrypted only with the private key in the\n enclave. The CiphertextBlob field in the response contains a copy of the private\n data key encrypted under the KMS key specified by the KeyId parameter. The\n PrivateKeyPlaintext field in the response is null or empty.

\n

For information about the interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS in the Key Management Service Developer Guide.

" } }, "DryRun": { @@ -3686,7 +3835,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the public key of an asymmetric KMS key. Unlike the private key of a asymmetric\n KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey\n permission can download the public key of an asymmetric KMS key. You can share the public key\n to allow others to encrypt messages and verify signatures outside of KMS.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

\n

You do not need to download the public key. Instead, you can use the public key within\n KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the\n public key within KMS, you benefit from the authentication, authorization, and logging that\n are part of every KMS operation. You also reduce of risk of encrypting data that cannot be\n decrypted. These features are not effective outside of KMS.

\n

To help you use the public key safely outside of KMS, GetPublicKey returns\n important information about the public key in the response, including:

\n
    \n
  • \n

    \n KeySpec: The type of key material in the public key, such as\n RSA_4096 or ECC_NIST_P521.

    \n
  • \n
  • \n

    \n KeyUsage: Whether the key is used for encryption or signing.

    \n
  • \n
  • \n

    \n EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing\n algorithms for the key.

    \n
  • \n
\n

Although KMS cannot enforce these restrictions on external operations, it is crucial\n that you use this information to prevent the public key from being used improperly. For\n example, you can prevent a public signing key from being used encrypt data, or prevent a\n public key from being used with an encryption algorithm that is not supported by KMS. You\n can also avoid errors, such as using the wrong signing algorithm in a verification\n operation.

\n

To verify a signature outside of KMS with an SM2 public key (China Regions only), you\n must specify the distinguishing ID. By default, KMS uses 1234567812345678 as\n the distinguishing ID. For more information, see Offline\n verification with SM2 key pairs.

\n

The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

\n

\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

\n

\n Required permissions: kms:GetPublicKey (key policy)

\n

\n Related operations: CreateKey\n

\n

\n Eventual consistency: The KMS API follows an eventual consistency model. \n For more information, see KMS eventual consistency.

", + "smithy.api#documentation": "

Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric\n KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey\n permission can download the public key of an asymmetric KMS key. You can share the public key\n to allow others to encrypt messages and verify signatures outside of KMS.\n For information about asymmetric KMS keys, see Asymmetric KMS keys in the Key Management Service Developer Guide.

\n

You do not need to download the public key. Instead, you can use the public key within\n KMS by calling the Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric KMS key. When you use the\n public key within KMS, you benefit from the authentication, authorization, and logging that\n are part of every KMS operation. You also reduce the risk of encrypting data that cannot be\n decrypted. These features are not effective outside of KMS.

\n

To help you use the public key safely outside of KMS, GetPublicKey returns\n important information about the public key in the response, including:

\n
    \n
  • \n

    \n KeySpec: The type of key material in the public key, such as\n RSA_4096 or ECC_NIST_P521.

    \n
  • \n
  • \n

    \n KeyUsage: Whether the key is used for encryption, signing, or \n deriving a shared secret.

    \n
  • \n
  • \n

    \n EncryptionAlgorithms or SigningAlgorithms: A list of the encryption algorithms or the signing\n algorithms for the key.

    \n
  • \n
\n

Although KMS cannot enforce these restrictions on external operations, it is crucial\n that you use this information to prevent the public key from being used improperly. For\n example, you can prevent a public signing key from being used to encrypt data, or prevent a\n public key from being used with an encryption algorithm that is not supported by KMS. You\n can also avoid errors, such as using the wrong signing algorithm in a verification\n operation.

\n

To verify a signature outside of KMS with an SM2 public key (China Regions only), you\n must specify the distinguishing ID. By default, KMS uses 1234567812345678 as\n the distinguishing ID. For more information, see Offline\n verification with SM2 key pairs.

\n

The KMS key that you use for this operation must be in a compatible key state. For\ndetails, see Key states of KMS keys in the Key Management Service Developer Guide.

\n

\n Cross-account use: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify\n the key ARN or alias ARN in the value of the KeyId parameter.

\n

\n Required permissions: kms:GetPublicKey (key policy)

\n

\n Related operations: CreateKey\n

\n

\n Eventual consistency: The KMS API follows an eventual consistency model. \n For more information, see KMS eventual consistency.
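A sketch of how a Swift caller might download the public key and check the new key-agreement metadata before sharing it. Field names follow the response shape below, but the generated optionality and blob type are assumptions.

```swift
import SotoKMS

// Sketch only: response member names are taken from the model; generated code may differ.
func exportKeyAgreementPublicKey(client: AWSClient, keyId: String) async throws -> Data {
    let kms = KMS(client: client, region: .useast1)
    let response = try await kms.getPublicKey(KMS.GetPublicKeyRequest(keyId: keyId))
    // Heed the KeyUsage guidance above: only hand out the key for the use it was created for.
    guard response.keyUsage == .keyAgreement,
          response.keyAgreementAlgorithms?.contains(.ecdh) == true,
          let der = response.publicKey?.decoded() else {
        throw ExportError.notAKeyAgreementKey
    }
    return Data(der)  // DER-encoded SubjectPublicKeyInfo, ready to send to the peer
}

enum ExportError: Error { case notAKeyAgreementKey }
```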

", "smithy.api#examples": [ { "title": "To download the public key of an asymmetric KMS key", @@ -3762,7 +3911,7 @@ "KeyUsage": { "target": "com.amazonaws.kms#KeyUsageType", "traits": { - "smithy.api#documentation": "

The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or\n SIGN_VERIFY.

\n

This information is critical. If a public key with SIGN_VERIFY key usage\n encrypts data outside of KMS, the ciphertext cannot be decrypted.

" + "smithy.api#documentation": "

The permitted use of the public key. Valid values for asymmetric key pairs are ENCRYPT_DECRYPT,\n SIGN_VERIFY, and KEY_AGREEMENT.

\n

This information is critical. For example, if a public key with SIGN_VERIFY key usage\n encrypts data outside of KMS, the ciphertext cannot be decrypted.

" } }, "EncryptionAlgorithms": { @@ -3776,6 +3925,12 @@ "traits": { "smithy.api#documentation": "

The signing algorithms that KMS supports for this key.

\n

This field appears in the response only when the KeyUsage of the public key\n is SIGN_VERIFY.

" } + }, + "KeyAgreementAlgorithms": { + "target": "com.amazonaws.kms#KeyAgreementAlgorithmSpecList", + "traits": { + "smithy.api#documentation": "

The key agreement algorithm used to derive a shared secret. This field is present only when the KMS key has a KeyUsage value of KEY_AGREEMENT.

" + } } }, "traits": { @@ -3987,6 +4142,12 @@ "traits": { "smithy.api#enumValue": "VerifyMac" } + }, + "DeriveSharedSecret": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeriveSharedSecret" + } } } }, @@ -4286,7 +4447,7 @@ "code": "InvalidKeyUsage", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The request was rejected for one of the following reasons:

\n
    \n
  • \n

    The KeyUsage value of the KMS key is incompatible with the API\n operation.

    \n
  • \n
  • \n

    The encryption algorithm or signing algorithm specified for the operation is\n incompatible with the type of key material in the KMS key (KeySpec).

    \n
  • \n
\n

For encrypting, decrypting, re-encrypting, and generating data keys, the\n KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying\n messages, the KeyUsage must be SIGN_VERIFY. For generating and\n verifying message authentication codes (MACs), the KeyUsage must be\n GENERATE_VERIFY_MAC. To find the KeyUsage of a KMS key, use the\n DescribeKey operation.

\n

To find the encryption or signing algorithms supported for a particular KMS key, use the\n DescribeKey operation.

", + "smithy.api#documentation": "

The request was rejected for one of the following reasons:

\n
    \n
  • \n

    The KeyUsage value of the KMS key is incompatible with the API\n operation.

    \n
  • \n
  • \n

    The encryption algorithm or signing algorithm specified for the operation is\n incompatible with the type of key material in the KMS key (KeySpec).

    \n
  • \n
\n

For encrypting, decrypting, re-encrypting, and generating data keys, the\n KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying\n messages, the KeyUsage must be SIGN_VERIFY. For generating and\n verifying message authentication codes (MACs), the KeyUsage must be\n GENERATE_VERIFY_MAC. For deriving shared secrets, the \n KeyUsage must be KEY_AGREEMENT. To find the KeyUsage of a KMS key, use the\n DescribeKey operation.

\n

To find the encryption or signing algorithms supported for a particular KMS key, use the\n DescribeKey operation.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -4376,6 +4537,23 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.kms#KeyAgreementAlgorithmSpec": { + "type": "enum", + "members": { + "ECDH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ECDH" + } + } + } + }, + "com.amazonaws.kms#KeyAgreementAlgorithmSpecList": { + "type": "list", + "member": { + "target": "com.amazonaws.kms#KeyAgreementAlgorithmSpec" + } + }, "com.amazonaws.kms#KeyEncryptionMechanism": { "type": "enum", "members": { @@ -4561,6 +4739,12 @@ "smithy.api#documentation": "

The signing algorithms that the KMS key supports. You cannot use the KMS key with other\n signing algorithms within KMS.

\n

This field appears only when the KeyUsage of the KMS key is\n SIGN_VERIFY.

" } }, + "KeyAgreementAlgorithms": { + "target": "com.amazonaws.kms#KeyAgreementAlgorithmSpecList", + "traits": { + "smithy.api#documentation": "

The key agreement algorithm used to derive a shared secret.

" + } + }, "MultiRegion": { "target": "com.amazonaws.kms#NullableBooleanType", "traits": { @@ -4779,6 +4963,12 @@ "traits": { "smithy.api#enumValue": "GENERATE_VERIFY_MAC" } + }, + "KEY_AGREEMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "KEY_AGREEMENT" + } } } }, @@ -7009,6 +7199,9 @@ { "target": "com.amazonaws.kms#DeleteImportedKeyMaterial" }, + { + "target": "com.amazonaws.kms#DeriveSharedSecret" + }, { "target": "com.amazonaws.kms#DescribeCustomKeyStores" }, @@ -9166,6 +9359,12 @@ "traits": { "smithy.api#enumValue": "RSA_4096" } + }, + "SM2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SM2" + } } } }, diff --git a/models/lakeformation.json b/models/lakeformation.json index 00919566a0..0b9715a0a5 100644 --- a/models/lakeformation.json +++ b/models/lakeformation.json @@ -96,6 +96,9 @@ { "target": "com.amazonaws.lakeformation#GetDataCellsFilter" }, + { + "target": "com.amazonaws.lakeformation#GetDataLakePrincipal" + }, { "target": "com.amazonaws.lakeformation#GetDataLakeSettings" }, @@ -3593,6 +3596,55 @@ "smithy.api#output": {} } }, + "com.amazonaws.lakeformation#GetDataLakePrincipal": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetDataLakePrincipalRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetDataLakePrincipalResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the identity of the invoking principal.
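Assuming Soto regenerates SotoLakeFormation with this new operation, calling it should be a one-liner along these lines (the request shape has no members); the method and property names are assumptions until the Swift code is generated.

```swift
import SotoLakeFormation

// Sketch only: assumes the regenerated service exposes getDataLakePrincipal.
func callerIdentity(client: AWSClient) async throws -> String? {
    let lakeFormation = LakeFormation(client: client, region: .useast1)
    let response = try await lakeFormation.getDataLakePrincipal(LakeFormation.GetDataLakePrincipalRequest())
    return response.identity  // unique identifier of the invoking principal
}
```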

", + "smithy.api#http": { + "method": "POST", + "uri": "/GetDataLakePrincipal", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#GetDataLakePrincipalRequest": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.lakeformation#GetDataLakePrincipalResponse": { + "type": "structure", + "members": { + "Identity": { + "target": "com.amazonaws.lakeformation#IdentityString", + "traits": { + "smithy.api#documentation": "

A unique identifier of the invoking principal.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.lakeformation#GetDataLakeSettings": { "type": "operation", "input": { @@ -4775,6 +4827,9 @@ "com.amazonaws.lakeformation#IdentityCenterInstanceArn": { "type": "string" }, + "com.amazonaws.lakeformation#IdentityString": { + "type": "string" + }, "com.amazonaws.lakeformation#InternalServiceException": { "type": "structure", "members": { diff --git a/models/launch-wizard.json b/models/launch-wizard.json index 30b6bc0437..786835f03b 100644 --- a/models/launch-wizard.json +++ b/models/launch-wizard.json @@ -1,6 +1,12 @@ { "smithy": "2.0", "shapes": { + "com.amazonaws.launchwizard#AllowedValues": { + "type": "list", + "member": { + "target": "com.amazonaws.launchwizard#ValueString" + } + }, "com.amazonaws.launchwizard#CreateDeployment": { "type": "operation", "input": { @@ -49,6 +55,34 @@ "output": { "deploymentId": "4c1b59c1-659c-467f-b6e9-6ef6f9d28e1d" } + }, + { + "title": "Deploy a given workload with given settings and passing tags for Launch Wizard deployment resource.", + "input": { + "workloadName": "SAP", + "deploymentPatternName": "SapHanaSingle", + "name": "SapHanaSingleForTest", + "dryRun": false, + "specifications": { + "applicationName": "SapHanaSingleForTest", + "deploymentScenario": "SapHanaSingle", + "DisableDeploymentRollback": "true", + "saveDeploymentArtifacts": "Yes", + "saveArtifactsS3Uri": "s3://testbucket", + "KeyName": "testLinuxInstance", + "VPCID": "vpc-1234567", + "environmentType": "production", + "SAPTZ": "America/Vancouver", + "Encryption": "Yes" + }, + "tags": { + "key1": "val1", + "key2": "val2" + } + }, + "output": { + "deploymentId": "1111111-1111-1111-1111-111111111111" + } } ], "smithy.api#http": { @@ -64,7 +98,7 @@ "workloadName": { "target": "com.amazonaws.launchwizard#WorkloadName", "traits": { - "smithy.api#documentation": "

The name of the workload. You can use the \n ListWorkloadDeploymentPatterns\n operation to discover supported\n values for this parameter.

", + "smithy.api#documentation": "

The name of the workload. You can use the \n ListWorkloads\n operation to discover supported\n values for this parameter.

", "smithy.api#required": {} } }, @@ -85,7 +119,7 @@ "specifications": { "target": "com.amazonaws.launchwizard#DeploymentSpecifications", "traits": { - "smithy.api#documentation": "

The settings specified for the deployment. For more information on the specifications\n required for creating a deployment, see Workload specifications.

", + "smithy.api#documentation": "

The settings specified for the deployment. These settings define how to deploy and configure your\n resources created by the deployment. For more information about the specifications\n required for creating a deployment for a SAP workload, see SAP deployment\n specifications. To retrieve the specifications required to create a deployment for other workloads,\n use the \n GetWorkloadDeploymentPattern\n operation.

", "smithy.api#required": {} } }, @@ -95,6 +129,12 @@ "smithy.api#default": false, "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making\n the request, and provides an error response. If you have the required permissions, the\n error response is DryRunOperation. Otherwise, it is\n UnauthorizedOperation.

" } + }, + "tags": { + "target": "com.amazonaws.launchwizard#Tags", + "traits": { + "smithy.api#documentation": "

The tags to add to the deployment.

" + } } }, "traits": { @@ -127,6 +167,9 @@ { "target": "com.amazonaws.launchwizard#InternalServerException" }, + { + "target": "com.amazonaws.launchwizard#ResourceLimitException" + }, { "target": "com.amazonaws.launchwizard#ResourceNotFoundException" }, @@ -216,6 +259,32 @@ } ] }, + "com.amazonaws.launchwizard#DeploymentConditionalField": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the deployment condition.

" + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The value of the condition.

" + } + }, + "comparator": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The comparator of the condition.

\n

Valid values: Equal | NotEqual\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A field that details a condition of the specifications for a deployment.

" + } + }, "com.amazonaws.launchwizard#DeploymentData": { "type": "structure", "members": { @@ -258,7 +327,7 @@ "specifications": { "target": "com.amazonaws.launchwizard#DeploymentSpecifications", "traits": { - "smithy.api#documentation": "

The specifications of the deployment. For more information on specifications for each\n deployment, see Workload specifications.

" + "smithy.api#documentation": "

The settings specified for the deployment. These settings define how to deploy and configure your\n resources created by the deployment. For more information about the specifications\n required for creating a deployment for a SAP workload, see SAP deployment\n specifications. To retrieve the specifications required to create a deployment for other workloads,\n use the \n GetWorkloadDeploymentPattern\n operation.

" } }, "resourceGroup": { @@ -272,6 +341,18 @@ "traits": { "smithy.api#documentation": "

The time the deployment was deleted.

" } + }, + "tags": { + "target": "com.amazonaws.launchwizard#Tags", + "traits": { + "smithy.api#documentation": "

Information about the tags attached to a deployment.

" + } + }, + "deploymentArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the deployment.

" + } } }, "traits": { @@ -461,9 +542,9 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 25 + "max": 50 }, - "smithy.api#pattern": "^[A-Za-z0-9_\\s\\.-]+$" + "smithy.api#pattern": "^[A-Za-z0-9_\\.-]+$" } }, "com.amazonaws.launchwizard#DeploymentPatternName": { @@ -473,7 +554,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "^[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^[A-Za-z0-9][a-zA-Z0-9-]*$" } }, "com.amazonaws.launchwizard#DeploymentSpecifications": { @@ -492,6 +573,56 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.launchwizard#DeploymentSpecificationsData": { + "type": "list", + "member": { + "target": "com.amazonaws.launchwizard#DeploymentSpecificationsField" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.launchwizard#DeploymentSpecificationsField": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the deployment specification.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the deployment specification.

" + } + }, + "allowedValues": { + "target": "com.amazonaws.launchwizard#AllowedValues", + "traits": { + "smithy.api#documentation": "

The allowed values of the deployment specification.

" + } + }, + "required": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Indicates if the deployment specification is required.

" + } + }, + "conditionals": { + "target": "com.amazonaws.launchwizard#SpecificationsConditionalData", + "traits": { + "smithy.api#documentation": "

The conditionals used for the deployment specification.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A field that details a specification of a deployment pattern.

" + } + }, "com.amazonaws.launchwizard#DeploymentStatus": { "type": "enum", "members": { @@ -638,12 +769,12 @@ { "title": "Get details about a given deployment.", "input": { - "deploymentId": "4c1b59c1-659c-467f-b6e9-6ef6f9d28e1d" + "deploymentId": "1111111-1111-1111-1111-111111111111" }, "output": { "deployment": { "name": "SapHanaSingleForTest", - "id": "4c1b59c1-659c-467f-b6e9-6ef6f9d28e1d", + "id": "1111111-1111-1111-1111-111111111111", "workloadName": "SapHanaSingle", "status": "FAILED", "createdAt": "2023-04-24T13:10:09.857Z", @@ -658,6 +789,11 @@ "environmentType": "production", "SAPTZ": "America/Vancouver", "Encryption": "Yes" + }, + "deploymentArn": "arn:aws:launchwizard:us-east-1:123456789012:deployment/1111111-1111-1111-1111-111111111111", + "tags": { + "key1": "val1", + "key2": "val2" } } } @@ -747,6 +883,107 @@ "smithy.api#readonly": {} } }, + "com.amazonaws.launchwizard#GetWorkloadDeploymentPattern": { + "type": "operation", + "input": { + "target": "com.amazonaws.launchwizard#GetWorkloadDeploymentPatternInput" + }, + "output": { + "target": "com.amazonaws.launchwizard#GetWorkloadDeploymentPatternOutput" + }, + "errors": [ + { + "target": "com.amazonaws.launchwizard#InternalServerException" + }, + { + "target": "com.amazonaws.launchwizard#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.launchwizard#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns details for a given workload and deployment pattern, including the available\n specifications. You can use the ListWorkloads\n operation to discover the available workload names and the ListWorkloadDeploymentPatterns operation to discover the available deployment\n pattern names of a given workload.
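Putting the new Launch Wizard operations together from Swift might look like the sketch below: fetch the deployment pattern to see which specifications it expects, then create a deployment, attaching tags via the newly added tags support. All shape and member names are read off this model, and the workload, pattern, and specification values are placeholders, so treat this as an assumption-laden outline rather than working code.

```swift
import SotoLaunchWizard

// Sketch only: shape names (…Input/…Output) and members are assumed from the updated model.
func deployFromPattern(client: AWSClient) async throws -> String? {
    let launchWizard = LaunchWizard(client: client, region: .useast1)

    // 1. Discover which specifications the chosen pattern requires.
    let pattern = try await launchWizard.getWorkloadDeploymentPattern(
        LaunchWizard.GetWorkloadDeploymentPatternInput(
            deploymentPatternName: "adSelfManagedNewVpc",   // placeholder pattern
            workloadName: "MicrosoftActiveDirectory"        // placeholder workload
        )
    )
    for spec in pattern.workloadDeploymentPattern?.specifications ?? [] {
        print("\(spec.name ?? "?"): required=\(spec.required ?? "No")")
    }

    // 2. Create the deployment, attaching tags (new in this model update).
    let deployment = try await launchWizard.createDeployment(
        LaunchWizard.CreateDeploymentInput(
            deploymentPatternName: "adSelfManagedNewVpc",
            name: "ad-test-deployment",                     // placeholder deployment name
            specifications: ["VPCCIDR": "10.0.0.0/16"],     // placeholder, incomplete settings
            tags: ["team": "infra"],
            workloadName: "MicrosoftActiveDirectory"
        )
    )
    return deployment.deploymentId
}
```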

", + "smithy.api#examples": [ + { + "title": "Get details about a specific Workload deployment pattern", + "input": { + "workloadName": "MicrosoftActiveDirectory", + "deploymentPatternName": "adSelfManagedNewVpc" + }, + "output": { + "workloadDeploymentPattern": { + "workloadName": "MicrosoftActiveDirectory", + "deploymentPatternName": "adSelfManagedNewVpc", + "workloadVersionName": "2024-03-19-14-00-09", + "displayName": "Self-managed AD - new VPC", + "description": "Builds a new AWS environment (VPC and other components), and deploys AD DS into this new VPC.", + "status": "ACTIVE", + "specifications": [ + { + "name": "NumberOfAZs", + "description": "Number of Availability Zones to use in the VPC.", + "required": "Yes" + }, + { + "name": "AvailabilityZones", + "description": "List of Availability Zones (AZs) to use for the subnets in the VPC.", + "required": "Yes" + }, + { + "name": "VPCCIDR", + "description": "CIDR block for the VPC.", + "required": "Yes" + } + ] + } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/getWorkloadDeploymentPattern" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.launchwizard#GetWorkloadDeploymentPatternInput": { + "type": "structure", + "members": { + "workloadName": { + "target": "com.amazonaws.launchwizard#WorkloadName", + "traits": { + "smithy.api#documentation": "

The name of the workload.

", + "smithy.api#required": {} + } + }, + "deploymentPatternName": { + "target": "com.amazonaws.launchwizard#DeploymentPatternName", + "traits": { + "smithy.api#documentation": "

The name of the deployment pattern.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.launchwizard#GetWorkloadDeploymentPatternOutput": { + "type": "structure", + "members": { + "workloadDeploymentPattern": { + "target": "com.amazonaws.launchwizard#WorkloadDeploymentPatternData", + "traits": { + "smithy.api#documentation": "

Details about the workload deployment pattern.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.launchwizard#GetWorkloadInput": { "type": "structure", "members": { @@ -802,6 +1039,17 @@ "com.amazonaws.launchwizard#LaunchWizard": { "type": "service", "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.launchwizard#ListTagsForResource" + }, + { + "target": "com.amazonaws.launchwizard#TagResource" + }, + { + "target": "com.amazonaws.launchwizard#UntagResource" + } + ], "resources": [ { "target": "com.amazonaws.launchwizard#Deployment" @@ -1488,17 +1736,6 @@ "expect": { "error": "Invalid Configuration: Missing Region" } - }, - { - "documentation": "Partition doesn't support DualStack", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } } ], "version": "1.0" @@ -1679,7 +1916,7 @@ "filters": { "target": "com.amazonaws.launchwizard#DeploymentFilterList", "traits": { - "smithy.api#documentation": "

Filters to scope the results. The following filters are supported:

\n
    \n
  • \n

    \n WORKLOAD_NAME\n

    \n
  • \n
  • \n

    \n DEPLOYMENT_STATUS\n

    \n
  • \n
" + "smithy.api#documentation": "

Filters to scope the results. The following filters are supported:

\n
    \n
  • \n

    \n WORKLOAD_NAME - The name used in deployments.

    \n
  • \n
  • \n

    \n DEPLOYMENT_STATUS - COMPLETED | CREATING |\n DELETE_IN_PROGRESS | DELETE_INITIATING |\n DELETE_FAILED | DELETED | FAILED |\n IN_PROGRESS | VALIDATING\n

    \n
  • \n
" } }, "maxResults": { @@ -1720,6 +1957,78 @@ "smithy.api#output": {} } }, + "com.amazonaws.launchwizard#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.launchwizard#ListTagsForResourceInput" + }, + "output": { + "target": "com.amazonaws.launchwizard#ListTagsForResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.launchwizard#InternalServerException" + }, + { + "target": "com.amazonaws.launchwizard#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.launchwizard#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the tags associated with a specified resource.

", + "smithy.api#examples": [ + { + "title": "Listing tags on a Launch Wizard deployment resource.", + "input": { + "resourceArn": "arn:aws:launchwizard:us-east-1:123456789012:deployment/11111111-1111-1111-1111-111111111111" + }, + "output": { + "tags": { + "key1": "value1", + "key2": "value2" + } + } + } + ], + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.launchwizard#ListTagsForResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.launchwizard#ListTagsForResourceOutput": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.launchwizard#Tags", + "traits": { + "smithy.api#documentation": "

Information about the tags.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.launchwizard#ListWorkloadDeploymentPatterns": { "type": "operation", "input": { @@ -1740,7 +2049,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the workload deployment patterns.

", + "smithy.api#documentation": "

Lists the workload deployment patterns for a given workload name. You can use the ListWorkloads operation to discover the available workload names.

", "smithy.api#examples": [ { "title": "List all available workloads supported by AWS Launch Wizard.", @@ -1848,7 +2157,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the workloads.

", + "smithy.api#documentation": "

Lists the available workload names. You can use the ListWorkloadDeploymentPatterns operation to discover the available deployment patterns for a given workload.

", "smithy.api#examples": [ { "title": "List all available workloads supported by AWS Launch Wizard.", @@ -2007,6 +2316,206 @@ "smithy.api#pattern": "^[a-zA-Z0-9-]{2,50}$" } }, + "com.amazonaws.launchwizard#SpecificationsConditionalData": { + "type": "list", + "member": { + "target": "com.amazonaws.launchwizard#DeploymentConditionalField" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.launchwizard#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" + } + }, + "com.amazonaws.launchwizard#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.launchwizard#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.launchwizard#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.launchwizard#TagResourceInput" + }, + "output": { + "target": "com.amazonaws.launchwizard#TagResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.launchwizard#InternalServerException" + }, + { + "target": "com.amazonaws.launchwizard#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.launchwizard#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Adds the specified tags to the given resource.

", + "smithy.api#examples": [ + { + "title": "Adding tags to a Launch Wizard deployment resource.", + "input": { + "resourceArn": "arn:aws:launchwizard:us-east-1:123456789012:deployment/11111111-1111-1111-1111-111111111111", + "tags": { + "key1": "value1", + "key2": "value2" + } + }, + "output": {} + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{resourceArn}" + } + } + }, + "com.amazonaws.launchwizard#TagResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.launchwizard#Tags", + "traits": { + "smithy.api#documentation": "

One or more tags to attach to the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.launchwizard#TagResourceOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.launchwizard#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.launchwizard#Tags": { + "type": "map", + "key": { + "target": "com.amazonaws.launchwizard#TagKey" + }, + "value": { + "target": "com.amazonaws.launchwizard#TagValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.launchwizard#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.launchwizard#UntagResourceInput" + }, + "output": { + "target": "com.amazonaws.launchwizard#UntagResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.launchwizard#InternalServerException" + }, + { + "target": "com.amazonaws.launchwizard#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.launchwizard#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes the specified tags from the given resource.

", + "smithy.api#examples": [ + { + "title": "Removing tags on a Launch Wizard deployment resource.", + "input": { + "resourceArn": "arn:aws:launchwizard:us-east-1:123456789012:deployment/11111111-1111-1111-1111-111111111111", + "tagKeys": [ + "key1", + "key2" + ] + }, + "output": {} + } + ], + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.launchwizard#UntagResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.launchwizard#TagKeyList", + "traits": { + "smithy.api#documentation": "

Keys identifying the tags to remove.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.launchwizard#UntagResourceOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.launchwizard#ValidationException": { "type": "structure", "members": { @@ -2134,10 +2643,69 @@ "target": "com.amazonaws.launchwizard#DeploymentPatternName" } }, + "read": { + "target": "com.amazonaws.launchwizard#GetWorkloadDeploymentPattern" + }, "list": { "target": "com.amazonaws.launchwizard#ListWorkloadDeploymentPatterns" } }, + "com.amazonaws.launchwizard#WorkloadDeploymentPatternData": { + "type": "structure", + "members": { + "workloadName": { + "target": "com.amazonaws.launchwizard#WorkloadName", + "traits": { + "smithy.api#documentation": "

The workload name of the deployment pattern.

" + } + }, + "deploymentPatternName": { + "target": "com.amazonaws.launchwizard#DeploymentPatternName", + "traits": { + "smithy.api#documentation": "

The name of the deployment pattern.

" + } + }, + "workloadVersionName": { + "target": "com.amazonaws.launchwizard#WorkloadVersionName", + "traits": { + "smithy.api#documentation": "

The workload version name of the deployment pattern.

" + } + }, + "displayName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The display name of the deployment pattern.

" + } + }, + "description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The description of the deployment pattern.

" + } + }, + "status": { + "target": "com.amazonaws.launchwizard#WorkloadDeploymentPatternStatus", + "traits": { + "smithy.api#documentation": "

The status of the deployment pattern.

" + } + }, + "statusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status message of the deployment pattern.

" + } + }, + "specifications": { + "target": "com.amazonaws.launchwizard#DeploymentSpecificationsData", + "traits": { + "smithy.api#documentation": "

The settings specified for the deployment. These settings define how to deploy and configure your\n resources created by the deployment. For more information about the specifications\n required for creating a deployment for a SAP workload, see SAP deployment\n specifications. To retrieve the specifications required to create a deployment for other workloads,\n use the \n GetWorkloadDeploymentPattern\n operation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The data that details a workload deployment pattern.

" + } + }, "com.amazonaws.launchwizard#WorkloadDeploymentPatternDataSummary": { "type": "structure", "members": { @@ -2228,9 +2796,9 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 256 + "max": 100 }, - "smithy.api#pattern": "^[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^[A-Za-z][a-zA-Z0-9-_]*$" } }, "com.amazonaws.launchwizard#WorkloadStatus": { @@ -2269,7 +2837,7 @@ "min": 5, "max": 30 }, - "smithy.api#pattern": "^[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^[A-Za-z0-9][a-zA-Z0-9-]*$" } } } diff --git a/models/lightsail.json b/models/lightsail.json index 3853d51862..1b3ca119a5 100644 --- a/models/lightsail.json +++ b/models/lightsail.json @@ -1229,7 +1229,7 @@ "blueprintId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The ID for the virtual private server image (app_wordpress_4_4 or\n app_lamp_7_0).

" + "smithy.api#documentation": "

The ID for the virtual private server image (app_wordpress_x_x or\n app_lamp_x_x).

" } }, "name": { @@ -1271,7 +1271,7 @@ "version": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The version number of the operating system, application, or stack (\n 2016.03.0).

" + "smithy.api#documentation": "

The version number of the operating system, application, or stack (\n 2016.03.0).

" } }, "versionCode": { @@ -1600,13 +1600,13 @@ "bundleId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The bundle ID (micro_1_0).

" + "smithy.api#documentation": "

The bundle ID (micro_x_x).

" } }, "instanceType": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The Amazon EC2 instance type (t2.micro).

" + "smithy.api#documentation": "

The instance type (micro).

" } }, "isActive": { @@ -1624,7 +1624,7 @@ "power": { "target": "com.amazonaws.lightsail#integer", "traits": { - "smithy.api#documentation": "

A numeric value that represents the power of the bundle (500). You can\n use the bundle's power value in conjunction with a blueprint's minimum power value to\n determine whether the blueprint will run on the bundle. For example, you need a bundle with a\n power value of 500 or more to create an instance that uses a blueprint with a minimum power\n value of 500.

" + "smithy.api#documentation": "

A numeric value that represents the power of the bundle (500). You can use\n the bundle's power value in conjunction with a blueprint's minimum power value to determine\n whether the blueprint will run on the bundle. For example, you need a bundle with a power\n value of 500 or more to create an instance that uses a blueprint with a minimum power value of\n 500.

" } }, "ramSizeInGb": { @@ -1654,7 +1654,7 @@ "publicIpv4AddressCount": { "target": "com.amazonaws.lightsail#integer", "traits": { - "smithy.api#documentation": "

An integer that indicates the public ipv4 address count included in the bundle, the value is either 0 or 1.

" + "smithy.api#documentation": "

An integer that indicates the public IPv4 address count included in the bundle; the value\n is either 0 or 1.

" } } }, @@ -1800,7 +1800,7 @@ "subjectAlternativeNames": { "target": "com.amazonaws.lightsail#SubjectAlternativeNameList", "traits": { - "smithy.api#documentation": "

An array of strings that specify the alternate domains (example2.com)\n and subdomains (blog.example.com) of the certificate.

" + "smithy.api#documentation": "

An array of strings that specify the alternate domains (example2.com) and\n subdomains (blog.example.com) of the certificate.

" } }, "domainValidationRecords": { @@ -3524,7 +3524,7 @@ "subjectAlternativeNames": { "target": "com.amazonaws.lightsail#SubjectAlternativeNameList", "traits": { - "smithy.api#documentation": "

An array of strings that specify the alternate domains (example2.com)\n and subdomains (blog.example.com) for the certificate.

\n

You can specify a maximum of nine alternate domains (in addition to the primary domain\n name).

\n

Wildcard domain entries (*.example.com) are not supported.

" + "smithy.api#documentation": "

An array of strings that specify the alternate domains (example2.com) and\n subdomains (blog.example.com) for the certificate.

\n

You can specify a maximum of nine alternate domains (in addition to the primary domain\n name).

\n

Wildcard domain entries (*.example.com) are not supported.

" } }, "tags": { @@ -4004,7 +4004,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting\n disk can be attached to an Amazon Lightsail instance in the same Availability Zone (us-east-2a).

\n

The create disk from snapshot operation supports tag-based access control via\n request tags and resource tags applied to the resource identified by disk snapshot\n name. For more information, see the Amazon Lightsail Developer Guide.

", + "smithy.api#documentation": "

Creates a block storage disk from a manual or automatic snapshot of a disk. The resulting\n disk can be attached to an Amazon Lightsail instance in the same Availability Zone\n (us-east-2a).

\n

The create disk from snapshot operation supports tag-based access control via\n request tags and resource tags applied to the resource identified by disk snapshot\n name. For more information, see the Amazon Lightsail Developer Guide.

", "smithy.api#http": { "method": "POST", "uri": "/ls/api/2016-11-28/CreateDiskFromSnapshot", @@ -4025,13 +4025,13 @@ "diskSnapshotName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name of the disk snapshot (my-snapshot) from which to create the\n new storage disk.

\n

Constraint:

\n
    \n
  • \n

    This parameter cannot be defined together with the source disk name\n parameter. The disk snapshot name and source disk name\n parameters are mutually exclusive.

    \n
  • \n
" + "smithy.api#documentation": "

The name of the disk snapshot (my-snapshot) from which to create the new\n storage disk.

\n

Constraint:

\n
    \n
  • \n

    This parameter cannot be defined together with the source disk name\n parameter. The disk snapshot name and source disk name\n parameters are mutually exclusive.

    \n
  • \n
" } }, "availabilityZone": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Availability Zone where you want to create the disk (us-east-2a).\n Choose the same Availability Zone as the Lightsail instance where you want to create the\n disk.

\n

Use the GetRegions operation to list the Availability Zones where Lightsail is currently\n available.

", + "smithy.api#documentation": "

The Availability Zone where you want to create the disk (us-east-2a). Choose\n the same Availability Zone as the Lightsail instance where you want to create the\n disk.

\n

Use the GetRegions operation to list the Availability Zones where Lightsail is currently\n available.

", "smithy.api#required": {} } }, @@ -4104,7 +4104,7 @@ "availabilityZone": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Availability Zone where you want to create the disk (us-east-2a).\n Use the same Availability Zone as the Lightsail instance to which you want to attach the\n disk.

\n

Use the get regions operation to list the Availability Zones where\n Lightsail is currently available.

", + "smithy.api#documentation": "

The Availability Zone where you want to create the disk (us-east-2a). Use the\n same Availability Zone as the Lightsail instance to which you want to attach the\n disk.

\n

Use the get regions operation to list the Availability Zones where\n Lightsail is currently available.

", "smithy.api#required": {} } }, @@ -4198,14 +4198,14 @@ "diskSnapshotName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name of the destination disk snapshot (my-disk-snapshot) based on\n the source disk.

", + "smithy.api#documentation": "

The name of the destination disk snapshot (my-disk-snapshot) based on the\n source disk.

", "smithy.api#required": {} } }, "instanceName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The unique name of the source instance (Amazon_Linux-512MB-Virginia-1).\n When this is defined, a snapshot of the instance's system volume is created.

\n \n

This parameter cannot be defined together with the disk name parameter. The\n instance name and disk name parameters are mutually\n exclusive.

\n
" + "smithy.api#documentation": "

The unique name of the source instance (Amazon_Linux-512MB-Virginia-1). When\n this is defined, a snapshot of the instance's system volume is created.

\n \n

This parameter cannot be defined together with the disk name parameter. The\n instance name and disk name parameters are mutually\n exclusive.

\n
" } }, "tags": { @@ -4328,7 +4328,7 @@ "certificateName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name of the SSL/TLS certificate that you want to attach to the distribution.

\n

Use the GetCertificates action to get a list of certificate names that you can specify.

" + "smithy.api#documentation": "

The name of the SSL/TLS certificate that you want to attach to the distribution.

\n

Use the GetCertificates\n action to get a list of certificate names that you can specify.

" } }, "viewerMinimumTlsProtocolVersion": { @@ -4794,7 +4794,7 @@ "bundleId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The bundle of specification information for your virtual private server (or\n instance), including the pricing plan (micro_1_0).

", + "smithy.api#documentation": "

The bundle of specification information for your virtual private server (or\n instance), including the pricing plan (micro_x_x).

", "smithy.api#required": {} } }, @@ -4825,7 +4825,7 @@ "ipAddressType": { "target": "com.amazonaws.lightsail#IpAddressType", "traits": { - "smithy.api#documentation": "

The IP address type for the instance.

\n

The possible values are ipv4 for IPv4 only, and dualstack for\n IPv4 and IPv6.

\n

The default value is dualstack.

" + "smithy.api#documentation": "

The IP address type for the instance.

\n

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only,\n and dualstack for IPv4 and IPv6.

\n

The default value is dualstack.

" } }, "sourceInstanceName": { @@ -4892,14 +4892,14 @@ "blueprintId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The ID for a virtual private server image (app_wordpress_4_4 or\n app_lamp_7_0). Use the get blueprints operation to return a list\n of available images (or blueprints).

\n \n

Use active blueprints when creating new instances. Inactive blueprints are listed to\n support customers with existing instances and are not necessarily available to create new\n instances. Blueprints are marked inactive when they become outdated due to operating system\n updates or new application releases.

\n
", + "smithy.api#documentation": "

The ID for a virtual private server image (app_wordpress_x_x or\n app_lamp_x_x). Use the get blueprints operation to return a list\n of available images (or blueprints).

\n \n

Use active blueprints when creating new instances. Inactive blueprints are listed to\n support customers with existing instances and are not necessarily available to create new\n instances. Blueprints are marked inactive when they become outdated due to operating system\n updates or new application releases.

\n
", "smithy.api#required": {} } }, "bundleId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The bundle of specification information for your virtual private server (or\n instance), including the pricing plan (micro_1_0).

", + "smithy.api#documentation": "

The bundle of specification information for your virtual private server (or\n instance), including the pricing plan (medium_x_x).

", "smithy.api#required": {} } }, @@ -4930,7 +4930,7 @@ "ipAddressType": { "target": "com.amazonaws.lightsail#IpAddressType", "traits": { - "smithy.api#documentation": "

The IP address type for the instance.

\n

The possible values are ipv4 for IPv4 only, and dualstack for\n IPv4 and IPv6.

\n

The default value is dualstack.

" + "smithy.api#documentation": "

The IP address type for the instance.

\n

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only,\n and dualstack for IPv4 and IPv6.

\n

The default value is dualstack.

" } } }, @@ -5118,13 +5118,13 @@ "certificateDomainName": { "target": "com.amazonaws.lightsail#DomainName", "traits": { - "smithy.api#documentation": "

The domain name with which your certificate is associated (example.com).

\n

If you specify certificateDomainName, then certificateName is\n required (and vice-versa).

" + "smithy.api#documentation": "

The domain name with which your certificate is associated\n (example.com).

\n

If you specify certificateDomainName, then certificateName is\n required (and vice-versa).

" } }, "certificateAlternativeNames": { "target": "com.amazonaws.lightsail#DomainNameList", "traits": { - "smithy.api#documentation": "

The optional alternative domains and subdomains to use with your SSL/TLS certificate\n (www.example.com, example.com, m.example.com,\n blog.example.com).

" + "smithy.api#documentation": "

The optional alternative domains and subdomains to use with your SSL/TLS certificate\n (www.example.com, example.com, m.example.com,\n blog.example.com).

" } }, "tags": { @@ -5136,7 +5136,7 @@ "ipAddressType": { "target": "com.amazonaws.lightsail#IpAddressType", "traits": { - "smithy.api#documentation": "

The IP address type for the load balancer.

\n

The possible values are ipv4 for IPv4 only, and dualstack for\n IPv4 and IPv6.

\n

The default value is dualstack.

" + "smithy.api#documentation": "

The IP address type for the load balancer.

\n

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only,\n and dualstack for IPv4 and IPv6.

\n

The default value is dualstack.

" } }, "tlsPolicyName": { @@ -5231,7 +5231,7 @@ "certificateAlternativeNames": { "target": "com.amazonaws.lightsail#DomainNameList", "traits": { - "smithy.api#documentation": "

An array of strings listing alternative domains and subdomains for your SSL/TLS\n certificate. Lightsail will de-dupe the names for you. You can have a maximum of 9\n alternative names (in addition to the 1 primary domain). We do not support wildcards (*.example.com).

" + "smithy.api#documentation": "

An array of strings listing alternative domains and subdomains for your SSL/TLS\n certificate. Lightsail will de-dupe the names for you. You can have a maximum of 9\n alternative names (in addition to the 1 primary domain). We do not support wildcards\n (*.example.com).

" } }, "tags": { @@ -7206,7 +7206,7 @@ "diskName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The unique name of the disk you want to detach from your instance (my-disk).

", + "smithy.api#documentation": "

The unique name of the disk you want to detach from your instance\n (my-disk).

", "smithy.api#required": {} } } @@ -7963,7 +7963,7 @@ "arn": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the domain recordset (arn:aws:lightsail:global:123456789101:Domain/824cede0-abc7-4f84-8dbc-12345EXAMPLE).

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the domain recordset\n (arn:aws:lightsail:global:123456789101:Domain/824cede0-abc7-4f84-8dbc-12345EXAMPLE).

" } }, "supportCode": { @@ -8031,7 +8031,7 @@ "target": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The target IP address (192.0.2.0), or AWS name server (ns-111.awsdns-22.com.).

\n

For Lightsail load balancers, the value looks like\n ab1234c56789c6b86aba6fb203d443bc-123456789.us-east-2.elb.amazonaws.com. For\n Lightsail distributions, the value looks like exampled1182ne.cloudfront.net.\n For Lightsail container services, the value looks like\n container-service-1.example23scljs.us-west-2.cs.amazonlightsail.com. Be sure to\n also set isAlias to true when setting up an A record for a\n Lightsail load balancer, distribution, or container service.

" + "smithy.api#documentation": "

The target IP address (192.0.2.0), or AWS name server\n (ns-111.awsdns-22.com.).

\n

For Lightsail load balancers, the value looks like\n ab1234c56789c6b86aba6fb203d443bc-123456789.us-east-2.elb.amazonaws.com. For\n Lightsail distributions, the value looks like exampled1182ne.cloudfront.net.\n For Lightsail container services, the value looks like\n container-service-1.example23scljs.us-west-2.cs.amazonlightsail.com. Be sure to\n also set isAlias to true when setting up an A record for a\n Lightsail load balancer, distribution, or container service.

" } }, "isAlias": { @@ -8481,7 +8481,7 @@ "resourceType": { "target": "com.amazonaws.lightsail#ExportSnapshotRecordSourceType", "traits": { - "smithy.api#documentation": "

The Lightsail resource type (InstanceSnapshot or\n DiskSnapshot).

" + "smithy.api#documentation": "

The Lightsail resource type (InstanceSnapshot or\n DiskSnapshot).

" } }, "createdAt": { @@ -8637,7 +8637,20 @@ "method": "POST", "uri": "/ls/api/2016-11-28/GetActiveNames", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "GetActiveNamesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.lightsail#GetActiveNamesRequest": { @@ -8821,7 +8834,7 @@ "resourceType": { "target": "com.amazonaws.lightsail#ResourceType", "traits": { - "smithy.api#documentation": "

The resource type of the automatic snapshot. The possible values are Instance, and Disk.

" + "smithy.api#documentation": "

The resource type of the automatic snapshot. The possible values are\n Instance, and Disk.

" } }, "autoSnapshots": { @@ -12394,7 +12407,7 @@ "includeRelationalDatabaseAvailabilityZones": { "target": "com.amazonaws.lightsail#boolean", "traits": { - "smithy.api#documentation": "

A Boolean value indicating whether to also include Availability Zones for databases in\n your get regions request. Availability Zones are indicated with a letter (us-east-2a).

" + "smithy.api#documentation": "

A Boolean value indicating whether to also include Availability Zones for databases in\n your get regions request. Availability Zones are indicated with a letter\n (us-east-2a).

" } } }, @@ -13429,7 +13442,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns detailed information for five of the most recent SetupInstanceHttps requests that were ran on the target instance.

", + "smithy.api#documentation": "

Returns detailed information for five of the most recent SetupInstanceHttps\n requests that were run on the target instance.

", "smithy.api#http": { "method": "POST", "uri": "/ls/api/2016-11-28/get-setup-history", @@ -13450,7 +13463,7 @@ "pageToken": { "target": "com.amazonaws.lightsail#SetupHistoryPageToken", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of results from your request.

\n

To get a page token, perform an initial GetSetupHistory request. If your results\n are paginated, the response will return a next page token that you can specify as the page\n token in a subsequent request.

" + "smithy.api#documentation": "

The token to advance to the next page of results from your request.

\n

To get a page token, perform an initial GetSetupHistory request. If your\n results are paginated, the response will return a next page token that you can specify as the\n page token in a subsequent request.

" } } }, @@ -13470,7 +13483,7 @@ "nextPageToken": { "target": "com.amazonaws.lightsail#SetupHistoryPageToken", "traits": { - "smithy.api#documentation": "

The token to advance to the next page of results from your request.

\n

A next page token is not returned if there are no more results to display.

\n

To get the next page of results, perform another GetSetupHistory request and specify\n the next page token using the pageToken parameter.

" + "smithy.api#documentation": "

The token to advance to the next page of results from your request.

\n

A next page token is not returned if there are no more results to display.

\n

To get the next page of results, perform another GetSetupHistory request and\n specify the next page token using the pageToken parameter.

" } } }, @@ -13972,12 +13985,12 @@ "responseTimeout": { "target": "com.amazonaws.lightsail#integer", "traits": { - "smithy.api#documentation": "

The amount of time, in seconds, that the distribution waits for a response after forwarding a request to the origin.\n The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.

" + "smithy.api#documentation": "

The amount of time, in seconds, that the distribution waits for a response after\n forwarding a request to the origin. The minimum timeout is 1 second, the maximum is 60\n seconds, and the default (if you don't specify otherwise) is 30 seconds.

" } } }, "traits": { - "smithy.api#documentation": "

Describes the origin resource of an Amazon Lightsail content delivery network (CDN)\n distribution.

\n

An origin can be a Lightsail instance, bucket, container service, or load balancer. A distribution pulls\n content from an origin, caches it, and serves it to viewers via a worldwide network of edge\n servers.

" + "smithy.api#documentation": "

Describes the origin resource of an Amazon Lightsail content delivery network (CDN)\n distribution.

\n

An origin can be a Lightsail instance, bucket, container service, or load balancer. A\n distribution pulls content from an origin, caches it, and serves it to viewers via a worldwide\n network of edge servers.

" } }, "com.amazonaws.lightsail#Instance": { @@ -13986,13 +13999,13 @@ "name": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name the user gave the instance (Amazon_Linux-1GB-Ohio-1).

" + "smithy.api#documentation": "

The name the user gave the instance (Amazon_Linux_2023-1).

" } }, "arn": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instance (arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE).

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instance\n (arn:aws:lightsail:us-east-2:123456789101:Instance/244ad76f-8aad-4741-809f-12345EXAMPLE).

" } }, "supportCode": { @@ -14004,7 +14017,7 @@ "createdAt": { "target": "com.amazonaws.lightsail#IsoDate", "traits": { - "smithy.api#documentation": "

The timestamp when the instance was created (1479734909.17) in Unix\n time format.

" + "smithy.api#documentation": "

The timestamp when the instance was created (1479734909.17) in Unix time\n format.

" } }, "location": { @@ -14028,19 +14041,19 @@ "blueprintId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The blueprint ID (os_amlinux_2016_03).

" + "smithy.api#documentation": "

The blueprint ID (amazon_linux_2023).

" } }, "blueprintName": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The friendly name of the blueprint (Amazon Linux).

" + "smithy.api#documentation": "

The friendly name of the blueprint (Amazon Linux 2023).

" } }, "bundleId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The bundle for the instance (micro_1_0).

" + "smithy.api#documentation": "

The bundle for the instance (micro_x_x).

" } }, "addOns": { @@ -14076,7 +14089,7 @@ "ipAddressType": { "target": "com.amazonaws.lightsail#IpAddressType", "traits": { - "smithy.api#documentation": "

The IP address type of the instance.

\n

The possible values are ipv4 for IPv4 only, and dualstack for\n IPv4 and IPv6.

" + "smithy.api#documentation": "

The IP address type of the instance.

\n

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only,\n and dualstack for IPv4 and IPv6.

" } }, "hardware": { @@ -14106,7 +14119,7 @@ "sshKeyName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name of the SSH key being used to connect to the instance (LightsailDefaultKeyPair).

" + "smithy.api#documentation": "

The name of the SSH key being used to connect to the instance\n (LightsailDefaultKeyPair).

" } }, "metadataOptions": { @@ -14162,7 +14175,7 @@ "privateKey": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

For SSH access, the temporary private key. For OpenSSH clients (command line SSH),\n you should save this value to tempkey).

" + "smithy.api#documentation": "

For SSH access, the temporary private key. For OpenSSH clients (command line SSH), you\n should save this value to tempkey.

" } }, "protocol": { @@ -14612,7 +14625,7 @@ "protocol": { "target": "com.amazonaws.lightsail#NetworkProtocol", "traits": { - "smithy.api#documentation": "

The IP protocol name.

\n

The name can be one of the following:

\n
    \n
  • \n

    \n tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and\n error-checked delivery of streamed data between applications running on hosts\n communicating by an IP network. If you have an application that doesn't require reliable\n data stream service, use UDP instead.

    \n
  • \n
  • \n

    \n all - All transport layer protocol types. For more general information,\n see Transport layer on\n Wikipedia.

    \n
  • \n
  • \n

    \n udp - With User Datagram Protocol (UDP), computer applications can send\n messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior\n communications are not required to set up transmission channels or data paths.\n Applications that don't require reliable data stream service can use UDP, which provides a\n connectionless datagram service that emphasizes reduced latency over reliability. If you\n do require reliable data stream service, use TCP instead.

    \n
  • \n
  • \n

    \n icmp - Internet Control Message Protocol (ICMP) is used to send error\n messages and operational information indicating success or failure when communicating with\n an instance. For example, an error is indicated when an instance could not be reached.\n When you specify icmp as the protocol, you must specify the ICMP\n type using the fromPort parameter, and ICMP code using the\n toPort parameter.

    \n
  • \n
" + "smithy.api#documentation": "

The IP protocol name.

\n

The name can be one of the following:

\n
    \n
  • \n

    \n tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and\n error-checked delivery of streamed data between applications running on hosts\n communicating by an IP network. If you have an application that doesn't require reliable\n data stream service, use UDP instead.

    \n
  • \n
  • \n

    \n all - All transport layer protocol types. For more general information,\n see Transport layer on\n Wikipedia.

    \n
  • \n
  • \n

    \n udp - With User Datagram Protocol (UDP), computer applications can send\n messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior\n communications are not required to set up transmission channels or data paths.\n Applications that don't require reliable data stream service can use UDP, which provides a\n connectionless datagram service that emphasizes reduced latency over reliability. If you\n do require reliable data stream service, use TCP instead.

    \n
  • \n
  • \n

    \n icmp - Internet Control Message Protocol (ICMP) is used to send error\n messages and operational information indicating success or failure when communicating with\n an instance. For example, an error is indicated when an instance could not be reached.\n When you specify icmp as the protocol, you must specify the ICMP\n type using the fromPort parameter, and ICMP code using the\n toPort parameter.

    \n
  • \n
  • \n

    \n icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you\n specify icmp6 as the protocol, you must specify the ICMP type\n using the fromPort parameter, and ICMP code using the toPort\n parameter.

    \n
  • \n
" } }, "accessFrom": { @@ -14688,7 +14701,7 @@ "protocol": { "target": "com.amazonaws.lightsail#NetworkProtocol", "traits": { - "smithy.api#documentation": "

The IP protocol name.

\n

The name can be one of the following:

\n
    \n
  • \n

    \n tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and\n error-checked delivery of streamed data between applications running on hosts\n communicating by an IP network. If you have an application that doesn't require reliable\n data stream service, use UDP instead.

    \n
  • \n
  • \n

    \n all - All transport layer protocol types. For more general information,\n see Transport layer on\n Wikipedia.

    \n
  • \n
  • \n

    \n udp - With User Datagram Protocol (UDP), computer applications can send\n messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior\n communications are not required to set up transmission channels or data paths.\n Applications that don't require reliable data stream service can use UDP, which provides a\n connectionless datagram service that emphasizes reduced latency over reliability. If you\n do require reliable data stream service, use TCP instead.

    \n
  • \n
  • \n

    \n icmp - Internet Control Message Protocol (ICMP) is used to send error\n messages and operational information indicating success or failure when communicating with\n an instance. For example, an error is indicated when an instance could not be reached.\n When you specify icmp as the protocol, you must specify the ICMP\n type using the fromPort parameter, and ICMP code using the\n toPort parameter.

    \n
  • \n
" + "smithy.api#documentation": "

The IP protocol name.

\n

The name can be one of the following:

\n
    \n
  • \n

    \n tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and\n error-checked delivery of streamed data between applications running on hosts\n communicating by an IP network. If you have an application that doesn't require reliable\n data stream service, use UDP instead.

    \n
  • \n
  • \n

    \n all - All transport layer protocol types. For more general information,\n see Transport layer on\n Wikipedia.

    \n
  • \n
  • \n

    \n udp - With User Datagram Protocol (UDP), computer applications can send\n messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior\n communications are not required to set up transmission channels or data paths.\n Applications that don't require reliable data stream service can use UDP, which provides a\n connectionless datagram service that emphasizes reduced latency over reliability. If you\n do require reliable data stream service, use TCP instead.

    \n
  • \n
  • \n

    \n icmp - Internet Control Message Protocol (ICMP) is used to send error\n messages and operational information indicating success or failure when communicating with\n an instance. For example, an error is indicated when an instance could not be reached.\n When you specify icmp as the protocol, you must specify the ICMP\n type using the fromPort parameter, and ICMP code using the\n toPort parameter.

    \n
  • \n
  • \n

    \n icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you\n specify icmp6 as the protocol, you must specify the ICMP type\n using the fromPort parameter, and ICMP code using the toPort\n parameter.

    \n
  • \n
" } }, "state": { @@ -14738,7 +14751,7 @@ "arn": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the snapshot (arn:aws:lightsail:us-east-2:123456789101:InstanceSnapshot/d23b5706-3322-4d83-81e5-12345EXAMPLE).

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the snapshot\n (arn:aws:lightsail:us-east-2:123456789101:InstanceSnapshot/d23b5706-3322-4d83-81e5-12345EXAMPLE).

" } }, "supportCode": { @@ -14798,19 +14811,19 @@ "fromInstanceArn": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instance from which the snapshot was created (arn:aws:lightsail:us-east-2:123456789101:Instance/64b8404c-ccb1-430b-8daf-12345EXAMPLE).

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the instance from which the snapshot was created\n (arn:aws:lightsail:us-east-2:123456789101:Instance/64b8404c-ccb1-430b-8daf-12345EXAMPLE).

" } }, "fromBlueprintId": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The blueprint ID from which you created the snapshot (os_debian_8_3). A\n blueprint is a virtual private server (or instance) image used to create\n instances quickly.

" + "smithy.api#documentation": "

The blueprint ID from which you created the snapshot (amazon_linux_2023). A\n blueprint is a virtual private server (or instance) image used to create\n instances quickly.

" } }, "fromBundleId": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The bundle ID from which you created the snapshot (micro_1_0).

" + "smithy.api#documentation": "

The bundle ID from which you created the snapshot (micro_x_x).

" } }, "isFromAutoSnapshot": { @@ -14836,13 +14849,13 @@ "fromBundleId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The bundle ID from which the source instance was created (micro_1_0).

" + "smithy.api#documentation": "

The bundle ID from which the source instance was created (micro_x_x).

" } }, "fromBlueprintId": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The blueprint ID from which the source instance (os_debian_8_3).

" + "smithy.api#documentation": "

The blueprint ID from which the source instance was created (amazon_linux_2023).

" } }, "fromDiskInfo": { @@ -14947,6 +14960,12 @@ "traits": { "smithy.api#enumValue": "ipv4" } + }, + "IPV6": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ipv6" + } } } }, @@ -15044,7 +15063,7 @@ "arn": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the key pair (arn:aws:lightsail:us-east-2:123456789101:KeyPair/05859e3d-331d-48ba-9034-12345EXAMPLE).

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the key pair\n (arn:aws:lightsail:us-east-2:123456789101:KeyPair/05859e3d-331d-48ba-9034-12345EXAMPLE).

" } }, "supportCode": { @@ -15220,7 +15239,7 @@ "viewerMinimumTlsProtocolVersion": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The minimum TLS protocol version that the distribution can use to communicate with viewers.

" + "smithy.api#documentation": "

The minimum TLS protocol version that the distribution can use to communicate with\n viewers.

" } } }, @@ -16559,7 +16578,7 @@ "location": { "target": "com.amazonaws.lightsail#ResourceLocation", "traits": { - "smithy.api#documentation": "

The AWS Region where your load balancer was created (us-east-2a).\n Lightsail automatically creates your load balancer across Availability Zones.

" + "smithy.api#documentation": "

The AWS Region where your load balancer was created (us-east-2a). Lightsail\n automatically creates your load balancer across Availability Zones.

" } }, "resourceType": { @@ -16631,7 +16650,7 @@ "ipAddressType": { "target": "com.amazonaws.lightsail#IpAddressType", "traits": { - "smithy.api#documentation": "

The IP address type of the load balancer.

\n

The possible values are ipv4 for IPv4 only, and dualstack for\n IPv4 and IPv6.

" + "smithy.api#documentation": "

The IP address type of the load balancer.

\n

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only,\n and dualstack for IPv4 and IPv6.

" } }, "httpsRedirectionEnabled": { @@ -16980,7 +16999,7 @@ "subjectAlternativeNames": { "target": "com.amazonaws.lightsail#StringList", "traits": { - "smithy.api#documentation": "

An array of strings that specify the alternate domains (example2.com)\n and subdomains (blog.example.com) for the certificate.

" + "smithy.api#documentation": "

An array of strings that specify the alternate domains (example2.com) and\n subdomains (blog.example.com) for the certificate.

" } } }, @@ -17977,6 +17996,12 @@ "traits": { "smithy.api#enumValue": "icmp" } + }, + "ICMPV6": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "icmpv6" + } } } }, @@ -18766,7 +18791,7 @@ "responseTimeout": { "target": "com.amazonaws.lightsail#integer", "traits": { - "smithy.api#documentation": "

The amount of time, in seconds, that the distribution waits for a response after forwarding a request to the origin.\n The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.

" + "smithy.api#documentation": "

The amount of time, in seconds, that the distribution waits for a response after\n forwarding a request to the origin. The minimum timeout is 1 second, the maximum is 60\n seconds, and the default (if you don't specify otherwise) is 30 seconds.

" } } }, @@ -18989,7 +19014,7 @@ "protocol": { "target": "com.amazonaws.lightsail#NetworkProtocol", "traits": { - "smithy.api#documentation": "

The IP protocol name.

\n

The name can be one of the following:

\n
    \n
  • \n

    \n tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and\n error-checked delivery of streamed data between applications running on hosts\n communicating by an IP network. If you have an application that doesn't require reliable\n data stream service, use UDP instead.

    \n
  • \n
  • \n

    \n all - All transport layer protocol types. For more general information,\n see Transport layer on\n Wikipedia.

    \n
  • \n
  • \n

    \n udp - With User Datagram Protocol (UDP), computer applications can send\n messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior\n communications are not required to set up transmission channels or data paths.\n Applications that don't require reliable data stream service can use UDP, which provides a\n connectionless datagram service that emphasizes reduced latency over reliability. If you\n do require reliable data stream service, use TCP instead.

    \n
  • \n
  • \n

    \n icmp - Internet Control Message Protocol (ICMP) is used to send error\n messages and operational information indicating success or failure when communicating with\n an instance. For example, an error is indicated when an instance could not be reached.\n When you specify icmp as the protocol, you must specify the ICMP\n type using the fromPort parameter, and ICMP code using the\n toPort parameter.

    \n
  • \n
" + "smithy.api#documentation": "

The IP protocol name.

\n

The name can be one of the following:

\n
    \n
  • \n

    \n tcp - Transmission Control Protocol (TCP) provides reliable, ordered, and\n error-checked delivery of streamed data between applications running on hosts\n communicating by an IP network. If you have an application that doesn't require reliable\n data stream service, use UDP instead.

    \n
  • \n
  • \n

    \n all - All transport layer protocol types. For more general information,\n see Transport layer on\n Wikipedia.

    \n
  • \n
  • \n

    \n udp - With User Datagram Protocol (UDP), computer applications can send\n messages (or datagrams) to other hosts on an Internet Protocol (IP) network. Prior\n communications are not required to set up transmission channels or data paths.\n Applications that don't require reliable data stream service can use UDP, which provides a\n connectionless datagram service that emphasizes reduced latency over reliability. If you\n do require reliable data stream service, use TCP instead.

    \n
  • \n
  • \n

    \n icmp - Internet Control Message Protocol (ICMP) is used to send error\n messages and operational information indicating success or failure when communicating with\n an instance. For example, an error is indicated when an instance could not be reached.\n When you specify icmp as the protocol, you must specify the ICMP\n type using the fromPort parameter, and ICMP code using the\n toPort parameter.

    \n
  • \n
  • \n

    \n icmp6 - Internet Control Message Protocol (ICMP) for IPv6. When you\n specify icmp6 as the protocol, you must specify the ICMP type\n using the fromPort parameter, and ICMP code using the toPort\n parameter.

    \n
  • \n
" } }, "cidrs": { @@ -19594,7 +19619,7 @@ "description": { "target": "com.amazonaws.lightsail#string", "traits": { - "smithy.api#documentation": "

The description of the Amazon Web Services Region (This region is recommended\n to serve users in the eastern United States and eastern Canada).

" + "smithy.api#documentation": "

The description of the Amazon Web Services Region (This region is recommended to\n serve users in the eastern United States and eastern Canada).

" } }, "displayName": { @@ -21120,9 +21145,15 @@ "ipAddressType": { "target": "com.amazonaws.lightsail#IpAddressType", "traits": { - "smithy.api#documentation": "

The IP address type to set for the specified resource.

\n

The possible values are ipv4 for IPv4 only, and dualstack for\n IPv4 and IPv6.

", + "smithy.api#documentation": "

The IP address type to set for the specified resource.

\n

The possible values are ipv4 for IPv4 only, ipv6 for IPv6 only,\n and dualstack for IPv4 and IPv6.

", "smithy.api#required": {} } + }, + "acceptBundleUpdate": { + "target": "com.amazonaws.lightsail#boolean", + "traits": { + "smithy.api#documentation": "

Required parameter to accept the instance bundle update when changing to, and from,\n IPv6-only.

\n \n

An instance bundle will change when switching from dual-stack or\n ipv4 to ipv6. It also changes when switching from\n ipv6 to dual-stack or ipv4.

\n

You must include this parameter in the command to update the bundle. For example, if you\n switch from dual-stack to ipv6, the bundle will be updated, and\n billing for the IPv6-only instance bundle begins immediately.

\n
" + } } }, "traits": { @@ -21407,7 +21438,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an SSL/TLS certificate that secures traffic for your website. After the\n certificate is created, it is installed on the specified Lightsail instance.

\n

If you provide more than one domain name in the request, at least one name must be less than or equal to 63 characters in length.

", + "smithy.api#documentation": "

Creates an SSL/TLS certificate that secures traffic for your website. After the\n certificate is created, it is installed on the specified Lightsail instance.

\n

If you provide more than one domain name in the request, at least one name must be less\n than or equal to 63 characters in length.

", "smithy.api#http": { "method": "POST", "uri": "/ls/api/2016-11-28/setup-instance-https", @@ -21488,7 +21519,7 @@ } }, "traits": { - "smithy.api#documentation": "

Returns information that was submitted during the SetupInstanceHttps request. Email\n information is redacted for privacy.

" + "smithy.api#documentation": "

Returns information that was submitted during the SetupInstanceHttps request.\n Email information is redacted for privacy.

" } }, "com.amazonaws.lightsail#SetupStatus": { @@ -21727,7 +21758,7 @@ "arn": { "target": "com.amazonaws.lightsail#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the static IP (arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE).

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the static IP\n (arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE).

" } }, "supportCode": { @@ -21763,7 +21794,7 @@ "attachedTo": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The instance where the static IP is attached (Amazon_Linux-1GB-Ohio-1).

" + "smithy.api#documentation": "

The instance where the static IP is attached\n (Amazon_Linux-1GB-Ohio-1).

" } }, "isAttached": { @@ -22940,19 +22971,19 @@ "viewerMinimumTlsProtocolVersion": { "target": "com.amazonaws.lightsail#ViewerMinimumTlsProtocolVersionEnum", "traits": { - "smithy.api#documentation": "

Use this parameter to update the minimum TLS protocol version for the SSL/TLS certificate that's attached to the distribution.

" + "smithy.api#documentation": "

Use this parameter to update the minimum TLS protocol version for the SSL/TLS certificate\n that's attached to the distribution.

" } }, "certificateName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name of the SSL/TLS certificate that you want to attach to the distribution.

\n

Only certificates with a status of ISSUED can be attached to a distribution.

\n

Use the GetCertificates action to get a list of certificate names that you can specify.

" + "smithy.api#documentation": "

The name of the SSL/TLS certificate that you want to attach to the distribution.

\n

Only certificates with a status of ISSUED can be attached to a\n distribution.

\n

Use the GetCertificates\n action to get a list of certificate names that you can specify.

" } }, "useDefaultCertificate": { "target": "com.amazonaws.lightsail#boolean", "traits": { - "smithy.api#documentation": "

Indicates whether the default SSL/TLS certificate is attached to the distribution. The default value is true. When true, the distribution uses the default domain name such as d111111abcdef8.cloudfront.net.

\n

Set this value to false to attach a new certificate to the distribution.

" + "smithy.api#documentation": "

Indicates whether the default SSL/TLS certificate is attached to the distribution. The\n default value is true. When true, the distribution uses the default\n domain name such as d111111abcdef8.cloudfront.net.

\n

Set this value to false to attach a new certificate to the\n distribution.

" } } }, @@ -23189,7 +23220,7 @@ "loadBalancerName": { "target": "com.amazonaws.lightsail#ResourceName", "traits": { - "smithy.api#documentation": "

The name of the load balancer that you want to modify (my-load-balancer.

", + "smithy.api#documentation": "

The name of the load balancer that you want to modify\n (my-load-balancer).

", "smithy.api#required": {} } }, diff --git a/models/location.json b/models/location.json index 29c384c36f..0e2b3d4021 100644 --- a/models/location.json +++ b/models/location.json @@ -37,7 +37,7 @@ "min": 5, "max": 200 }, - "smithy.api#pattern": "^geo:\\w*\\*?$" + "smithy.api#pattern": "^(geo|geo-routes|geo-places|geo-maps):\\w*\\*?$" } }, "com.amazonaws.location#ApiKeyActionList": { @@ -214,6 +214,15 @@ "type": "structure", "members": {} }, + "com.amazonaws.location#Base64EncodedGeobuf": { + "type": "blob", + "traits": { + "smithy.api#length": { + "max": 600000 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.location#BatchDeleteDevicePositionHistory": { "type": "operation", "input": { @@ -783,7 +792,7 @@ "Geometry": { "target": "com.amazonaws.location#GeofenceGeometry", "traits": { - "smithy.api#documentation": "

Contains the details of the position of the geofence. Can be either a \n polygon or a circle. Including both will return a validation error.

\n \n

Each \n geofence polygon can have a maximum of 1,000 vertices.

\n
", + "smithy.api#documentation": "

Contains the details to specify the position of the geofence. Can be a\n polygon, a circle, or a polygon encoded in Geobuf format. Including multiple selections will return a validation error.

\n \n

The \n geofence polygon format supports a maximum of 1,000 vertices. The Geofence geobuf format supports a maximum of 100,000 vertices.

\n
", "smithy.api#required": {} } }, @@ -1439,6 +1448,25 @@ "smithy.api#documentation": "

Contains details about additional route preferences for requests that specify\n TravelMode as Truck.

" } }, + "com.amazonaws.location#CellSignals": { + "type": "structure", + "members": { + "LteCellDetails": { + "target": "com.amazonaws.location#LteCellDetailsList", + "traits": { + "smithy.api#documentation": "

Information about the Long-Term Evolution (LTE) network the device is connected to.

", + "smithy.api#length": { + "min": 1, + "max": 16 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The cellular network communication infrastructure that the device uses.

" + } + }, "com.amazonaws.location#Circle": { "type": "structure", "members": { @@ -1479,12 +1507,6 @@ "smithy.api#httpError": 409 } }, - "com.amazonaws.location#CountryCode": { - "type": "string", - "traits": { - "smithy.api#pattern": "^[A-Z]{3}$" - } - }, "com.amazonaws.location#CountryCode3": { "type": "string", "traits": { @@ -1508,7 +1530,7 @@ "com.amazonaws.location#CountryCodeList": { "type": "list", "member": { - "target": "com.amazonaws.location#CountryCode" + "target": "com.amazonaws.location#CountryCode3" }, "traits": { "smithy.api#length": { @@ -1603,7 +1625,7 @@ "target": "com.amazonaws.location#KmsKeyId", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "

A key identifier for an \n Amazon Web Services \n KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.\n\t

" + "smithy.api#documentation": "

A key identifier for an\n Amazon Web Services\n KMS customer managed key. Enter a key ID, key ARN, alias name, or alias ARN.\n\t

" } } } @@ -2682,7 +2704,7 @@ "KmsKeyId": { "target": "com.amazonaws.location#KmsKeyId", "traits": { - "smithy.api#documentation": "

A key identifier for an \n Amazon Web Services \n KMS customer managed key assigned to the Amazon Location resource

" + "smithy.api#documentation": "

A key identifier for an\n Amazon Web Services\n KMS customer managed key assigned to the Amazon Location resource.

" } }, "Tags": { @@ -3434,6 +3456,57 @@ "target": "com.amazonaws.location#DevicePositionUpdate" } }, + "com.amazonaws.location#DeviceState": { + "type": "structure", + "members": { + "DeviceId": { + "target": "com.amazonaws.location#Id", + "traits": { + "smithy.api#documentation": "

The device identifier.

", + "smithy.api#required": {} + } + }, + "SampleTime": { + "target": "com.amazonaws.location#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp at which the device's position was determined. Uses ISO 8601 \n format: YYYY-MM-DDThh:mm:ss.sssZ.

", + "smithy.api#required": {} + } + }, + "Position": { + "target": "com.amazonaws.location#Position", + "traits": { + "smithy.api#documentation": "

The last known device position.

", + "smithy.api#required": {} + } + }, + "Accuracy": { + "target": "com.amazonaws.location#PositionalAccuracy" + }, + "Ipv4Address": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The device's IPv4 address.

", + "smithy.api#pattern": "^(?:(?:25[0-5]|(?:2[0-4]|1\\d|[0-9]|)\\d)\\.?\\b){4}$" + } + }, + "WiFiAccessPoints": { + "target": "com.amazonaws.location#WiFiAccessPointList", + "traits": { + "smithy.api#documentation": "

The Wi-Fi access points the device is using.

" + } + }, + "CellSignals": { + "target": "com.amazonaws.location#CellSignals", + "traits": { + "smithy.api#documentation": "

The cellular network infrastructure that the device is connected to.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The device's position, IP address, and Wi-Fi access points.

" + } + }, "com.amazonaws.location#DimensionUnit": { "type": "string", "traits": { @@ -3521,173 +3594,45 @@ ] } }, - "com.amazonaws.location#FilterPlaceCategoryList": { - "type": "list", - "member": { - "target": "com.amazonaws.location#PlaceCategory" - }, + "com.amazonaws.location#Earfcn": { + "type": "integer", "traits": { - "smithy.api#length": { - "min": 1, - "max": 5 - } - } - }, - "com.amazonaws.location#GenericResource": { - "type": "resource", - "identifiers": { - "ResourceArn": { - "target": "com.amazonaws.location#Arn" - } - }, - "operations": [ - { - "target": "com.amazonaws.location#ListTagsForResource" - }, - { - "target": "com.amazonaws.location#TagResource" - }, - { - "target": "com.amazonaws.location#UntagResource" + "smithy.api#default": 0, + "smithy.api#range": { + "min": 0, + "max": 262143 } - ], - "traits": { - "aws.api#controlPlane": {}, - "aws.iam#disableConditionKeyInference": {} } }, - "com.amazonaws.location#GeoArn": { - "type": "string", + "com.amazonaws.location#EutranCellId": { + "type": "integer", "traits": { - "aws.api#arnReference": {}, - "smithy.api#length": { - "max": 1600 - }, - "smithy.api#pattern": "^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$" + "smithy.api#default": 0, + "smithy.api#range": { + "min": 0, + "max": 268435455 + } } }, - "com.amazonaws.location#GeoArnList": { + "com.amazonaws.location#FilterPlaceCategoryList": { "type": "list", "member": { - "target": "com.amazonaws.location#GeoArn" - } - }, - "com.amazonaws.location#GeofenceCollectionResource": { - "type": "resource", - "identifiers": { - "CollectionName": { - "target": "com.amazonaws.location#ResourceName" - } - }, - "put": { - "target": "com.amazonaws.location#CreateGeofenceCollection" - }, - "read": { - "target": "com.amazonaws.location#DescribeGeofenceCollection" - }, - "update": { - "target": "com.amazonaws.location#UpdateGeofenceCollection" - }, - "delete": { - "target": "com.amazonaws.location#DeleteGeofenceCollection" - }, - "list": { - "target": "com.amazonaws.location#ListGeofenceCollections" - }, - "operations": [ - { - "target": "com.amazonaws.location#BatchDeleteGeofence" - }, - { - "target": "com.amazonaws.location#BatchEvaluateGeofences" - }, - { - "target": "com.amazonaws.location#BatchPutGeofence" - }, - { - "target": "com.amazonaws.location#GetGeofence" - }, - { - "target": "com.amazonaws.location#ListGeofences" - }, - { - "target": "com.amazonaws.location#PutGeofence" - } - ], - "traits": { - "aws.api#arn": { - "template": "geofence-collection/{CollectionName}" - }, - "aws.cloudformation#cfnResource": { - "name": "GeofenceCollection" - }, - "aws.iam#disableConditionKeyInference": {} - } - }, - "com.amazonaws.location#GeofenceGeometry": { - "type": "structure", - "members": { - "Polygon": { - "target": "com.amazonaws.location#LinearRings", - "traits": { - "smithy.api#documentation": "

A polygon is a list of linear rings which are each made up of a list of\n vertices.

\n

Each vertex is a 2-dimensional point of the form: [longitude, latitude].\n This is represented as an array of doubles of length 2 (so [double,\n double]).

\n

An array of 4 or more vertices, where the first and last vertex are the same (to form\n a closed boundary), is called a linear ring. The linear ring vertices must be listed in\n counter-clockwise order around the ring’s interior. The linear ring is represented as an\n array of vertices, or an array of arrays of doubles ([[double, double], ...]).

\n

A geofence consists of a single linear ring. To allow for future expansion, the\n Polygon parameter takes an array of linear rings, which is represented as an array of\n arrays of arrays of doubles ([[[double, double], ...], ...]).

\n

A linear ring for use in geofences can consist of between 4 and 1,000 vertices.

" - } - }, - "Circle": { - "target": "com.amazonaws.location#Circle", - "traits": { - "smithy.api#documentation": "

A circle on the earth, as defined by a center point and a radius.

" - } - } + "target": "com.amazonaws.location#PlaceCategory" }, "traits": { - "smithy.api#documentation": "

Contains the geofence geometry details.

\n

A geofence geometry is made up of either a polygon or a circle. Can be either a \n polygon or a circle. Including both will return a validation error.

\n \n

Amazon Location doesn't currently support polygons with holes, multipolygons, polygons\n that are wound clockwise, or that cross the antimeridian.

\n
" - } - }, - "com.amazonaws.location#GetDevicePosition": { - "type": "operation", - "input": { - "target": "com.amazonaws.location#GetDevicePositionRequest" - }, - "output": { - "target": "com.amazonaws.location#GetDevicePositionResponse" - }, - "errors": [ - { - "target": "com.amazonaws.location#AccessDeniedException" - }, - { - "target": "com.amazonaws.location#InternalServerException" - }, - { - "target": "com.amazonaws.location#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.location#ThrottlingException" - }, - { - "target": "com.amazonaws.location#ValidationException" + "smithy.api#length": { + "min": 1, + "max": 5 } - ], - "traits": { - "smithy.api#documentation": "

Retrieves a device's most recent position according to its sample time.

\n \n

Device positions are deleted after 30 days.

\n
", - "smithy.api#endpoint": { - "hostPrefix": "tracking." - }, - "smithy.api#http": { - "uri": "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/positions/latest", - "method": "GET" - }, - "smithy.api#readonly": {} } }, - "com.amazonaws.location#GetDevicePositionHistory": { + "com.amazonaws.location#ForecastGeofenceEvents": { "type": "operation", "input": { - "target": "com.amazonaws.location#GetDevicePositionHistoryRequest" + "target": "com.amazonaws.location#ForecastGeofenceEventsRequest" }, "output": { - "target": "com.amazonaws.location#GetDevicePositionHistoryResponse" + "target": "com.amazonaws.location#ForecastGeofenceEventsResponse" }, "errors": [ { @@ -3707,49 +3652,460 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the device position history from a tracker resource within a specified range\n of time.

\n \n

Device positions are deleted after 30 days.

\n
", + "smithy.api#documentation": "

Evaluates device positions against\n geofence geometries from a given geofence collection. The operation forecasts the three states that\n a device can be in relative to a geofence:

\n

\n ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within the time horizon window.

\n

\n EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within the time horizon window.

\n

\n IDLE: If a device is inside of a geofence, and the device is not moving.

", "smithy.api#endpoint": { - "hostPrefix": "tracking." + "hostPrefix": "geofencing." }, "smithy.api#http": { - "uri": "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/list-positions", + "uri": "/geofencing/v0/collections/{CollectionName}/forecast-geofence-events", "method": "POST" }, "smithy.api#paginated": { "pageSize": "MaxResults", "inputToken": "NextToken", "outputToken": "NextToken", - "items": "DevicePositions" + "items": "ForecastedEvents" }, "smithy.api#readonly": {} } }, - "com.amazonaws.location#GetDevicePositionHistoryRequest": { + "com.amazonaws.location#ForecastGeofenceEventsDeviceState": { "type": "structure", "members": { - "TrackerName": { - "target": "com.amazonaws.location#ResourceName", + "Position": { + "target": "com.amazonaws.location#Position", "traits": { - "smithy.api#documentation": "

The tracker resource receiving the request for the device position history.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The device's position.

", "smithy.api#required": {} } }, - "DeviceId": { - "target": "com.amazonaws.location#Id", + "Speed": { + "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

The device whose position history you want to retrieve.

", + "smithy.api#documentation": "

The device's speed.

", + "smithy.api#range": { + "min": 0 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The device's state, including its current position and speed.

" + } + }, + "com.amazonaws.location#ForecastGeofenceEventsRequest": { + "type": "structure", + "members": { + "CollectionName": { + "target": "com.amazonaws.location#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the geofence collection.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "NextToken": { - "target": "com.amazonaws.location#Token", + "DeviceState": { + "target": "com.amazonaws.location#ForecastGeofenceEventsDeviceState", "traits": { - "smithy.api#documentation": "

The pagination token specifying which page of results to return in the response. If no\n token is provided, the default page is the first page.

\n

Default value: null\n

" + "smithy.api#documentation": "

The device's state, including current position and speed.

", + "smithy.api#required": {} } }, - "StartTimeInclusive": { + "TimeHorizonMinutes": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

Specifies the time horizon in minutes for the forecasted events.

", + "smithy.api#range": { + "min": 0 + } + } + }, + "DistanceUnit": { + "target": "com.amazonaws.location#DistanceUnit", + "traits": { + "smithy.api#documentation": "

The distance unit used for the NearestDistance property returned in a forecasted event.\n The measurement system must match for DistanceUnit and SpeedUnit; if Kilometers\n is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour.\n

\n

Default Value: Kilometers\n

" + } + }, + "SpeedUnit": { + "target": "com.amazonaws.location#SpeedUnit", + "traits": { + "smithy.api#documentation": "

The speed unit for the device captured by the device state. The measurement system must match for DistanceUnit and SpeedUnit; if Kilometers\n is specified for DistanceUnit, then SpeedUnit must be KilometersPerHour.

\n

Default Value: KilometersPerHour.

" + } + }, + "NextToken": { + "target": "com.amazonaws.location#LargeToken", + "traits": { + "smithy.api#documentation": "

The pagination token specifying which page of results to return in the response. If no\n token is provided, the default page is the first page.

\n

Default value: null\n

" + } + }, + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

An optional limit for the number of resources returned in a single call.

\n

Default value: 20\n

", + "smithy.api#range": { + "min": 1, + "max": 20 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.location#ForecastGeofenceEventsResponse": { + "type": "structure", + "members": { + "ForecastedEvents": { + "target": "com.amazonaws.location#ForecastedEventsList", + "traits": { + "smithy.api#documentation": "

The list of forecasted events.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.location#LargeToken", + "traits": { + "smithy.api#documentation": "

The pagination token specifying which page of results to return in the response. If no\n token is provided, the default page is the first page.

" + } + }, + "DistanceUnit": { + "target": "com.amazonaws.location#DistanceUnit", + "traits": { + "smithy.api#documentation": "

The distance unit for the forecasted events.

", + "smithy.api#required": {} + } + }, + "SpeedUnit": { + "target": "com.amazonaws.location#SpeedUnit", + "traits": { + "smithy.api#documentation": "

The speed unit for the forecasted events.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.location#ForecastedEvent": { + "type": "structure", + "members": { + "EventId": { + "target": "com.amazonaws.location#Uuid", + "traits": { + "smithy.api#documentation": "

The forecasted event identifier.

", + "smithy.api#required": {} + } + }, + "GeofenceId": { + "target": "com.amazonaws.location#Id", + "traits": { + "smithy.api#documentation": "

The geofence identifier pertaining to the forecasted event.

", + "smithy.api#required": {} + } + }, + "IsDeviceInGeofence": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates if the device is located within the geofence.

", + "smithy.api#required": {} + } + }, + "NearestDistance": { + "target": "com.amazonaws.location#NearestDistance", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The closest distance from the device's position to the geofence.

", + "smithy.api#required": {} + } + }, + "EventType": { + "target": "com.amazonaws.location#ForecastedGeofenceEventType", + "traits": { + "smithy.api#documentation": "

The event type, forecasting the three states that\n a device can be in relative to a geofence:

\n

\n ENTER: If a device is outside of a geofence, but would breach the fence if the device is moving at its current speed within the time horizon window.

\n

\n EXIT: If a device is inside of a geofence, but would breach the fence if the device is moving at its current speed within the time horizon window.

\n

\n IDLE: If a device is inside of a geofence, and the device is not moving.

", + "smithy.api#required": {} + } + }, + "ForecastedBreachTime": { + "target": "com.amazonaws.location#Timestamp", + "traits": { + "smithy.api#documentation": "

The forecasted time the device will breach the geofence in ISO 8601\n format: YYYY-MM-DDThh:mm:ss.sssZ\n

" + } + }, + "GeofenceProperties": { + "target": "com.amazonaws.location#PropertyMap", + "traits": { + "smithy.api#documentation": "

The geofence properties.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A forecasted event represents a geofence event that may occur for the requested device,\n given the provided device state and time horizon.

" + } + }, + "com.amazonaws.location#ForecastedEventsList": { + "type": "list", + "member": { + "target": "com.amazonaws.location#ForecastedEvent" + } + }, + "com.amazonaws.location#ForecastedGeofenceEventType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENTER", + "documentation": "This event type signifies that a device is forecasted to enter the geofence" + }, + { + "value": "EXIT", + "documentation": "This event type signifies that a device is forecasted to exit the geofence" + }, + { + "value": "IDLE", + "documentation": "This event type signifies that a device is stationary in the geofence and an exit/enter cannot be forecasted" + } + ] + } + }, + "com.amazonaws.location#GenericResource": { + "type": "resource", + "identifiers": { + "ResourceArn": { + "target": "com.amazonaws.location#Arn" + } + }, + "operations": [ + { + "target": "com.amazonaws.location#ListTagsForResource" + }, + { + "target": "com.amazonaws.location#TagResource" + }, + { + "target": "com.amazonaws.location#UntagResource" + } + ], + "traits": { + "aws.api#controlPlane": {}, + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.location#GeoArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {}, + "smithy.api#length": { + "max": 1600 + }, + "smithy.api#pattern": "^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$" + } + }, + "com.amazonaws.location#GeoArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.location#GeoArnV2" + } + }, + "com.amazonaws.location#GeoArnV2": { + "type": "string", + "traits": { + "aws.api#arnReference": {}, + "smithy.api#length": { + "max": 1600 + }, + "smithy.api#pattern": "(^arn(:[a-z0-9]+([.-][a-z0-9]+)*):geo(:([a-z0-9]+([.-][a-z0-9]+)*))(:[0-9]+):((\\*)|([-a-z]+[/][*-._\\w]+))$)|(^arn(:[a-z0-9]+([.-][a-z0-9]+)*):(geo-routes|geo-places|geo-maps)(:((\\*)|([a-z0-9]+([.-][a-z0-9]+)*)))::((provider[\\/][*-._\\w]+))$)" + } + }, + "com.amazonaws.location#GeofenceCollectionResource": { + "type": "resource", + "identifiers": { + "CollectionName": { + "target": "com.amazonaws.location#ResourceName" + } + }, + "put": { + "target": "com.amazonaws.location#CreateGeofenceCollection" + }, + "read": { + "target": "com.amazonaws.location#DescribeGeofenceCollection" + }, + "update": { + "target": "com.amazonaws.location#UpdateGeofenceCollection" + }, + "delete": { + "target": "com.amazonaws.location#DeleteGeofenceCollection" + }, + "list": { + "target": "com.amazonaws.location#ListGeofenceCollections" + }, + "operations": [ + { + "target": "com.amazonaws.location#BatchDeleteGeofence" + }, + { + "target": "com.amazonaws.location#BatchEvaluateGeofences" + }, + { + "target": "com.amazonaws.location#BatchPutGeofence" + }, + { + "target": "com.amazonaws.location#ForecastGeofenceEvents" + }, + { + "target": "com.amazonaws.location#GetGeofence" + }, + { + "target": "com.amazonaws.location#ListGeofences" + }, + { + "target": "com.amazonaws.location#PutGeofence" + } + ], + "traits": { + "aws.api#arn": { + "template": "geofence-collection/{CollectionName}" + }, + "aws.cloudformation#cfnResource": { + "name": "GeofenceCollection" + }, + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.location#GeofenceGeometry": { + "type": "structure", + "members": { + "Polygon": { + "target": "com.amazonaws.location#LinearRings", + "traits": { + "smithy.api#documentation": "

A polygon is a list of linear rings which are each made up of a list of\n vertices.

\n

Each vertex is a 2-dimensional point of the form: [longitude, latitude].\n This is represented as an array of doubles of length 2 (so [double,\n double]).

\n

An array of 4 or more vertices, where the first and last vertex are the same (to form\n a closed boundary), is called a linear ring. The linear ring vertices must be listed in\n counter-clockwise order around the ring’s interior. The linear ring is represented as an\n array of vertices, or an array of arrays of doubles ([[double, double], ...]).

\n

A geofence consists of a single linear ring. To allow for future expansion, the\n Polygon parameter takes an array of linear rings, which is represented as an array of\n arrays of arrays of doubles ([[[double, double], ...], ...]).

\n

A linear ring for use in geofences can consist of between 4 and 1,000 vertices.

" + } + }, + "Circle": { + "target": "com.amazonaws.location#Circle", + "traits": { + "smithy.api#documentation": "

A circle on the earth, as defined by a center point and a radius.

" + } + }, + "Geobuf": { + "target": "com.amazonaws.location#Base64EncodedGeobuf", + "traits": { + "smithy.api#documentation": "

Geobuf is a compact binary encoding for geographic data that provides lossless compression of GeoJSON polygons. The Geobuf must be Base64-encoded.

\n

A polygon in Geobuf format can have up to 100,000 vertices.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the geofence geometry details.

\n

A geofence geometry can be a\n polygon, a circle, or a polygon encoded in Geobuf format. Including multiple selections will return a validation error.

\n \n

Amazon Location doesn't currently support polygons with holes, multipolygons, polygons\n that are wound clockwise, or that cross the antimeridian.

\n
" + } + }, + "com.amazonaws.location#GetDevicePosition": { + "type": "operation", + "input": { + "target": "com.amazonaws.location#GetDevicePositionRequest" + }, + "output": { + "target": "com.amazonaws.location#GetDevicePositionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.location#AccessDeniedException" + }, + { + "target": "com.amazonaws.location#InternalServerException" + }, + { + "target": "com.amazonaws.location#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.location#ThrottlingException" + }, + { + "target": "com.amazonaws.location#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves a device's most recent position according to its sample time.

\n \n

Device positions are deleted after 30 days.

\n
", + "smithy.api#endpoint": { + "hostPrefix": "tracking." + }, + "smithy.api#http": { + "uri": "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/positions/latest", + "method": "GET" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.location#GetDevicePositionHistory": { + "type": "operation", + "input": { + "target": "com.amazonaws.location#GetDevicePositionHistoryRequest" + }, + "output": { + "target": "com.amazonaws.location#GetDevicePositionHistoryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.location#AccessDeniedException" + }, + { + "target": "com.amazonaws.location#InternalServerException" + }, + { + "target": "com.amazonaws.location#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.location#ThrottlingException" + }, + { + "target": "com.amazonaws.location#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the device position history from a tracker resource within a specified range\n of time.

\n \n

Device positions are deleted after 30 days.

\n
", + "smithy.api#endpoint": { + "hostPrefix": "tracking." + }, + "smithy.api#http": { + "uri": "/tracking/v0/trackers/{TrackerName}/devices/{DeviceId}/list-positions", + "method": "POST" + }, + "smithy.api#paginated": { + "pageSize": "MaxResults", + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "DevicePositions" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.location#GetDevicePositionHistoryRequest": { + "type": "structure", + "members": { + "TrackerName": { + "target": "com.amazonaws.location#ResourceName", + "traits": { + "smithy.api#documentation": "

The tracker resource receiving the request for the device position history.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "DeviceId": { + "target": "com.amazonaws.location#Id", + "traits": { + "smithy.api#documentation": "

The device whose position history you want to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.location#Token", + "traits": { + "smithy.api#documentation": "

The pagination token specifying which page of results to return in the response. If no\n token is provided, the default page is the first page.

\n

Default value: null\n

" + } + }, + "StartTimeInclusive": { "target": "com.amazonaws.location#Timestamp", "traits": { "smithy.api#documentation": "

Specify the start time for the position history in ISO 8601\n format: YYYY-MM-DDThh:mm:ss.sssZ. By default, the value will be 24 hours\n prior to the time that the request is made.

\n

Requirement:

\n
    \n
  • \n

    The time specified for StartTimeInclusive must be before\n EndTimeExclusive.

    \n
  • \n
" @@ -3832,7 +4188,7 @@ "ReceivedTime": { "target": "com.amazonaws.location#Timestamp", "traits": { - "smithy.api#documentation": "

The timestamp for when the tracker resource received the device position in ISO 8601 \n format: YYYY-MM-DDThh:mm:ss.sssZ.

", + "smithy.api#documentation": "

The timestamp for when the tracker resource received the device position. Uses ISO 8601 \n format: YYYY-MM-DDThh:mm:ss.sssZ.

", "smithy.api#required": {} } }, @@ -3883,7 +4239,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the geofence details from a geofence collection.

", + "smithy.api#documentation": "

Retrieves the geofence details from a geofence collection.

\n \n

The returned geometry will always match the geometry format used when the geofence was created.

\n
", "smithy.api#endpoint": { "hostPrefix": "geofencing." }, @@ -4012,7 +4368,7 @@ "FontStack": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

A comma-separated list of fonts to load glyphs from in order of preference. For\n example, Noto Sans Regular, Arial Unicode.

\n

Valid font stacks for Esri styles:

\n
    \n
  • \n

    VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu\n Medium | Ubuntu Italic | Ubuntu Regular |\n Ubuntu Bold\n

    \n
  • \n
  • \n

    VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu\n Regular | Ubuntu Light | Ubuntu Bold\n

    \n
  • \n
  • \n

    VectorEsriTopographic – Noto Sans Italic | Noto Sans\n Regular | Noto Sans Bold | Noto Serif\n Regular | Roboto Condensed Light Italic\n

    \n
  • \n
  • \n

    VectorEsriStreets – Arial Regular | Arial Italic |\n Arial Bold\n

    \n
  • \n
  • \n

    VectorEsriNavigation – Arial Regular | Arial Italic\n | Arial Bold | Arial Unicode MS Bold | Arial Unicode MS Regular\n

    \n
  • \n
\n

Valid font stacks for HERE Technologies styles:

\n
    \n
  • \n

    VectorHereContrast – Fira \n GO Regular | Fira GO Bold\n

    \n
  • \n
  • \n

    VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – \n Fira GO Italic | Fira GO Map | \n Fira GO Map Bold | Noto Sans CJK JP Bold | \n Noto Sans CJK JP Light | \n Noto Sans CJK JP Regular\n

    \n
  • \n
\n

Valid font stacks for GrabMaps styles:

\n
    \n
  • \n

    VectorGrabStandardLight, VectorGrabStandardDark – \n Noto Sans Regular |\n Noto Sans Medium |\n Noto Sans Bold\n

    \n
  • \n
\n

Valid font stacks for Open Data styles:

\n
    \n
  • \n

    VectorOpenDataStandardLight, VectorOpenDataStandardDark,\n VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – \n Amazon Ember Regular,Noto Sans Regular |\n Amazon Ember Bold,Noto Sans Bold | \n Amazon Ember Medium,Noto Sans Medium |\n Amazon Ember Regular Italic,Noto Sans Italic | \n Amazon Ember Condensed RC Regular,Noto Sans Regular | \n Amazon Ember Condensed RC Bold,Noto Sans Bold |\n Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular |\n Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic \n Condensed Bold |\n Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold |\n Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic \n Regular |\n Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic \n Condensed Regular |\n Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium\n

    \n
  • \n
\n \n

The fonts used by the Open Data map styles are combined fonts\n that use Amazon Ember for most glyphs but Noto Sans \n for glyphs unsupported by Amazon Ember.

\n
", + "smithy.api#documentation": "

A comma-separated list of fonts to load glyphs from in order of preference. For\n example, Noto Sans Regular, Arial Unicode.

\n

Valid font stacks for Esri styles:

\n
    \n
  • \n

    VectorEsriDarkGrayCanvas – Ubuntu Medium Italic | Ubuntu\n Medium | Ubuntu Italic | Ubuntu Regular |\n Ubuntu Bold\n

    \n
  • \n
  • \n

    VectorEsriLightGrayCanvas – Ubuntu Italic | Ubuntu\n Regular | Ubuntu Light | Ubuntu Bold\n

    \n
  • \n
  • \n

    VectorEsriTopographic – Noto Sans Italic | Noto Sans\n Regular | Noto Sans Bold | Noto Serif\n Regular | Roboto Condensed Light Italic\n

    \n
  • \n
  • \n

    VectorEsriStreets – Arial Regular | Arial Italic |\n Arial Bold\n

    \n
  • \n
  • \n

    VectorEsriNavigation – Arial Regular | Arial Italic\n | Arial Bold\n

    \n
  • \n
\n

Valid font stacks for HERE Technologies styles:

\n
    \n
  • \n

    VectorHereContrast – Fira \n GO Regular | Fira GO Bold\n

    \n
  • \n
  • \n

    VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – \n Fira GO Italic | Fira GO Map | \n Fira GO Map Bold | Noto Sans CJK JP Bold | \n Noto Sans CJK JP Light | \n Noto Sans CJK JP Regular\n

    \n
  • \n
\n

Valid font stacks for GrabMaps styles:

\n
    \n
  • \n

    VectorGrabStandardLight, VectorGrabStandardDark – \n Noto Sans Regular |\n Noto Sans Medium |\n Noto Sans Bold\n

    \n
  • \n
\n

Valid font stacks for Open Data styles:

\n
    \n
  • \n

    VectorOpenDataStandardLight, VectorOpenDataStandardDark,\n VectorOpenDataVisualizationLight, VectorOpenDataVisualizationDark – \n Amazon Ember Regular,Noto Sans Regular |\n Amazon Ember Bold,Noto Sans Bold | \n Amazon Ember Medium,Noto Sans Medium |\n Amazon Ember Regular Italic,Noto Sans Italic | \n Amazon Ember Condensed RC Regular,Noto Sans Regular | \n Amazon Ember Condensed RC Bold,Noto Sans Bold |\n Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular |\n Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic \n Condensed Bold |\n Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold |\n Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic \n Regular |\n Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic \n Condensed Regular |\n Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium\n

    \n
  • \n
\n \n

The fonts used by the Open Data map styles are combined fonts\n that use Amazon Ember for most glyphs but Noto Sans \n for glyphs unsupported by Amazon Ember.

\n
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4405,7 +4761,7 @@ "PlaceId": { "target": "com.amazonaws.location#PlaceId", "traits": { - "smithy.api#documentation": "

The identifier of the place to find.

\n

While you can use PlaceID in subsequent requests, \n PlaceID is not intended to be a permanent \n identifier and the ID can change between consecutive API calls. \n Please see the following PlaceID behaviour for each data provider:

\n
    \n
  • \n

    Esri: Place IDs will change every quarter at a minimum. The typical time period for these changes would be March, June, September, and December. Place IDs might also change between the typical quarterly change but that will be much less frequent.

    \n
  • \n
  • \n

    HERE: We recommend \n that you cache data for no longer than a week \n to keep your data data fresh. You can \n assume that less than 1% ID shifts will\n release over release which is approximately 1 - 2 times per week.

    \n
  • \n
  • \n

    Grab: Place IDs can expire or become invalid in the following situations.

    \n
      \n
    • \n

      Data operations: The POI may be removed from Grab POI database by Grab Map Ops based on the ground-truth,\n such as being closed in the real world, being detected as a duplicate POI, or having incorrect information. Grab will synchronize data to the Waypoint environment on weekly basis.

      \n
    • \n
    • \n

      Interpolated POI: Interpolated POI is a temporary POI generated in real time when serving a request, \n and it will be marked as derived in the place.result_type field in the response. \n The information of interpolated POIs will be retained for at least 30 days, which means that within 30 days, you are able to obtain POI details by \n Place ID from Place Details API. After 30 days, the interpolated POIs(both Place ID and details) may expire and inaccessible from the Places Details API.

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

The identifier of the place to find.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4454,6 +4810,39 @@ "target": "com.amazonaws.location#Id" } }, + "com.amazonaws.location#InferredState": { + "type": "structure", + "members": { + "Position": { + "target": "com.amazonaws.location#Position", + "traits": { + "smithy.api#documentation": "

The device position inferred from the provided position, IP address, cellular signals, and Wi-Fi access points.

" + } + }, + "Accuracy": { + "target": "com.amazonaws.location#PositionalAccuracy", + "traits": { + "smithy.api#documentation": "

The level of certainty of the inferred position.

" + } + }, + "DeviationDistance": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The distance between the inferred position and the device's self-reported position.

" + } + }, + "ProxyDetected": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates if a proxy was used.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The inferred state of the device, given the provided position, IP address, cellular signals, and Wi-Fi access points.

" + } + }, "com.amazonaws.location#IntendedUse": { "type": "string", "traits": { @@ -4505,6 +4894,15 @@ } } }, + "com.amazonaws.location#LargeToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 60000 + } + } + }, "com.amazonaws.location#Leg": { "type": "structure", "members": { @@ -4893,7 +5291,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the geofence collection details.

" + "smithy.api#documentation": "

Contains the geofence collection details.

\n \n

The returned geometry will always match the geometry format used when the geofence was created.

\n
" } }, "com.amazonaws.location#ListGeofenceCollectionsResponseEntryList": { @@ -4948,7 +5346,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains a list of geofences stored in a given geofence collection.

" + "smithy.api#documentation": "

Contains a list of geofences stored in a given geofence collection.

\n \n

The returned geometry will always match the geometry format used when the geofence was created.

\n
" } }, "com.amazonaws.location#ListGeofenceResponseEntryList": { @@ -5012,7 +5410,7 @@ } }, "NextToken": { - "target": "com.amazonaws.location#Token", + "target": "com.amazonaws.location#LargeToken", "traits": { "smithy.api#documentation": "

The pagination token specifying which page of results to return in the response. If no\n token is provided, the default page is the first page.

\n

Default value: null\n

" } @@ -5041,7 +5439,7 @@ } }, "NextToken": { - "target": "com.amazonaws.location#Token", + "target": "com.amazonaws.location#LargeToken", "traits": { "smithy.api#documentation": "

A pagination token indicating there are additional pages available. You can use the\n token in a following request to fetch the next set of results.

" } @@ -6555,15 +6953,187 @@ "Endpoint": "https://example.com" } }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.location#LteCellDetails": { + "type": "structure", + "members": { + "CellId": { + "target": "com.amazonaws.location#EutranCellId", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The E-UTRAN Cell Identifier (ECI).

", + "smithy.api#required": {} + } + }, + "Mcc": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The Mobile Country Code (MCC).

", + "smithy.api#range": { + "min": 200, + "max": 999 + }, + "smithy.api#required": {} + } + }, + "Mnc": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The Mobile Network Code (MNC).

", + "smithy.api#range": { + "min": 0, + "max": 999 + }, + "smithy.api#required": {} + } + }, + "LocalId": { + "target": "com.amazonaws.location#LteLocalId", + "traits": { + "smithy.api#documentation": "

The LTE local identification information (local ID).

" + } + }, + "NetworkMeasurements": { + "target": "com.amazonaws.location#LteNetworkMeasurementsList", + "traits": { + "smithy.api#documentation": "

The network measurements.

", + "smithy.api#length": { + "min": 1, + "max": 32 } - ], - "version": "1.0" + } + }, + "TimingAdvance": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Timing Advance (TA).

", + "smithy.api#range": { + "min": 0, + "max": 1282 + } + } + }, + "NrCapable": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the LTE object is capable of supporting NR (new radio).

" + } + }, + "Rsrp": { + "target": "com.amazonaws.location#Rsrp", + "traits": { + "smithy.api#documentation": "

Signal power of the reference signal received, measured in decibel-milliwatts (dBm).

" + } + }, + "Rsrq": { + "target": "com.amazonaws.location#Rsrq", + "traits": { + "smithy.api#documentation": "

Signal quality of the reference signal received, measured in decibels (dB).

" + } + }, + "Tac": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

LTE Tracking Area Code (TAC).

", + "smithy.api#range": { + "min": 0, + "max": 65535 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the Long-Term Evolution (LTE) network.

" + } + }, + "com.amazonaws.location#LteCellDetailsList": { + "type": "list", + "member": { + "target": "com.amazonaws.location#LteCellDetails" + } + }, + "com.amazonaws.location#LteLocalId": { + "type": "structure", + "members": { + "Earfcn": { + "target": "com.amazonaws.location#Earfcn", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

E-UTRA (Evolved Universal Terrestrial Radio Access) absolute radio frequency channel number (EARFCN).

", + "smithy.api#required": {} + } + }, + "Pci": { + "target": "com.amazonaws.location#Pci", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

Physical Cell ID (PCI).

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

LTE local identification information (local ID).

" + } + }, + "com.amazonaws.location#LteNetworkMeasurements": { + "type": "structure", + "members": { + "Earfcn": { + "target": "com.amazonaws.location#Earfcn", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

E-UTRA (Evolved Universal Terrestrial Radio Access) absolute radio frequency channel number (EARFCN).

", + "smithy.api#required": {} + } + }, + "CellId": { + "target": "com.amazonaws.location#EutranCellId", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

E-UTRAN Cell Identifier (ECI).

", + "smithy.api#required": {} + } + }, + "Pci": { + "target": "com.amazonaws.location#Pci", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

Physical Cell ID (PCI).

", + "smithy.api#required": {} + } + }, + "Rsrp": { + "target": "com.amazonaws.location#Rsrp", + "traits": { + "smithy.api#documentation": "

Signal power of the reference signal received, measured in dBm (decibel-milliwatts).

" + } + }, + "Rsrq": { + "target": "com.amazonaws.location#Rsrq", + "traits": { + "smithy.api#documentation": "

Signal quality of the reference signal received, measured in decibels (dB).

" + } } + }, + "traits": { + "smithy.api#documentation": "

LTE network measurements.

" + } + }, + "com.amazonaws.location#LteNetworkMeasurementsList": { + "type": "list", + "member": { + "target": "com.amazonaws.location#LteNetworkMeasurements" } }, "com.amazonaws.location#MapConfiguration": { @@ -6572,7 +7142,7 @@ "Style": { "target": "com.amazonaws.location#MapStyle", "traits": { - "smithy.api#documentation": "

Specifies the map style selected from an available data provider.

\n

Valid Esri map styles:

\n
    \n
  • \n

    \n VectorEsriNavigation – The Esri Navigation map style, which provides a detailed basemap for the world symbolized with a \n custom navigation map style that's designed for use during the day in mobile devices. It also includes a richer set of places, \n such as shops, services, restaurants, attractions, and other points of interest. \n Enable the POI layer by setting it in CustomLayers to leverage the additional places data.

    \n

    \n

  • \n
  • \n

    \n RasterEsriImagery – The Esri Imagery map style. A raster basemap\n that provides one meter or better satellite and aerial imagery in many parts of\n the world and lower resolution satellite imagery worldwide.

    \n
  • \n
  • \n

    \n VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style,\n which provides a detailed vector basemap with a light gray, neutral background\n style with minimal colors, labels, and features that's designed to draw\n attention to your thematic content.

    \n
  • \n
  • \n

    \n VectorEsriTopographic – The Esri Light map style, which provides\n a detailed vector basemap with a classic Esri map style.

    \n
  • \n
  • \n

    \n VectorEsriStreets – The Esri Street Map style, which\n provides a detailed vector basemap for the world symbolized with a classic Esri\n street map style. The vector tile layer is similar in content and style to the\n World Street Map raster map.

    \n
  • \n
  • \n

    \n VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A\n vector basemap with a dark gray, neutral background with minimal colors, labels,\n and features that's designed to draw attention to your thematic content.

    \n
  • \n
\n

Valid HERE\n Technologies map styles:

\n
    \n
  • \n

    \n VectorHereExplore – A default HERE map style containing a \n neutral, global map and its features including roads, buildings, landmarks, \n and water features. It also now includes a fully designed map of Japan.

    \n
  • \n
  • \n

    \n RasterHereExploreSatellite – A global map containing high\n resolution satellite imagery.

    \n
  • \n
  • \n

    \n HybridHereExploreSatellite – A global map displaying the road \n network, street names, and city labels over satellite imagery. This style \n will automatically retrieve both raster and vector tiles, and your charges \n will be based on total tiles retrieved.

    \n \n

    Hybrid styles use both vector and raster tiles when rendering the \n map that you see. This means that more tiles are retrieved than when using \n either vector or raster tiles alone. Your charges will include all tiles \n retrieved.

    \n
    \n
  • \n
  • \n

    \n VectorHereContrast – The HERE Contrast (Berlin) map style is a \n high contrast\n detailed base map of the world that blends 3D and 2D rendering.

    \n \n

    The VectorHereContrast style has been renamed from \n VectorHereBerlin. \n VectorHereBerlin has been deprecated, but will continue to work in \n applications that use it.

    \n
    \n
  • \n
  • \n

    \n VectorHereExploreTruck – A global map containing truck \n restrictions and attributes (e.g. width / height / HAZMAT) symbolized with \n highlighted segments and icons on top of HERE Explore to support use cases \n within transport and logistics.

    \n
  • \n
\n

Valid GrabMaps map styles:

\n
    \n
  • \n

    \n VectorGrabStandardLight – The Grab Standard Light \n map style provides a basemap with detailed land use coloring, \n area names, roads, landmarks, and points of interest covering \n Southeast Asia.

    \n
  • \n
  • \n

    \n VectorGrabStandardDark – The Grab Standard Dark \n map style provides a dark variation of the standard basemap \n covering Southeast Asia.

    \n
  • \n
\n \n

Grab provides maps only for countries in Southeast Asia, and is only available \n in the Asia Pacific (Singapore) Region (ap-southeast-1).\n For more information, see GrabMaps countries and area covered.

\n
\n

Valid Open Data map styles:

\n
    \n
  • \n

    \n VectorOpenDataStandardLight – The Open Data Standard Light \n map style provides a detailed basemap for the world suitable for\n website and mobile application use. The map includes highways major roads, \n minor roads, railways, water features, cities, parks, landmarks, building\n footprints, and administrative boundaries.

    \n
  • \n
  • \n

    \n VectorOpenDataStandardDark – Open Data Standard Dark is a\n dark-themed map style that provides a detailed basemap for the world \n suitable for website and mobile application use. The map includes highways \n major roads, minor roads, railways, water features, cities, parks, \n landmarks, building footprints, and administrative boundaries.

    \n
  • \n
  • \n

    \n VectorOpenDataVisualizationLight – The Open Data \n Visualization Light map style is a light-themed style with muted colors and\n fewer features that aids in understanding overlaid data.

    \n
  • \n
  • \n

    \n VectorOpenDataVisualizationDark – The Open Data \n Visualization Dark map style is a dark-themed style with muted colors and\n fewer features that aids in understanding overlaid data.

    \n
  • \n
", + "smithy.api#documentation": "

Specifies the map style selected from an available data provider.

\n

Valid Esri map styles:

\n
    \n
  • \n

    \n VectorEsriDarkGrayCanvas – The Esri Dark Gray Canvas map style. A\n vector basemap with a dark gray, neutral background with minimal colors, labels,\n and features that's designed to draw attention to your thematic content.

    \n
  • \n
  • \n

    \n RasterEsriImagery – The Esri Imagery map style. A raster basemap\n that provides one meter or better satellite and aerial imagery in many parts of\n the world and lower resolution satellite imagery worldwide.

    \n
  • \n
  • \n

    \n VectorEsriLightGrayCanvas – The Esri Light Gray Canvas map style,\n which provides a detailed vector basemap with a light gray, neutral background\n style with minimal colors, labels, and features that's designed to draw\n attention to your thematic content.

    \n
  • \n
  • \n

    \n VectorEsriTopographic – The Esri Light map style, which provides\n a detailed vector basemap with a classic Esri map style.

    \n
  • \n
  • \n

    \n VectorEsriStreets – The Esri Street Map style, which\n provides a detailed vector basemap for the world symbolized with a classic Esri\n street map style. The vector tile layer is similar in content and style to the\n World Street Map raster map.

    \n
  • \n
  • \n

    \n VectorEsriNavigation – The Esri Navigation map style, which\n provides a detailed basemap for the world symbolized with a custom navigation\n map style that's designed for use during the day in mobile devices.

    \n
  • \n
\n

Valid HERE\n Technologies map styles:

\n
    \n
  • \n

    \n VectorHereContrast – The HERE Contrast (Berlin) map style is a \n high contrast\n detailed base map of the world that blends 3D and 2D rendering.

    \n \n

    The VectorHereContrast style has been renamed from \n VectorHereBerlin. \n VectorHereBerlin has been deprecated, but will continue to work in \n applications that use it.

    \n
    \n
  • \n
  • \n

    \n VectorHereExplore – A default HERE map style containing a \n neutral, global map and its features including roads, buildings, landmarks, \n and water features. It also now includes a fully designed map of Japan.

    \n
  • \n
  • \n

    \n VectorHereExploreTruck – A global map containing truck \n restrictions and attributes (e.g. width / height / HAZMAT) symbolized with \n highlighted segments and icons on top of HERE Explore to support use cases \n within transport and logistics.

    \n
  • \n
  • \n

    \n RasterHereExploreSatellite – A global map containing high\n resolution satellite imagery.

    \n
  • \n
  • \n

    \n HybridHereExploreSatellite – A global map displaying the road \n network, street names, and city labels over satellite imagery. This style \n will automatically retrieve both raster and vector tiles, and your charges \n will be based on total tiles retrieved.

    \n \n

    Hybrid styles use both vector and raster tiles when rendering the \n map that you see. This means that more tiles are retrieved than when using \n either vector or raster tiles alone. Your charges will include all tiles \n retrieved.

    \n
    \n
  • \n
\n

Valid GrabMaps map styles:

\n
    \n
  • \n

    \n VectorGrabStandardLight – The Grab Standard Light \n map style provides a basemap with detailed land use coloring, \n area names, roads, landmarks, and points of interest covering \n Southeast Asia.

    \n
  • \n
  • \n

    \n VectorGrabStandardDark – The Grab Standard Dark \n map style provides a dark variation of the standard basemap \n covering Southeast Asia.

    \n
  • \n
\n \n

Grab provides maps only for countries in Southeast Asia, and is only available \n in the Asia Pacific (Singapore) Region (ap-southeast-1).\n For more information, see GrabMaps countries and area covered.

\n
\n

Valid Open Data map styles:

\n
    \n
  • \n

    \n VectorOpenDataStandardLight – The Open Data Standard Light \n map style provides a detailed basemap for the world suitable for\n website and mobile application use. The map includes highways major roads, \n minor roads, railways, water features, cities, parks, landmarks, building\n footprints, and administrative boundaries.

    \n
  • \n
  • \n

    \n VectorOpenDataStandardDark – Open Data Standard Dark is a\n dark-themed map style that provides a detailed basemap for the world \n suitable for website and mobile application use. The map includes highways \n major roads, minor roads, railways, water features, cities, parks, \n landmarks, building footprints, and administrative boundaries.

    \n
  • \n
  • \n

    \n VectorOpenDataVisualizationLight – The Open Data \n Visualization Light map style is a light-themed style with muted colors and\n fewer features that aids in understanding overlaid data.

    \n
  • \n
  • \n

    \n VectorOpenDataVisualizationDark – The Open Data \n Visualization Dark map style is a dark-themed style with muted colors and\n fewer features that aids in understanding overlaid data.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -6585,8 +7155,7 @@ "CustomLayers": { "target": "com.amazonaws.location#CustomLayerList", "traits": { - "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style.\nDefault is unset.

\n \n

Currenlty only VectorEsriNavigation supports CustomLayers. \nFor more information, see Custom Layers.

\n
" + "smithy.api#documentation": "

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style.\nDefault is unset.

\n \n

Not all map resources or styles support custom layers. See Custom Layers for more information.

\n
" } } }, @@ -6606,8 +7175,7 @@ "CustomLayers": { "target": "com.amazonaws.location#CustomLayerList", "traits": { - "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style.\nDefault is unset.

\n \n

Currenlty only VectorEsriNavigation supports CustomLayers. \nFor more information, see Custom Layers.

\n
" + "smithy.api#documentation": "

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style.\nDefault is unset.

\n \n

Not all map resources or styles support custom layers. See Custom Layers for more information.

\n
" } } }, @@ -6671,6 +7239,15 @@ "smithy.api#pattern": "^[-._\\w]+$" } }, + "com.amazonaws.location#NearestDistance": { + "type": "double", + "traits": { + "smithy.api#default": 0, + "smithy.api#range": { + "min": 0 + } + } + }, "com.amazonaws.location#OptimizationMode": { "type": "string", "traits": { @@ -6684,6 +7261,16 @@ ] } }, + "com.amazonaws.location#Pci": { + "type": "integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#range": { + "min": 0, + "max": 503 + } + } + }, "com.amazonaws.location#Place": { "type": "structure", "members": { @@ -6762,13 +7349,13 @@ "UnitType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

For addresses with a UnitNumber, the type of unit. For example,\n Apartment.

\n \n

This property is returned only for a place index that uses Esri as a data provider.

\n
" + "smithy.api#documentation": "

For addresses with a UnitNumber, the type of unit. For example,\n Apartment.

\n \n

Returned only for a place index that uses Esri as a data provider.

\n
" } }, "UnitNumber": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

For addresses with multiple units, the unit identifier. Can include numbers and\n letters, for example 3B or Unit 123.

\n \n

This property is returned only for a place index that uses Esri or Grab as a data provider. It is \n not returned for SearchPlaceIndexForPosition.

\n
" + "smithy.api#documentation": "

For addresses with multiple units, the unit identifier. Can include numbers and\n letters, for example 3B or Unit 123.

\n \n

Returned only for a place index that uses Esri or Grab as a data provider. It is \n not returned for SearchPlaceIndexForPosition.

\n
" } }, "Categories": { @@ -6786,7 +7373,7 @@ "SubMunicipality": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

An area that's part of a larger municipality. For example, Blissville \n is a submunicipality in the Queen County in New York.

\n \n

This property is only returned for a place index that uses Esri as a data provider. The property is represented as a district.

\n
\n

For more information about data providers, see Amazon Location Service data providers.

" + "smithy.api#documentation": "

An area that's part of a larger municipality. For example, Blissville \n is a submunicipality in Queens County in New York.

\n \n

This property is supported by Esri and OpenData. The Esri property is district, and the OpenData property is borough.

\n
" } } }, @@ -6956,7 +7543,7 @@ "smithy.api#documentation": "

Estimated maximum distance, in meters, between the measured position and the true\n position of a device, along the Earth's surface.

", "smithy.api#range": { "min": 0, - "max": 10000 + "max": 10000000 }, "smithy.api#required": {} } @@ -7074,7 +7661,7 @@ "Geometry": { "target": "com.amazonaws.location#GeofenceGeometry", "traits": { - "smithy.api#documentation": "

Contains the details to specify the position of the geofence. Can be either a \n polygon or a circle. Including both will return a validation error.

\n \n

Each \n geofence polygon can have a maximum of 1,000 vertices.

\n
", + "smithy.api#documentation": "

Contains the details to specify the position of the geofence. Can be a\n polygon, a circle, or a polygon encoded in Geobuf format. Including multiple selections will return a validation error.

\n \n

The \n geofence polygon format supports a maximum of 1,000 vertices. The geofence Geobuf format supports a maximum of 100,000 vertices.

\n
", "smithy.api#required": {} } }, @@ -7293,6 +7880,24 @@ "target": "com.amazonaws.location#RouteMatrixEntry" } }, + "com.amazonaws.location#Rsrp": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": -140, + "max": -44 + } + } + }, + "com.amazonaws.location#Rsrq": { + "type": "float", + "traits": { + "smithy.api#range": { + "min": -19.5, + "max": -3 + } + } + }, "com.amazonaws.location#SearchForPositionResult": { "type": "structure", "members": { @@ -7343,7 +7948,7 @@ "PlaceId": { "target": "com.amazonaws.location#PlaceId", "traits": { - "smithy.api#documentation": "

The unique identifier of the Place. You can use this with the GetPlace\n operation to find the place again later, or to get full information for the Place.

\n

The GetPlace request must use the same PlaceIndex \n resource as the SearchPlaceIndexForSuggestions that generated the Place \n ID.

\n \n

For SearchPlaceIndexForSuggestions operations, the\n PlaceId is returned by place indexes that use Esri, Grab, or HERE\n as data providers.

\n
\n

While you can use PlaceID in subsequent requests, \n PlaceID is not intended to be a permanent \n identifier and the ID can change between consecutive API calls. \n Please see the following PlaceID behaviour for each data provider:

\n
    \n
  • \n

    Esri: Place IDs will change every quarter at a minimum. The typical time period for these changes would be March, June, September, and December. Place IDs might also change between the typical quarterly change but that will be much less frequent.

    \n
  • \n
  • \n

    HERE: We recommend \n that you cache data for no longer than a week \n to keep your data data fresh. You can \n assume that less than 1% ID shifts will\n release over release which is approximately 1 - 2 times per week.

    \n
  • \n
  • \n

    Grab: Place IDs can expire or become invalid in the following situations.

    \n
      \n
    • \n

      Data operations: The POI may be removed from Grab POI database by Grab Map Ops based on the ground-truth,\n such as being closed in the real world, being detected as a duplicate POI, or having incorrect information. Grab will synchronize data to the Waypoint environment on weekly basis.

      \n
    • \n
    • \n

      Interpolated POI: Interpolated POI is a temporary POI generated in real time when serving a request, \n and it will be marked as derived in the place.result_type field in the response. \n The information of interpolated POIs will be retained for at least 30 days, which means that within 30 days, you are able to obtain POI details by \n Place ID from Place Details API. After 30 days, the interpolated POIs(both Place ID and details) may expire and inaccessible from the Places Details API.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

The unique identifier of the Place. You can use this with the GetPlace\n operation to find the place again later, or to get full information for the Place.

\n

The GetPlace request must use the same PlaceIndex \n resource as the SearchPlaceIndexForSuggestions that generated the Place \n ID.

\n \n

For SearchPlaceIndexForSuggestions operations, the\n PlaceId is returned by place indexes that use Esri, Grab, or HERE\n as data providers.

\n
" } }, "Categories": { @@ -7941,6 +8546,19 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.location#SpeedUnit": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "KilometersPerHour" + }, + { + "value": "MilesPerHour" + } + ] + } + }, "com.amazonaws.location#Status": { "type": "string", "traits": { @@ -8222,6 +8840,9 @@ }, { "target": "com.amazonaws.location#ListTrackerConsumers" + }, + { + "target": "com.amazonaws.location#VerifyDevicePosition" } ], "traits": { @@ -9014,6 +9635,12 @@ } } }, + "com.amazonaws.location#Uuid": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + } + }, "com.amazonaws.location#ValidationException": { "type": "structure", "members": { @@ -9117,11 +9744,156 @@ ] } }, + "com.amazonaws.location#VerifyDevicePosition": { + "type": "operation", + "input": { + "target": "com.amazonaws.location#VerifyDevicePositionRequest" + }, + "output": { + "target": "com.amazonaws.location#VerifyDevicePositionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.location#AccessDeniedException" + }, + { + "target": "com.amazonaws.location#InternalServerException" + }, + { + "target": "com.amazonaws.location#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.location#ThrottlingException" + }, + { + "target": "com.amazonaws.location#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Verifies the integrity of the device's position by determining whether it was reported behind a proxy, and by comparing it to an inferred position estimated from the device's state.

", + "smithy.api#endpoint": { + "hostPrefix": "tracking." + }, + "smithy.api#http": { + "uri": "/tracking/v0/trackers/{TrackerName}/positions/verify", + "method": "POST" + } + } + }, + "com.amazonaws.location#VerifyDevicePositionRequest": { + "type": "structure", + "members": { + "TrackerName": { + "target": "com.amazonaws.location#ResourceName", + "traits": { + "smithy.api#documentation": "

The name of the tracker resource to be associated with the verification request.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "DeviceState": { + "target": "com.amazonaws.location#DeviceState", + "traits": { + "smithy.api#documentation": "

The device's state, including position, IP address, cell signals and Wi-Fi access points.

", + "smithy.api#required": {} + } + }, + "DistanceUnit": { + "target": "com.amazonaws.location#DistanceUnit", + "traits": { + "smithy.api#documentation": "

The distance unit for the verification request.

Default Value: Kilometers

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.location#VerifyDevicePositionResponse": { + "type": "structure", + "members": { + "InferredState": { + "target": "com.amazonaws.location#InferredState", + "traits": { + "smithy.api#documentation": "

The inferred state of the device, given the provided position, IP address, cellular signals, and Wi-Fi access points.

", + "smithy.api#required": {} + } + }, + "DeviceId": { + "target": "com.amazonaws.location#Id", + "traits": { + "smithy.api#documentation": "

The device identifier.

", + "smithy.api#required": {} + } + }, + "SampleTime": { + "target": "com.amazonaws.location#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp at which the device's position was determined. Uses ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.

", + "smithy.api#required": {} + } + }, + "ReceivedTime": { + "target": "com.amazonaws.location#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp for when the tracker resource received the device position in ISO 8601 format: YYYY-MM-DDThh:mm:ss.sssZ.
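For clients that handle the wire format directly rather than relying on an SDK's date decoding, timestamps in this extended ISO 8601 form parse with Foundation's ISO8601DateFormatter once fractional seconds are enabled. A small, self-contained sketch (the example timestamp value is illustrative):

```swift
import Foundation

// SampleTime and ReceivedTime use extended ISO 8601 with fractional seconds
// (YYYY-MM-DDThh:mm:ss.sssZ), so enable .withFractionalSeconds when parsing.
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime, .withFractionalSeconds]
let receivedTime = formatter.date(from: "2024-06-26T11:40:04.000Z")  // example value
print(receivedTime ?? "failed to parse")
```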

", + "smithy.api#required": {} + } + }, + "DistanceUnit": { + "target": "com.amazonaws.location#DistanceUnit", + "traits": { + "smithy.api#documentation": "

The distance unit for the verification response.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.location#WaypointPositionList": { "type": "list", "member": { "target": "com.amazonaws.location#Position" } + }, + "com.amazonaws.location#WiFiAccessPoint": { + "type": "structure", + "members": { + "MacAddress": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Medium access control (MAC) address.

", + "smithy.api#length": { + "min": 12, + "max": 17 + }, + "smithy.api#pattern": "^([0-9A-Fa-f]{2}[:-]?){5}([0-9A-Fa-f]{2})$", + "smithy.api#required": {} + } + }, + "Rss": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Received signal strength (dBm) of the WLAN measurement data.

", + "smithy.api#range": { + "min": -128, + "max": 0 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Wi-Fi access point.
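A small client-side check can mirror the constraints declared for this shape. The pattern, length bounds, and signal-strength range below come directly from the traits above; the helper function name and example values are only illustrative.

```swift
import Foundation

// MacAddress must be 12-17 characters and match the declared pattern;
// Rss must fall within -128...0 dBm.
let macPattern = "^([0-9A-Fa-f]{2}[:-]?){5}([0-9A-Fa-f]{2})$"

func isValidWiFiAccessPoint(macAddress: String, rss: Int) -> Bool {
    let macOK = (12...17).contains(macAddress.count)
        && macAddress.range(of: macPattern, options: .regularExpression) != nil
    return macOK && (-128...0).contains(rss)
}

print(isValidWiFiAccessPoint(macAddress: "00:1A:2B:3C:4D:5E", rss: -67))  // true
print(isValidWiFiAccessPoint(macAddress: "not-a-mac", rss: -67))          // false
```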

" + } + }, + "com.amazonaws.location#WiFiAccessPointList": { + "type": "list", + "member": { + "target": "com.amazonaws.location#WiFiAccessPoint" + } } } } \ No newline at end of file diff --git a/models/macie2.json b/models/macie2.json index a408d58dce..5ba0b2f3b5 100644 --- a/models/macie2.json +++ b/models/macie2.json @@ -483,6 +483,158 @@ "smithy.api#documentation": "

Provides information about an identity that performed an action on an affected resource by using temporary security credentials. The credentials were obtained using the AssumeRole operation of the Security Token Service (STS) API.

" } }, + "com.amazonaws.macie2#AutoEnableMode": { + "type": "enum", + "members": { + "ALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL" + } + }, + "NEW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEW" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies whether to automatically enable automated sensitive data discovery for accounts that are part of an organization in Amazon Macie. Valid values are:

" + } + }, + "com.amazonaws.macie2#AutomatedDiscoveryAccount": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID for the account.

", + "smithy.api#jsonName": "accountId" + } + }, + "status": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryAccountStatus", + "traits": { + "smithy.api#documentation": "

The current status of automated sensitive data discovery for the account. Possible values are: ENABLED, perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account.

", + "smithy.api#jsonName": "status" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about the status of automated sensitive data discovery for an Amazon Macie account.

" + } + }, + "com.amazonaws.macie2#AutomatedDiscoveryAccountStatus": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + }, + "traits": { + "smithy.api#documentation": "

The status of automated sensitive data discovery for an Amazon Macie account. Valid values are:

" + } + }, + "com.amazonaws.macie2#AutomatedDiscoveryAccountUpdate": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID for the account.

", + "smithy.api#jsonName": "accountId" + } + }, + "status": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryAccountStatus", + "traits": { + "smithy.api#documentation": "

The new status of automated sensitive data discovery for the account. Valid values are: ENABLED, perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account.

", + "smithy.api#jsonName": "status" + } + } + }, + "traits": { + "smithy.api#documentation": "

Changes the status of automated sensitive data discovery for an Amazon Macie account.

" + } + }, + "com.amazonaws.macie2#AutomatedDiscoveryAccountUpdateError": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID for the account that the request applied to.

", + "smithy.api#jsonName": "accountId" + } + }, + "errorCode": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryAccountUpdateErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for the error that caused the request to fail for the account (accountId). Possible values are: ACCOUNT_NOT_FOUND, the account doesn’t exist or you're not the Amazon Macie administrator for the account; and, ACCOUNT_PAUSED, Macie isn’t enabled for the account in the current Amazon Web Services Region.

", + "smithy.api#jsonName": "errorCode" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about a request that failed to change the status of automated sensitive data discovery for an Amazon Macie account.

" + } + }, + "com.amazonaws.macie2#AutomatedDiscoveryAccountUpdateErrorCode": { + "type": "enum", + "members": { + "ACCOUNT_PAUSED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCOUNT_PAUSED" + } + }, + "ACCOUNT_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACCOUNT_NOT_FOUND" + } + } + }, + "traits": { + "smithy.api#documentation": "

The error code that indicates why a request failed to change the status of automated sensitive data discovery for an Amazon Macie account. Possible values are:

" + } + }, + "com.amazonaws.macie2#AutomatedDiscoveryMonitoringStatus": { + "type": "enum", + "members": { + "MONITORED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MONITORED" + } + }, + "NOT_MONITORED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_MONITORED" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies whether automated sensitive data discovery is currently configured to analyze objects in an S3 bucket. Possible values are:

" + } + }, "com.amazonaws.macie2#AutomatedDiscoveryStatus": { "type": "enum", "members": { @@ -500,7 +652,7 @@ } }, "traits": { - "smithy.api#documentation": "

The status of the automated sensitive data discovery configuration for an Amazon Macie account. Valid values are:

" + "smithy.api#documentation": "

The status of the automated sensitive data discovery configuration for an organization in Amazon Macie or a standalone Macie account. Valid values are:

" } }, "com.amazonaws.macie2#AvailabilityCode": { @@ -687,6 +839,70 @@ "smithy.api#output": {} } }, + "com.amazonaws.macie2#BatchUpdateAutomatedDiscoveryAccounts": { + "type": "operation", + "input": { + "target": "com.amazonaws.macie2#BatchUpdateAutomatedDiscoveryAccountsRequest" + }, + "output": { + "target": "com.amazonaws.macie2#BatchUpdateAutomatedDiscoveryAccountsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.macie2#AccessDeniedException" + }, + { + "target": "com.amazonaws.macie2#ConflictException" + }, + { + "target": "com.amazonaws.macie2#InternalServerException" + }, + { + "target": "com.amazonaws.macie2#ThrottlingException" + }, + { + "target": "com.amazonaws.macie2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Changes the status of automated sensitive data discovery for one or more accounts.

", + "smithy.api#http": { + "method": "PATCH", + "uri": "/automated-discovery/accounts", + "code": 200 + } + } + }, + "com.amazonaws.macie2#BatchUpdateAutomatedDiscoveryAccountsRequest": { + "type": "structure", + "members": { + "accounts": { + "target": "com.amazonaws.macie2#__listOfAutomatedDiscoveryAccountUpdate", + "traits": { + "smithy.api#documentation": "

An array of objects, one for each account to change the status of automated sensitive data discovery for. Each object specifies the Amazon Web Services account ID for an account and a new status for that account.

", + "smithy.api#jsonName": "accounts" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.macie2#BatchUpdateAutomatedDiscoveryAccountsResponse": { + "type": "structure", + "members": { + "errors": { + "target": "com.amazonaws.macie2#__listOfAutomatedDiscoveryAccountUpdateError", + "traits": { + "smithy.api#documentation": "

An array of objects, one for each account whose status wasn’t changed. Each object identifies the account and explains why the status of automated sensitive data discovery wasn’t changed for the account. This value is null if the request succeeded for all specified accounts.
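A rough sketch of driving this operation from Soto's generated Macie2 client (the SotoMacie2 module name, the batchUpdateAutomatedDiscoveryAccounts method, and the member spellings are assumptions derived from the shapes above; the account IDs are AWS-style example values): enable automated discovery for one member account, disable it for another, then report any per-account failures.

```swift
import SotoMacie2  // assumed module name for the generated Macie2 service client

func updateDiscoveryStatuses(_ macie2: Macie2) async throws {
    let response = try await macie2.batchUpdateAutomatedDiscoveryAccounts(
        .init(accounts: [
            .init(accountId: "111122223333", status: .enabled),   // example account IDs
            .init(accountId: "444455556666", status: .disabled),
        ])
    )
    // errors is null/empty when the status change succeeded for every specified account.
    for failure in response.errors ?? [] {
        print("\(failure.accountId ?? "unknown account"): \(String(describing: failure.errorCode))")
    }
}
```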

", + "smithy.api#jsonName": "errors" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.macie2#BlockPublicAccess": { "type": "structure", "members": { @@ -975,6 +1191,13 @@ "smithy.api#jsonName": "allowsUnencryptedObjectUploads" } }, + "automatedDiscoveryMonitoringStatus": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryMonitoringStatus", + "traits": { + "smithy.api#documentation": "

Specifies whether automated sensitive data discovery is currently configured to analyze objects in the bucket. Possible values are: MONITORED, the bucket is included in analyses; and, NOT_MONITORED, the bucket is excluded from analyses. If automated sensitive data discovery is disabled for your account, this value is NOT_MONITORED.

", + "smithy.api#jsonName": "automatedDiscoveryMonitoringStatus" + } + }, "bucketArn": { "target": "com.amazonaws.macie2#__string", "traits": { @@ -1027,14 +1250,14 @@ "jobDetails": { "target": "com.amazonaws.macie2#JobDetails", "traits": { - "smithy.api#documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze data in the bucket, and, if so, the details of the job that ran most recently.

", + "smithy.api#documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze objects in the bucket, and, if so, the details of the job that ran most recently.

", "smithy.api#jsonName": "jobDetails" } }, "lastAutomatedDiscoveryTime": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed data in the bucket while performing automated sensitive data discovery for your account. This value is null if automated sensitive data discovery is currently disabled for your account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

", "smithy.api#jsonName": "lastAutomatedDiscoveryTime" } }, @@ -1083,7 +1306,7 @@ "sensitivityScore": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). This value is null if automated sensitive data discovery is currently disabled for your account.

", + "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

", "smithy.api#jsonName": "sensitivityScore" } }, @@ -1159,7 +1382,7 @@ } }, "traits": { - "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing metadata from Amazon S3 for an S3 bucket and the bucket's objects.

" + "smithy.api#documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

" } }, "com.amazonaws.macie2#BucketPermissionConfiguration": { @@ -1408,7 +1631,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies where to store data classification results, and the encryption settings to use when storing results in that location. The location must be an S3 bucket.

" + "smithy.api#documentation": "

Specifies where to store data classification results, and the encryption settings to use when storing results in that location. The location must be an S3 general purpose bucket.

" } }, "com.amazonaws.macie2#ClassificationResult": { @@ -1759,7 +1982,7 @@ "target": "com.amazonaws.macie2#JobType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The schedule for running the job. Valid values are:

  • ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property.

  • SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to define the recurrence pattern for the job.

", + "smithy.api#documentation": "

The schedule for running the job. Valid values are:

  • ONE_TIME - Run the job only once. If you specify this value, don't specify a value for the scheduleFrequency property.

  • SCHEDULED - Run the job on a daily, weekly, or monthly basis. If you specify this value, use the scheduleFrequency property to specify the recurrence pattern for the job.

", "smithy.api#jsonName": "jobType", "smithy.api#required": {} } @@ -1774,7 +1997,7 @@ "managedDataIdentifierSelector": { "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", "traits": { - "smithy.api#documentation": "

The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:

  • ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property.

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

If you don't specify a value for this property, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you don't specify a value for this property or you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts.

For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.

", + "smithy.api#documentation": "

The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:

  • ALL - Use all managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. If you specify this value, specify at least one value for the customDataIdentifierIds property and don't specify any values for the managedDataIdentifierIds property.

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers. If you specify this value, don't specify any values for the managedDataIdentifierIds property.

If you don't specify a value for this property, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and you specify ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If you don't specify a value for this property or you specify RECOMMENDED for a recurring job, each job run automatically uses all the managed data identifiers that are in the recommended set when the run starts.

To learn about individual managed data identifiers or determine which ones are in the recommended set, see Using managed data identifiers or Recommended managed data identifiers in the Amazon Macie User Guide.

", "smithy.api#jsonName": "managedDataIdentifierSelector" } }, @@ -3259,7 +3482,7 @@ "allowListIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array of unique identifiers, one for each allow list that the job uses when it analyzes data.

", + "smithy.api#documentation": "

An array of unique identifiers, one for each allow list that the job is configured to use when it analyzes data.

", "smithy.api#jsonName": "allowListIds" } }, @@ -3281,7 +3504,7 @@ "customDataIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array of unique identifiers, one for each custom data identifier that the job uses when it analyzes data. This value is null if the job uses only managed data identifiers to analyze data.

", + "smithy.api#documentation": "

An array of unique identifiers, one for each custom data identifier that the job is configured to use when it analyzes data. This value is null if the job is configured to use only managed data identifiers to analyze data.

", "smithy.api#jsonName": "customDataIdentifierIds" } }, @@ -3351,7 +3574,7 @@ "managedDataIdentifierSelector": { "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", "traits": { - "smithy.api#documentation": "

The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are:

  • ALL - Use all managed data identifiers.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds).

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers.

If this value is null, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts.

For information about individual managed data identifiers or to determine which ones are in the recommended set, see Using managed data identifiers and Recommended managed data identifiers in the Amazon Macie User Guide.

", + "smithy.api#documentation": "

The selection type that determines which managed data identifiers the job uses when it analyzes data. Possible values are:

  • ALL - Use all managed data identifiers.

  • EXCLUDE - Use all managed data identifiers except the ones specified by the managedDataIdentifierIds property.

  • INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.

  • NONE - Don't use any managed data identifiers. Use only custom data identifiers (customDataIdentifierIds).

  • RECOMMENDED (default) - Use the recommended set of managed data identifiers.

If this value is null, the job uses the recommended set of managed data identifiers.

If the job is a recurring job and this value is ALL or EXCLUDE, each job run automatically uses new managed data identifiers that are released. If this value is null or RECOMMENDED for a recurring job, each job run uses all the managed data identifiers that are in the recommended set when the run starts.

To learn about individual managed data identifiers or determine which ones are in the recommended set, see Using managed data identifiers or Recommended managed data identifiers in the Amazon Macie User Guide.

", "smithy.api#jsonName": "managedDataIdentifierSelector" } }, @@ -3393,7 +3616,7 @@ "tags": { "target": "com.amazonaws.macie2#TagMap", "traits": { - "smithy.api#documentation": "

A map of key-value pairs that specifies which tags (keys and values) are associated with the classification job.

", + "smithy.api#documentation": "

A map of key-value pairs that specifies which tags (keys and values) are associated with the job.

", "smithy.api#jsonName": "tags" } }, @@ -3542,7 +3765,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about a type of sensitive data that Amazon Macie found in an S3 bucket while performing automated sensitive data discovery for the bucket. The information also specifies the custom data identifier or managed data identifier that detected the data. This information is available only if automated sensitive data discovery is currently enabled for your account.

" + "smithy.api#documentation": "

Provides information about a type of sensitive data that Amazon Macie found in an S3 bucket while performing automated sensitive data discovery for an account. The information also specifies the custom or managed data identifier that detected the data. This information is available only if automated sensitive data discovery has been enabled for the account.

" } }, "com.amazonaws.macie2#DisableMacie": { @@ -4305,14 +4528,14 @@ "ipAddressDetails": { "target": "com.amazonaws.macie2#IpAddressDetails", "traits": { - "smithy.api#documentation": "

The IP address of the device that the entity used to perform the action on the affected resource. This object also provides information such as the owner and geographic location for the IP address.

", + "smithy.api#documentation": "

The IP address and related details about the device that the entity used to perform the action on the affected resource. The details can include information such as the owner and geographic location of the IP address.

", "smithy.api#jsonName": "ipAddressDetails" } }, "userIdentity": { "target": "com.amazonaws.macie2#UserIdentity", "traits": { - "smithy.api#documentation": "

The type and other characteristics of the entity that performed the action on the affected resource.

", + "smithy.api#documentation": "

The type and other characteristics of the entity that performed the action on the affected resource. This value is null if the action was performed by an anonymous (unauthenticated) entity.

", "smithy.api#jsonName": "userIdentity" } } @@ -4767,7 +4990,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the configuration settings and status of automated sensitive data discovery for an account.

", + "smithy.api#documentation": "

Retrieves the configuration settings and status of automated sensitive data discovery for an organization or standalone account.

", "smithy.api#http": { "method": "GET", "uri": "/automated-discovery/configuration", @@ -4785,45 +5008,52 @@ "com.amazonaws.macie2#GetAutomatedDiscoveryConfigurationResponse": { "type": "structure", "members": { + "autoEnableOrganizationMembers": { + "target": "com.amazonaws.macie2#AutoEnableMode", + "traits": { + "smithy.api#documentation": "

Specifies whether automated sensitive data discovery is enabled automatically for accounts in the organization. Possible values are: ALL, enable it for all existing accounts and new member accounts; NEW, enable it only for new member accounts; and, NONE, don't enable it for any accounts.

", + "smithy.api#jsonName": "autoEnableOrganizationMembers" + } + }, "classificationScopeId": { "target": "com.amazonaws.macie2#ClassificationScopeId", "traits": { - "smithy.api#documentation": "

The unique identifier for the classification scope that's used when performing automated sensitive data discovery for the account. The classification scope specifies S3 buckets to exclude from automated sensitive data discovery.

", + "smithy.api#documentation": "

The unique identifier for the classification scope that's used when performing automated sensitive data discovery. The classification scope specifies S3 buckets to exclude from analyses.

", "smithy.api#jsonName": "classificationScopeId" } }, "disabledAt": { "target": "com.amazonaws.macie2#Timestamp", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently disabled for the account. This value is null if automated sensitive data discovery wasn't enabled and subsequently disabled for the account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently disabled. This value is null if automated sensitive data discovery is currently enabled.

", "smithy.api#jsonName": "disabledAt" } }, "firstEnabledAt": { "target": "com.amazonaws.macie2#Timestamp", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was initially enabled for the account. This value is null if automated sensitive data discovery has never been enabled for the account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was initially enabled. This value is null if automated sensitive data discovery has never been enabled.

", "smithy.api#jsonName": "firstEnabledAt" } }, "lastUpdatedAt": { "target": "com.amazonaws.macie2#Timestamp", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when automated sensitive data discovery was most recently enabled or disabled for the account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when the configuration settings or status of automated sensitive data discovery was most recently changed.

", "smithy.api#jsonName": "lastUpdatedAt" } }, "sensitivityInspectionTemplateId": { "target": "com.amazonaws.macie2#SensitivityInspectionTemplateId", "traits": { - "smithy.api#documentation": "

The unique identifier for the sensitivity inspection template that's used when performing automated sensitive data discovery for the account. The template specifies which allow lists, custom data identifiers, and managed data identifiers to use when analyzing data.

", + "smithy.api#documentation": "

The unique identifier for the sensitivity inspection template that's used when performing automated sensitive data discovery. The template specifies which allow lists, custom data identifiers, and managed data identifiers to use when analyzing data.

", "smithy.api#jsonName": "sensitivityInspectionTemplateId" } }, "status": { "target": "com.amazonaws.macie2#AutomatedDiscoveryStatus", "traits": { - "smithy.api#documentation": "

The current status of the automated sensitive data discovery configuration for the account. Possible values are: ENABLED, use the specified settings to perform automated sensitive data discovery activities for the account; and, DISABLED, don't perform automated sensitive data discovery activities for the account.

", + "smithy.api#documentation": "

The current status of automated sensitive data discovery for the organization or account. Possible values are: ENABLED, use the specified settings to perform automated sensitive data discovery activities; and, DISABLED, don't perform automated sensitive data discovery activities.
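A minimal sketch of reading this configuration with Soto's generated client (module name, method, and the empty-request initializer are assumptions based on the shapes above):

```swift
import SotoMacie2  // assumed module name for the generated Macie2 service client

// Report whether automated sensitive data discovery is enabled and how
// new organization accounts are treated.
func describeDiscoveryConfiguration(_ macie2: Macie2) async throws {
    let config = try await macie2.getAutomatedDiscoveryConfiguration(.init())
    print("status:", String(describing: config.status))
    print("autoEnableOrganizationMembers:", String(describing: config.autoEnableOrganizationMembers))
}
```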

", "smithy.api#jsonName": "status" } } @@ -6368,14 +6598,14 @@ "excludes": { "target": "com.amazonaws.macie2#SensitivityInspectionTemplateExcludes", "traits": { - "smithy.api#documentation": "

The managed data identifiers that are explicitly excluded (not used) when analyzing data.

", + "smithy.api#documentation": "

The managed data identifiers that are explicitly excluded (not used) when performing automated sensitive data discovery.

", "smithy.api#jsonName": "excludes" } }, "includes": { "target": "com.amazonaws.macie2#SensitivityInspectionTemplateIncludes", "traits": { - "smithy.api#documentation": "

The allow lists, custom data identifiers, and managed data identifiers that are explicitly included (used) when analyzing data.

", + "smithy.api#documentation": "

The allow lists, custom data identifiers, and managed data identifiers that are explicitly included (used) when performing automated sensitive data discovery.

", "smithy.api#jsonName": "includes" } }, @@ -6979,21 +7209,21 @@ "isDefinedInJob": { "target": "com.amazonaws.macie2#IsDefinedInJob", "traits": { - "smithy.api#documentation": "

Specifies whether any one-time or recurring jobs are configured to analyze data in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more jobs and at least one of those jobs has a status other than CANCELLED. Or the bucket matched the bucket criteria (S3BucketCriteriaForJob) for at least one job that previously ran.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any jobs, all the jobs that explicitly include the bucket in their bucket definitions have a status of CANCELLED, or the bucket didn't match the bucket criteria (S3BucketCriteriaForJob) for any jobs that previously ran.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

", + "smithy.api#documentation": "

Specifies whether any one-time or recurring jobs are configured to analyze objects in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more jobs and at least one of those jobs has a status other than CANCELLED. Or the bucket matched the bucket criteria (S3BucketCriteriaForJob) for at least one job that previously ran.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any jobs, all the jobs that explicitly include the bucket in their bucket definitions have a status of CANCELLED, or the bucket didn't match the bucket criteria (S3BucketCriteriaForJob) for any jobs that previously ran.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

", "smithy.api#jsonName": "isDefinedInJob" } }, "isMonitoredByJob": { "target": "com.amazonaws.macie2#IsMonitoredByJob", "traits": { - "smithy.api#documentation": "

Specifies whether any recurring jobs are configured to analyze data in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more recurring jobs or the bucket matches the bucket criteria (S3BucketCriteriaForJob) for one or more recurring jobs. At least one of those jobs has a status other than CANCELLED.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any recurring jobs, the bucket doesn't match the bucket criteria (S3BucketCriteriaForJob) for any recurring jobs, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

", + "smithy.api#documentation": "

Specifies whether any recurring jobs are configured to analyze objects in the bucket. Possible values are:

  • TRUE - The bucket is explicitly included in the bucket definition (S3BucketDefinitionForJob) for one or more recurring jobs or the bucket matches the bucket criteria (S3BucketCriteriaForJob) for one or more recurring jobs. At least one of those jobs has a status other than CANCELLED.

  • FALSE - The bucket isn't explicitly included in the bucket definition (S3BucketDefinitionForJob) for any recurring jobs, the bucket doesn't match the bucket criteria (S3BucketCriteriaForJob) for any recurring jobs, or all the recurring jobs that are configured to analyze data in the bucket have a status of CANCELLED.

  • UNKNOWN - An exception occurred when Amazon Macie attempted to retrieve job data for the bucket.

", "smithy.api#jsonName": "isMonitoredByJob" } }, "lastJobId": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The unique identifier for the job that ran most recently and is configured to analyze data in the bucket, either the latest run of a recurring job or the only run of a one-time job.

This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

", + "smithy.api#documentation": "

The unique identifier for the job that ran most recently and is configured to analyze objects in the bucket, either the latest run of a recurring job or the only run of a one-time job.

This value is typically null if the value for the isDefinedInJob property is FALSE or UNKNOWN.

", "smithy.api#jsonName": "lastJobId" } }, @@ -7006,7 +7236,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze data in an S3 bucket, and, if so, the details of the job that ran most recently.

" + "smithy.api#documentation": "

Specifies whether any one-time or recurring classification jobs are configured to analyze objects in an S3 bucket, and, if so, the details of the job that ran most recently.

" } }, "com.amazonaws.macie2#JobScheduleFrequency": { @@ -7357,6 +7587,97 @@ "smithy.api#output": {} } }, + "com.amazonaws.macie2#ListAutomatedDiscoveryAccounts": { + "type": "operation", + "input": { + "target": "com.amazonaws.macie2#ListAutomatedDiscoveryAccountsRequest" + }, + "output": { + "target": "com.amazonaws.macie2#ListAutomatedDiscoveryAccountsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.macie2#AccessDeniedException" + }, + { + "target": "com.amazonaws.macie2#InternalServerException" + }, + { + "target": "com.amazonaws.macie2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.macie2#ThrottlingException" + }, + { + "target": "com.amazonaws.macie2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the status of automated sensitive data discovery for one or more accounts.

", + "smithy.api#http": { + "method": "GET", + "uri": "/automated-discovery/accounts", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "items", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.macie2#ListAutomatedDiscoveryAccountsRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.macie2#__listOf__string", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID for each account, for as many as 50 accounts. To retrieve the status for multiple accounts, append the accountIds parameter and argument for each account, separated by an ampersand (&). To retrieve the status for all the accounts in an organization, omit this parameter.

", + "smithy.api#httpQuery": "accountIds" + } + }, + "maxResults": { + "target": "com.amazonaws.macie2#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to include in each page of a paginated response.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.macie2#ListAutomatedDiscoveryAccountsResponse": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.macie2#__listOfAutomatedDiscoveryAccount", + "traits": { + "smithy.api#documentation": "

An array of objects, one for each account specified in the request. Each object specifies the Amazon Web Services account ID for an account and the current status of automated sensitive data discovery for that account.

", + "smithy.api#jsonName": "items" + } + }, + "nextToken": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
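A sketch of paging through these results with Soto's generated client (names are assumptions based on the shapes above): the nextToken from each response is fed back into the next request until no further token is returned.

```swift
import SotoMacie2  // assumed module name for the generated Macie2 service client

func listAllDiscoveryAccounts(_ macie2: Macie2) async throws -> [Macie2.AutomatedDiscoveryAccount] {
    var accounts: [Macie2.AutomatedDiscoveryAccount] = []
    var nextToken: String? = nil
    repeat {
        let page = try await macie2.listAutomatedDiscoveryAccounts(
            .init(maxResults: 25, nextToken: nextToken)
        )
        accounts.append(contentsOf: page.items ?? [])
        nextToken = page.nextToken
    } while nextToken != nil
    return accounts
}
```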

", + "smithy.api#jsonName": "nextToken" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.macie2#ListClassificationJobs": { "type": "operation", "input": { @@ -7857,7 +8178,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about the Amazon Macie membership invitations that were received by an account.

", + "smithy.api#documentation": "

Retrieves information about Amazon Macie membership invitations that were received by an account.

", "smithy.api#http": { "method": "GET", "uri": "/invitations", @@ -8324,7 +8645,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about objects that were selected from an S3 bucket for automated sensitive data discovery.

", + "smithy.api#documentation": "

Retrieves information about objects that Amazon Macie selected from an S3 bucket for automated sensitive data discovery.

", "smithy.api#http": { "method": "GET", "uri": "/resource-profiles/artifacts", @@ -8621,6 +8942,9 @@ { "target": "com.amazonaws.macie2#BatchGetCustomDataIdentifiers" }, + { + "target": "com.amazonaws.macie2#BatchUpdateAutomatedDiscoveryAccounts" + }, { "target": "com.amazonaws.macie2#CreateAllowList" }, @@ -8759,6 +9083,9 @@ { "target": "com.amazonaws.macie2#ListAllowLists" }, + { + "target": "com.amazonaws.macie2#ListAutomatedDiscoveryAccounts" + }, { "target": "com.amazonaws.macie2#ListClassificationJobs" }, @@ -8865,6 +9192,9 @@ "name": "macie2" }, "aws.protocols#restJson1": {}, + "smithy.api#auth": [ + "aws.auth#sigv4" + ], "smithy.api#documentation": "

Amazon Macie

", "smithy.api#title": "Amazon Macie 2", "smithy.rules#endpointRuleSet": { @@ -9886,6 +10216,13 @@ "smithy.api#jsonName": "accountId" } }, + "automatedDiscoveryMonitoringStatus": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryMonitoringStatus", + "traits": { + "smithy.api#documentation": "

Specifies whether automated sensitive data discovery is currently configured to analyze objects in the bucket. Possible values are: MONITORED, the bucket is included in analyses; and, NOT_MONITORED, the bucket is excluded from analyses. If automated sensitive data discovery is disabled for your account, this value is NOT_MONITORED.

", + "smithy.api#jsonName": "automatedDiscoveryMonitoringStatus" + } + }, "bucketName": { "target": "com.amazonaws.macie2#__string", "traits": { @@ -9931,7 +10268,7 @@ "lastAutomatedDiscoveryTime": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed data in the bucket while performing automated sensitive data discovery for your account. This value is null if automated sensitive data discovery is currently disabled for your account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when Amazon Macie most recently analyzed objects in the bucket while performing automated sensitive data discovery. This value is null if automated sensitive data discovery is disabled for your account.

", "smithy.api#jsonName": "lastAutomatedDiscoveryTime" } }, @@ -9952,7 +10289,7 @@ "sensitivityScore": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The current sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive). This value is null if automated sensitive data discovery is currently disabled for your account.

", + "smithy.api#documentation": "

The sensitivity score for the bucket, ranging from -1 (classification error) to 100 (sensitive).

If automated sensitive data discovery has never been enabled for your account or it’s been disabled for your organization or your standalone account for more than 30 days, possible values are: 1, the bucket is empty; or, 50, the bucket stores objects but it’s been excluded from recent analyses.

", "smithy.api#jsonName": "sensitivityScore" } }, @@ -10350,7 +10687,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the configuration settings for storing data classification results.

", + "smithy.api#documentation": "

Adds or updates the configuration settings for storing data classification results.

", "smithy.api#http": { "method": "PUT", "uri": "/classification-export-configuration", @@ -10673,7 +11010,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about an S3 object that Amazon Macie selected for analysis while performing automated sensitive data discovery for an S3 bucket, and the status and results of the analysis. This information is available only if automated sensitive data discovery is currently enabled for your account.

" + "smithy.api#documentation": "

Provides information about an S3 object that Amazon Macie selected for analysis while performing automated sensitive data discovery for an account, and the status and results of the analysis. This information is available only if automated sensitive data discovery has been enabled for the account.

" } }, "com.amazonaws.macie2#ResourceStatistics": { @@ -10744,7 +11081,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides statistical data for sensitive data discovery metrics that apply to an S3 bucket that Amazon Macie monitors and analyzes for your account. The statistics capture the results of automated sensitive data discovery activities that Macie has performed for the bucket. The data is available only if automated sensitive data discovery is currently enabled for your account.

" + "smithy.api#documentation": "

Provides statistical data for sensitive data discovery metrics that apply to an S3 bucket that Amazon Macie monitors and analyzes for an account, if automated sensitive data discovery has been enabled for the account. The data captures the results of automated sensitive data discovery activities that Macie has performed for the bucket.

" } }, "com.amazonaws.macie2#ResourcesAffected": { @@ -11115,7 +11452,7 @@ "target": "com.amazonaws.macie2#__string", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the bucket.

", + "smithy.api#documentation": "

The name of the bucket. This must be the name of an existing general purpose bucket.

", "smithy.api#jsonName": "bucketName", "smithy.api#required": {} } @@ -11545,7 +11882,7 @@ "values": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are:

  • ACCOUNT_ID - A string that represents the unique identifier for the Amazon Web Services account that owns the resource.

  • S3_BUCKET_EFFECTIVE_PERMISSION - A string that represents an enumerated value that Macie defines for the BucketPublicAccess.effectivePermission property of an S3 bucket.

  • S3_BUCKET_NAME - A string that represents the name of an S3 bucket.

  • S3_BUCKET_SHARED_ACCESS - A string that represents an enumerated value that Macie defines for the BucketMetadata.sharedAccess property of an S3 bucket.

Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values.

", + "smithy.api#documentation": "

An array that lists one or more values to use in the condition. If you specify multiple values, Amazon Macie uses OR logic to join the values. Valid values for each supported property (key) are:

  • ACCOUNT_ID - A string that represents the unique identifier for the Amazon Web Services account that owns the resource.

  • AUTOMATED_DISCOVERY_MONITORING_STATUS - A string that represents an enumerated value that Macie defines for the BucketMetadata.automatedDiscoveryMonitoringStatus property of an S3 bucket.

  • S3_BUCKET_EFFECTIVE_PERMISSION - A string that represents an enumerated value that Macie defines for the BucketPublicAccess.effectivePermission property of an S3 bucket.

  • S3_BUCKET_NAME - A string that represents the name of an S3 bucket.

  • S3_BUCKET_SHARED_ACCESS - A string that represents an enumerated value that Macie defines for the BucketMetadata.sharedAccess property of an S3 bucket.

Values are case sensitive. Also, Macie doesn't support use of partial values or wildcard characters in values.

", "smithy.api#jsonName": "values" } } @@ -11580,6 +11917,12 @@ "traits": { "smithy.api#enumValue": "S3_BUCKET_SHARED_ACCESS" } + }, + "AUTOMATED_DISCOVERY_MONITORING_STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTOMATED_DISCOVERY_MONITORING_STATUS" + } } }, "traits": { @@ -11840,7 +12183,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies managed data identifiers to exclude (not use) when performing automated sensitive data discovery for an Amazon Macie account. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" + "smithy.api#documentation": "

Specifies managed data identifiers to exclude (not use) when performing automated sensitive data discovery. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" } }, "com.amazonaws.macie2#SensitivityInspectionTemplateId": { @@ -11875,7 +12218,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the allow lists, custom data identifiers, and managed data identifiers to include (use) when performing automated sensitive data discovery for an Amazon Macie account. The configuration must specify at least one custom data identifier or managed data identifier. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" + "smithy.api#documentation": "

Specifies the allow lists, custom data identifiers, and managed data identifiers to include (use) when performing automated sensitive data discovery. The configuration must specify at least one custom data identifier or managed data identifier. For information about the managed data identifiers that Amazon Macie currently provides, see Using managed data identifiers in the Amazon Macie User Guide.

" } }, "com.amazonaws.macie2#SensitivityInspectionTemplatesEntry": { @@ -11897,7 +12240,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about the sensitivity inspection template for an Amazon Macie account. Macie uses the template's settings when it performs automated sensitive data discovery for the account.

" + "smithy.api#documentation": "

Provides information about the sensitivity inspection template for an Amazon Macie account.

" } }, "com.amazonaws.macie2#ServerSideEncryption": { @@ -12594,7 +12937,7 @@ } ], "traits": { - "smithy.api#documentation": "

Tests a custom data identifier.

", + "smithy.api#documentation": "

Tests criteria for a custom data identifier.

", "smithy.api#http": { "method": "POST", "uri": "/custom-data-identifiers/test", @@ -13039,7 +13382,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables or disables automated sensitive data discovery for an account.

", + "smithy.api#documentation": "

Changes the configuration settings and status of automated sensitive data discovery for an organization or standalone account.

", "smithy.api#http": { "method": "PUT", "uri": "/automated-discovery/configuration", @@ -13050,11 +13393,18 @@ "com.amazonaws.macie2#UpdateAutomatedDiscoveryConfigurationRequest": { "type": "structure", "members": { + "autoEnableOrganizationMembers": { + "target": "com.amazonaws.macie2#AutoEnableMode", + "traits": { + "smithy.api#documentation": "

Specifies whether to automatically enable automated sensitive data discovery for accounts in the organization. Valid values are: ALL (default), enable it for all existing accounts and new member accounts; NEW, enable it only for new member accounts; and, NONE, don't enable it for any accounts.

If you specify NEW or NONE, automated sensitive data discovery continues to be enabled for any existing accounts that it's currently enabled for. To enable or disable it for individual member accounts, specify NEW or NONE, and then enable or disable it for each account by using the BatchUpdateAutomatedDiscoveryAccounts operation.

", + "smithy.api#jsonName": "autoEnableOrganizationMembers" + } + }, "status": { "target": "com.amazonaws.macie2#AutomatedDiscoveryStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The new status of automated sensitive data discovery for the account. Valid values are: ENABLED, start or resume automated sensitive data discovery activities for the account; and, DISABLED, stop performing automated sensitive data discovery activities for the account.

When you enable automated sensitive data discovery for the first time, Amazon Macie uses default configuration settings to determine which data sources to analyze and which managed data identifiers to use. To change these settings, use the UpdateClassificationScope and UpdateSensitivityInspectionTemplate operations, respectively. If you change the settings and subsequently disable the configuration, Amazon Macie retains your changes.

", + "smithy.api#documentation": "

The new status of automated sensitive data discovery for the organization or account. Valid values are: ENABLED, start or resume all automated sensitive data discovery activities; and, DISABLED, stop performing all automated sensitive data discovery activities.

If you specify DISABLED for an administrator account, you also disable automated sensitive data discovery for all member accounts in the organization.
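A short sketch, again assuming Soto's generated client (module, method, and member names are assumptions based on the shapes above), that applies the configuration described here: enable automated sensitive data discovery and auto-enable it only for accounts that join the organization in the future.

```swift
import SotoMacie2  // assumed module name for the generated Macie2 service client

func enableDiscoveryForNewMembers(_ macie2: Macie2) async throws {
    _ = try await macie2.updateAutomatedDiscoveryConfiguration(
        .init(autoEnableOrganizationMembers: .new, status: .enabled)
    )
}
```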

", "smithy.api#jsonName": "status", "smithy.api#required": {} } @@ -13516,7 +13866,7 @@ "target": "com.amazonaws.macie2#__boolean", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies whether to enable Amazon Macie automatically for an account when the account is added to the organization in Organizations.

", + "smithy.api#documentation": "

Specifies whether to enable Amazon Macie automatically for accounts that are added to the organization in Organizations.

", "smithy.api#jsonName": "autoEnable", "smithy.api#required": {} } @@ -13817,7 +14167,7 @@ "excludes": { "target": "com.amazonaws.macie2#SensitivityInspectionTemplateExcludes", "traits": { - "smithy.api#documentation": "

The managed data identifiers to explicitly exclude (not use) when analyzing data.

To exclude an allow list or custom data identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively.

", + "smithy.api#documentation": "

The managed data identifiers to explicitly exclude (not use) when performing automated sensitive data discovery.

To exclude an allow list or custom data identifier that's currently included by the template, update the values for the SensitivityInspectionTemplateIncludes.allowListIds and SensitivityInspectionTemplateIncludes.customDataIdentifierIds properties, respectively.

", "smithy.api#jsonName": "excludes" } }, @@ -13832,7 +14182,7 @@ "includes": { "target": "com.amazonaws.macie2#SensitivityInspectionTemplateIncludes", "traits": { - "smithy.api#documentation": "

The allow lists, custom data identifiers, and managed data identifiers to explicitly include (use) when analyzing data.

", + "smithy.api#documentation": "

The allow lists, custom data identifiers, and managed data identifiers to explicitly include (use) when performing automated sensitive data discovery.

", "smithy.api#jsonName": "includes" } } @@ -13897,7 +14247,7 @@ "automatedDiscoveryFreeTrialStartDate": { "target": "com.amazonaws.macie2#__timestampIso8601", "traits": { - "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when the free trial of automated sensitive data discovery started for the account. If the account is a member account in an organization, this value is the same as the value for the organization's Amazon Macie administrator account.

", + "smithy.api#documentation": "

The date and time, in UTC and extended ISO 8601 format, when the free trial of automated sensitive data discovery started for the account. This value is null if automated sensitive data discovery hasn't been enabled for the account.

", "smithy.api#jsonName": "automatedDiscoveryFreeTrialStartDate" } }, @@ -14358,6 +14708,24 @@ "target": "com.amazonaws.macie2#AllowListSummary" } }, + "com.amazonaws.macie2#__listOfAutomatedDiscoveryAccount": { + "type": "list", + "member": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryAccount" + } + }, + "com.amazonaws.macie2#__listOfAutomatedDiscoveryAccountUpdate": { + "type": "list", + "member": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryAccountUpdate" + } + }, + "com.amazonaws.macie2#__listOfAutomatedDiscoveryAccountUpdateError": { + "type": "list", + "member": { + "target": "com.amazonaws.macie2#AutomatedDiscoveryAccountUpdateError" + } + }, "com.amazonaws.macie2#__listOfBatchGetCustomDataIdentifierSummary": { "type": "list", "member": { diff --git a/models/mailmanager.json b/models/mailmanager.json new file mode 100644 index 0000000000..39fb5641d2 --- /dev/null +++ b/models/mailmanager.json @@ -0,0 +1,7827 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.mailmanager#AcceptAction": { + "type": "enum", + "members": { + "ALLOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALLOW" + } + }, + "DENY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DENY" + } + } + } + }, + "com.amazonaws.mailmanager#AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.mailmanager#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Occurs when a user is denied access to a specific resource or action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.mailmanager#ActionFailurePolicy": { + "type": "enum", + "members": { + "CONTINUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTINUE" + } + }, + "DROP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DROP" + } + } + } + }, + "com.amazonaws.mailmanager#AddHeaderAction": { + "type": "structure", + "members": { + "HeaderName": { + "target": "com.amazonaws.mailmanager#HeaderName", + "traits": { + "smithy.api#documentation": "

The name of the header to add to an email. The header must be prefixed with\n \"X-\". Headers are added regardless of whether the header name pre-existed in\n the email.

", + "smithy.api#required": {} + } + }, + "HeaderValue": { + "target": "com.amazonaws.mailmanager#HeaderValue", + "traits": { + "smithy.api#documentation": "

The value of the header to add to the email.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The action to add a header to a message. When executed, this action will add the given\n header to the message.

" + } + }, + "com.amazonaws.mailmanager#AddonInstance": { + "type": "structure", + "members": { + "AddonInstanceId": { + "target": "com.amazonaws.mailmanager#AddonInstanceId", + "traits": { + "smithy.api#documentation": "

The unique ID of the Add On instance.

" + } + }, + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The subscription ID for the instance.

" + } + }, + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName", + "traits": { + "smithy.api#documentation": "

The name of the Add On for the instance.

" + } + }, + "AddonInstanceArn": { + "target": "com.amazonaws.mailmanager#AddonInstanceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Add On instance.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Add On instance was created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An Add On instance represents a specific configuration of an Add On.

" + } + }, + "com.amazonaws.mailmanager#AddonInstanceArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#AddonInstanceId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 4, + "max": 67 + }, + "smithy.api#pattern": "^ai-[a-zA-Z0-9]{1,64}$" + } + }, + "com.amazonaws.mailmanager#AddonInstanceResource": { + "type": "resource", + "identifiers": { + "AddonInstanceId": { + "target": "com.amazonaws.mailmanager#AddonInstanceId" + } + }, + "properties": { + "AddonInstanceArn": { + "target": "com.amazonaws.mailmanager#AddonInstanceArn" + }, + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId" + }, + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName" + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp" + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList" + } + }, + "create": { + "target": "com.amazonaws.mailmanager#CreateAddonInstance" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetAddonInstance" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteAddonInstance" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListAddonInstances" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerAddonInstance" + } + } + }, + "com.amazonaws.mailmanager#AddonInstances": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#AddonInstance" + } + }, + "com.amazonaws.mailmanager#AddonName": { + "type": "string" + }, + "com.amazonaws.mailmanager#AddonSubscription": { + "type": "structure", + "members": { + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The unique ID of the Add On subscription.

" + } + }, + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName", + "traits": { + "smithy.api#documentation": "

The name of the Add On.

" + } + }, + "AddonSubscriptionArn": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Add On subscription.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Add On subscription was created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A subscription for an Add On representing the acceptance of its terms of use and\n additional pricing.

" + } + }, + "com.amazonaws.mailmanager#AddonSubscriptionArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#AddonSubscriptionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 4, + "max": 67 + }, + "smithy.api#pattern": "^as-[a-zA-Z0-9]{1,64}$" + } + }, + "com.amazonaws.mailmanager#AddonSubscriptionResource": { + "type": "resource", + "identifiers": { + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId" + } + }, + "properties": { + "AddonSubscriptionArn": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionArn" + }, + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName" + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp" + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList" + } + }, + "create": { + "target": "com.amazonaws.mailmanager#CreateAddonSubscription" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetAddonSubscription" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteAddonSubscription" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListAddonSubscriptions" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerAddonSubscription" + } + } + }, + "com.amazonaws.mailmanager#AddonSubscriptions": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#AddonSubscription" + } + }, + "com.amazonaws.mailmanager#Analysis": { + "type": "structure", + "members": { + "Analyzer": { + "target": "com.amazonaws.mailmanager#AnalyzerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Add On.

", + "smithy.api#required": {} + } + }, + "ResultField": { + "target": "com.amazonaws.mailmanager#ResultField", + "traits": { + "smithy.api#documentation": "

The returned value from an Add On.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The result of an analysis can be used in conditions to trigger actions. Analyses can\n inspect the email content and report a certain aspect of the email.

" + } + }, + "com.amazonaws.mailmanager#AnalyzerArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {}, + "smithy.api#pattern": "^[a-zA-Z0-9:_/+=,@.#-]+$" + } + }, + "com.amazonaws.mailmanager#Archive": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString", + "traits": { + "smithy.api#documentation": "

The unique identifier of the archive.

", + "smithy.api#required": {} + } + }, + "ArchiveName": { + "target": "com.amazonaws.mailmanager#ArchiveNameString", + "traits": { + "smithy.api#documentation": "

The unique name assigned to the archive.

" + } + }, + "ArchiveState": { + "target": "com.amazonaws.mailmanager#ArchiveState", + "traits": { + "smithy.api#documentation": "

The current state of the archive:

  • ACTIVE – The archive is ready and available for use.

  • PENDING_DELETION – The archive has been marked for deletion\n and will be permanently deleted in 30 days. No further modifications can be made\n in this state.

" + } + }, + "LastUpdatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the archive was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An archive resource for storing and retaining emails.

" + } + }, + "com.amazonaws.mailmanager#ArchiveAction": { + "type": "structure", + "members": { + "ActionFailurePolicy": { + "target": "com.amazonaws.mailmanager#ActionFailurePolicy", + "traits": { + "smithy.api#documentation": "

A policy that states what to do in the case of failure. The action will fail if there\n are configuration errors. For example, the specified archive has been deleted.

" + } + }, + "TargetArchive": { + "target": "com.amazonaws.mailmanager#NameOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the archive to send the email to.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The action to archive the email by delivering the email to an Amazon SES archive.

" + } + }, + "com.amazonaws.mailmanager#ArchiveArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#ArchiveBooleanEmailAttribute": { + "type": "enum", + "members": { + "HAS_ATTACHMENTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HAS_ATTACHMENTS" + } + } + } + }, + "com.amazonaws.mailmanager#ArchiveBooleanExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#ArchiveBooleanToEvaluate", + "traits": { + "smithy.api#documentation": "

The email attribute value to evaluate.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#ArchiveBooleanOperator", + "traits": { + "smithy.api#documentation": "

The boolean operator to use for evaluation.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A boolean expression to evaluate email attribute values.

" + } + }, + "com.amazonaws.mailmanager#ArchiveBooleanOperator": { + "type": "enum", + "members": { + "IS_TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS_TRUE" + } + }, + "IS_FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS_FALSE" + } + } + } + }, + "com.amazonaws.mailmanager#ArchiveBooleanToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#ArchiveBooleanEmailAttribute", + "traits": { + "smithy.api#documentation": "

The name of the email attribute to evaluate.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The attribute to evaluate in a boolean expression.

" + } + }, + "com.amazonaws.mailmanager#ArchiveFilterCondition": { + "type": "union", + "members": { + "StringExpression": { + "target": "com.amazonaws.mailmanager#ArchiveStringExpression", + "traits": { + "smithy.api#documentation": "

A string expression to evaluate against email attributes.

" + } + }, + "BooleanExpression": { + "target": "com.amazonaws.mailmanager#ArchiveBooleanExpression", + "traits": { + "smithy.api#documentation": "

A boolean expression to evaluate against email attributes.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A filter condition used to include or exclude emails when exporting from or searching an archive.

" + } + }, + "com.amazonaws.mailmanager#ArchiveFilterConditions": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#ArchiveFilterCondition" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#ArchiveFilters": { + "type": "structure", + "members": { + "Include": { + "target": "com.amazonaws.mailmanager#ArchiveFilterConditions", + "traits": { + "smithy.api#documentation": "

The filter conditions for emails to include.

" + } + }, + "Unless": { + "target": "com.amazonaws.mailmanager#ArchiveFilterConditions", + "traits": { + "smithy.api#documentation": "

The filter conditions for emails to exclude.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A set of filter conditions to include and/or exclude emails.

" + } + }, + "com.amazonaws.mailmanager#ArchiveId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 66 + }, + "smithy.api#pattern": "^a-[\\w]{1,64}$" + } + }, + "com.amazonaws.mailmanager#ArchiveIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 66 + } + } + }, + "com.amazonaws.mailmanager#ArchiveNameString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[a-zA-Z0-9][a-zA-Z0-9_-]*[a-zA-Z0-9]$" + } + }, + "com.amazonaws.mailmanager#ArchiveResource": { + "type": "resource", + "identifiers": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString" + } + }, + "properties": { + "ArchiveArn": { + "target": "com.amazonaws.mailmanager#ArchiveArn" + }, + "ArchiveName": { + "target": "com.amazonaws.mailmanager#ArchiveNameString" + }, + "ArchiveState": { + "target": "com.amazonaws.mailmanager#ArchiveState" + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp" + }, + "LastUpdatedTimestamp": { + "target": "smithy.api#Timestamp" + }, + "Retention": { + "target": "com.amazonaws.mailmanager#ArchiveRetention" + }, + "KmsKeyArn": { + "target": "com.amazonaws.mailmanager#KmsKeyArn" + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList" + } + }, + "create": { + "target": "com.amazonaws.mailmanager#CreateArchive" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetArchive" + }, + "update": { + "target": "com.amazonaws.mailmanager#UpdateArchive" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteArchive" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListArchives" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerArchive" + } + } + }, + "com.amazonaws.mailmanager#ArchiveRetention": { + "type": "union", + "members": { + "RetentionPeriod": { + "target": "com.amazonaws.mailmanager#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

The enum value sets the period for retaining emails in an archive.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The retention policy for an email archive that specifies how long emails are kept\n before being automatically deleted.

" + } + }, + "com.amazonaws.mailmanager#ArchiveState": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "PENDING_DELETION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PENDING_DELETION" + } + } + } + }, + "com.amazonaws.mailmanager#ArchiveStringEmailAttribute": { + "type": "enum", + "members": { + "TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TO" + } + }, + "FROM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FROM" + } + }, + "CC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CC" + } + }, + "SUBJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBJECT" + } + } + } + }, + "com.amazonaws.mailmanager#ArchiveStringExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#ArchiveStringToEvaluate", + "traits": { + "smithy.api#documentation": "

The attribute of the email to evaluate.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#ArchiveStringOperator", + "traits": { + "smithy.api#documentation": "

The operator to use when evaluating the string values.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#StringValueList", + "traits": { + "smithy.api#documentation": "

The list of string values to evaluate the email attribute against.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A string expression to evaluate an email attribute value against one or more string values.

" + } + }, + "com.amazonaws.mailmanager#ArchiveStringOperator": { + "type": "enum", + "members": { + "CONTAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS" + } + } + } + }, + "com.amazonaws.mailmanager#ArchiveStringToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#ArchiveStringEmailAttribute", + "traits": { + "smithy.api#documentation": "

The name of the email attribute to evaluate.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the email attribute to evaluate in a string expression.

" + } + }, + "com.amazonaws.mailmanager#ArchivedMessageId": { + "type": "string" + }, + "com.amazonaws.mailmanager#ArchivesList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Archive" + } + }, + "com.amazonaws.mailmanager#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.mailmanager#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request configuration has conflicts. For details, see the accompanying error message.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.mailmanager#CreateAddonInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateAddonInstanceRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateAddonInstanceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Add On instance for the subscription indicated in the request. The\n resulting Amazon Resource Name (ARN) can be used in a conditional statement for a rule set or traffic policy.\n

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateAddonInstanceRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token that Amazon SES uses to recognize subsequent retries of the same\n request.

", + "smithy.api#idempotencyToken": {} + } + }, + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The unique ID of a previously created subscription that an Add On instance is created\n for. You can only have one instance per subscription.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateAddonInstanceResponse": { + "type": "structure", + "members": { + "AddonInstanceId": { + "target": "com.amazonaws.mailmanager#AddonInstanceId", + "traits": { + "smithy.api#documentation": "

The unique ID of the Add On instance created by this API.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#CreateAddonSubscription": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateAddonSubscriptionRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateAddonSubscriptionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a subscription for an Add On representing the acceptance of its terms of use\n and additional pricing. The subscription can then be used to create an instance for use\n in rule sets or traffic policies.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateAddonSubscriptionRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token that Amazon SES uses to recognize subsequent retries of the same\n request.

", + "smithy.api#idempotencyToken": {} + } + }, + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName", + "traits": { + "smithy.api#documentation": "

The name of the Add On to subscribe to. You can only have one subscription for each\n Add On name.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateAddonSubscriptionResponse": { + "type": "structure", + "members": { + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The unique ID of the Add On subscription created by this API.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#CreateArchive": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateArchiveRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateArchiveResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new email archive resource for storing and retaining emails.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateArchiveRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token Amazon SES uses to recognize retries of this request.

", + "smithy.api#idempotencyToken": {} + } + }, + "ArchiveName": { + "target": "com.amazonaws.mailmanager#ArchiveNameString", + "traits": { + "smithy.api#documentation": "

A unique name for the new archive.

", + "smithy.api#required": {} + } + }, + "Retention": { + "target": "com.amazonaws.mailmanager#ArchiveRetention", + "traits": { + "smithy.api#documentation": "

The period for retaining emails in the archive before automatic deletion.

" + } + }, + "KmsKeyArn": { + "target": "com.amazonaws.mailmanager#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key for encrypting emails in the archive.

" + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to create a new email archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateArchiveResponse": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString", + "traits": { + "smithy.api#documentation": "

The unique identifier for the newly created archive.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response from creating a new email archive.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#CreateIngressPoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateIngressPointRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateIngressPointResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Provision a new ingress endpoint resource.

", + "smithy.api#examples": [ + { + "title": "Create Open IngressPoint", + "input": { + "IngressPointName": "ingressPointName", + "Type": "OPEN", + "RuleSetId": "rs-12345", + "TrafficPolicyId": "tp-12345", + "Tags": [ + { + "Key": "key", + "Value": "value" + } + ] + }, + "output": { + "IngressPointId": "inp-12345" + } + }, + { + "title": "Create Auth IngressPoint with Password", + "input": { + "IngressPointName": "ingressPointName", + "Type": "AUTH", + "RuleSetId": "rs-12345", + "TrafficPolicyId": "tp-12345", + "IngressPointConfiguration": { + "SmtpPassword": "smtpPassword" + }, + "Tags": [ + { + "Key": "key", + "Value": "value" + } + ] + }, + "output": { + "IngressPointId": "inp-12345" + } + }, + { + "title": "Create Auth IngressPoint with SecretsManager Secret", + "input": { + "IngressPointName": "ingressPointName", + "Type": "AUTH", + "RuleSetId": "rs-12345", + "TrafficPolicyId": "tp-12345", + "IngressPointConfiguration": { + "SecretArn": "arn:aws:secretsmanager:us-west-2:123456789012:secret:abcde" + }, + "Tags": [ + { + "Key": "key", + "Value": "value" + } + ] + }, + "output": { + "IngressPointId": "inp-12345" + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateIngressPointRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token that Amazon SES uses to recognize subsequent retries of the same\n request.

", + "smithy.api#idempotencyToken": {} + } + }, + "IngressPointName": { + "target": "com.amazonaws.mailmanager#IngressPointName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for an ingress endpoint resource.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "IngressPointId" + } + }, + "Type": { + "target": "com.amazonaws.mailmanager#IngressPointType", + "traits": { + "smithy.api#documentation": "

The type of the ingress endpoint to create.

", + "smithy.api#required": {} + } + }, + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of an existing rule set that you attach to an ingress endpoint\n resource.

", + "smithy.api#required": {} + } + }, + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of an existing traffic policy that you attach to an ingress endpoint\n resource.

", + "smithy.api#required": {} + } + }, + "IngressPointConfiguration": { + "target": "com.amazonaws.mailmanager#IngressPointConfiguration", + "traits": { + "smithy.api#documentation": "

If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret\n ARN.

" + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateIngressPointResponse": { + "type": "structure", + "members": { + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The unique identifier for a previously created ingress endpoint.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#CreateRelay": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateRelayRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateRelayResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a relay resource which can be used in rules to relay incoming emails to\n defined relay destinations.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateRelayRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token that Amazon SES uses to recognize subsequent retries of the same\n request.

", + "smithy.api#idempotencyToken": {} + } + }, + "RelayName": { + "target": "com.amazonaws.mailmanager#RelayName", + "traits": { + "smithy.api#documentation": "

The unique name of the relay resource.

", + "smithy.api#required": {} + } + }, + "ServerName": { + "target": "com.amazonaws.mailmanager#RelayServerName", + "traits": { + "smithy.api#documentation": "

The destination relay server address.

", + "smithy.api#required": {} + } + }, + "ServerPort": { + "target": "com.amazonaws.mailmanager#RelayServerPort", + "traits": { + "smithy.api#documentation": "

The destination relay server port.

", + "smithy.api#required": {} + } + }, + "Authentication": { + "target": "com.amazonaws.mailmanager#RelayAuthentication", + "traits": { + "smithy.api#documentation": "

Authentication for the relay destination server—specify the secret ARN where\n the SMTP credentials are stored.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateRelayResponse": { + "type": "structure", + "members": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId", + "traits": { + "smithy.api#documentation": "

A unique identifier of the created relay resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#CreateRuleSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateRuleSetRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateRuleSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Provision a new rule set.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateRuleSetRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token that Amazon SES uses to recognize subsequent retries of the same\n request.

", + "smithy.api#idempotencyToken": {} + } + }, + "RuleSetName": { + "target": "com.amazonaws.mailmanager#RuleSetName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the rule set.

", + "smithy.api#required": {} + } + }, + "Rules": { + "target": "com.amazonaws.mailmanager#Rules", + "traits": { + "smithy.api#documentation": "

Conditional rules that are evaluated for determining actions on email.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateRuleSetResponse": { + "type": "structure", + "members": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of the created rule set.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#CreateTrafficPolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#CreateTrafficPolicyRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#CreateTrafficPolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Provision a new traffic policy resource.

", + "smithy.api#examples": [ + { + "title": "Create TrafficPolicy", + "input": { + "TrafficPolicyName": "trafficPolicyName", + "PolicyStatements": [ + { + "Conditions": [ + { + "IpExpression": { + "Evaluate": { + "Attribute": "SENDER_IP" + }, + "Operator": "CIDR_MATCHES", + "Values": [ + "0.0.0.0/12" + ] + } + } + ], + "Action": "ALLOW" + } + ], + "DefaultAction": "DENY" + }, + "output": { + "TrafficPolicyId": "tp-13245" + } + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#CreateTrafficPolicyRequest": { + "type": "structure", + "members": { + "ClientToken": { + "target": "com.amazonaws.mailmanager#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique token that Amazon SES uses to recognize subsequent retries of the same\n request.

", + "smithy.api#idempotencyToken": {} + } + }, + "TrafficPolicyName": { + "target": "com.amazonaws.mailmanager#TrafficPolicyName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the traffic policy resource.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "TrafficPolicyId" + } + }, + "PolicyStatements": { + "target": "com.amazonaws.mailmanager#PolicyStatementList", + "traits": { + "smithy.api#documentation": "

Conditional statements for filtering email traffic.

", + "smithy.api#required": {} + } + }, + "DefaultAction": { + "target": "com.amazonaws.mailmanager#AcceptAction", + "traits": { + "smithy.api#documentation": "

The default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of (or are not addressed by) the conditions of your policy statements.

", + "smithy.api#required": {} + } + }, + "MaxMessageSizeBytes": { + "target": "com.amazonaws.mailmanager#MaxMessageSizeBytes", + "traits": { + "smithy.api#documentation": "

The maximum message size, in bytes, of email that this traffic policy allows; anything larger will be blocked.

" + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#CreateTrafficPolicyResponse": { + "type": "structure", + "members": { + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteAddonInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteAddonInstanceRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteAddonInstanceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Add On instance.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteAddonInstanceRequest": { + "type": "structure", + "members": { + "AddonInstanceId": { + "target": "com.amazonaws.mailmanager#AddonInstanceId", + "traits": { + "smithy.api#documentation": "

The Add On instance ID to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteAddonInstanceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteAddonSubscription": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteAddonSubscriptionRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteAddonSubscriptionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Add On subscription.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteAddonSubscriptionRequest": { + "type": "structure", + "members": { + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The Add On subscription ID to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteAddonSubscriptionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteArchive": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteArchiveRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteArchiveResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Initiates deletion of an email archive. This changes the archive state to pending\n deletion. In this state, no new emails can be added, and existing archived emails become\n inaccessible (search, export, download). The archive and all of its contents will be\n permanently deleted 30 days after entering the pending deletion state, regardless of the\n configured retention period.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteArchiveRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString", + "traits": { + "smithy.api#documentation": "

The identifier of the archive to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to initiate deletion of an email archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteArchiveResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The response indicating if the archive deletion was successfully initiated.

On success, returns an HTTP 200 status code. On failure, returns an error\n message.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteIngressPoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteIngressPointRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteIngressPointResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete an ingress endpoint resource.

", + "smithy.api#examples": [ + { + "title": "Delete IngressPoint", + "input": { + "IngressPointId": "inp-12345" + }, + "output": {} + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteIngressPointRequest": { + "type": "structure", + "members": { + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The identifier of the ingress endpoint resource that you want to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteIngressPointResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteRelay": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteRelayRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteRelayResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an existing relay resource.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteRelayRequest": { + "type": "structure", + "members": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId", + "traits": { + "smithy.api#documentation": "

The unique relay identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteRelayResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteRuleSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteRuleSetRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteRuleSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete a rule set.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteRuleSetRequest": { + "type": "structure", + "members": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of an existing rule set resource to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteRuleSetResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeleteTrafficPolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#DeleteTrafficPolicyRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#DeleteTrafficPolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete a traffic policy resource.

", + "smithy.api#examples": [ + { + "title": "Delete TrafficPolicy", + "input": { + "TrafficPolicyId": "tp-12345" + }, + "output": {} + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#DeleteTrafficPolicyRequest": { + "type": "structure", + "members": { + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy that you want to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#DeleteTrafficPolicyResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#DeliverToMailboxAction": { + "type": "structure", + "members": { + "ActionFailurePolicy": { + "target": "com.amazonaws.mailmanager#ActionFailurePolicy", + "traits": { + "smithy.api#documentation": "

A policy that states what to do in the case of failure. The action will fail if there\n are configuration errors. For example, the mailbox ARN is no longer valid.

" + } + }, + "MailboxArn": { + "target": "com.amazonaws.mailmanager#NameOrArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a WorkMail organization to deliver the email to.

", + "smithy.api#required": {} + } + }, + "RoleArn": { + "target": "com.amazonaws.mailmanager#IamRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role to use to execute this action. The role must have access to\n the workmail:DeliverToMailbox API.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This action delivers an email to a mailbox.

" + } + }, + "com.amazonaws.mailmanager#DropAction": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

This action causes processing to stop and the email to be dropped. If the action\n applies only to certain recipients, only those recipients are dropped, and processing\n continues for other recipients.

" + } + }, + "com.amazonaws.mailmanager#EmailAddress": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 254 + }, + "smithy.api#pattern": "^[0-9A-Za-z@+.-]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.mailmanager#EmailReceivedHeadersList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.mailmanager#ErrorMessage": { + "type": "string" + }, + "com.amazonaws.mailmanager#ExportDestinationConfiguration": { + "type": "union", + "members": { + "S3": { + "target": "com.amazonaws.mailmanager#S3ExportDestinationConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration for delivering to an Amazon S3 bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The destination configuration for delivering exported email data.

" + } + }, + "com.amazonaws.mailmanager#ExportId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.mailmanager#ExportMaxResults": { + "type": "integer" + }, + "com.amazonaws.mailmanager#ExportState": { + "type": "enum", + "members": { + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, + "PREPROCESSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PREPROCESSING" + } + }, + "PROCESSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROCESSING" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLED" + } + } + } + }, + "com.amazonaws.mailmanager#ExportStatus": { + "type": "structure", + "members": { + "SubmissionTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the export job was submitted.

" + } + }, + "CompletionTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the export job completed (if finished).

" + } + }, + "State": { + "target": "com.amazonaws.mailmanager#ExportState", + "traits": { + "smithy.api#documentation": "

The current state of the export job.

" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.mailmanager#ErrorMessage", + "traits": { + "smithy.api#documentation": "

An error message if the export job failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The current status of an archive export job.

" + } + }, + "com.amazonaws.mailmanager#ExportSummary": { + "type": "structure", + "members": { + "ExportId": { + "target": "com.amazonaws.mailmanager#ExportId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the export job.

" + } + }, + "Status": { + "target": "com.amazonaws.mailmanager#ExportStatus", + "traits": { + "smithy.api#documentation": "

The current status of the export job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary statuses of an archive export job.

" + } + }, + "com.amazonaws.mailmanager#ExportSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#ExportSummary" + } + }, + "com.amazonaws.mailmanager#GetAddonInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetAddonInstanceRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetAddonInstanceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets detailed information about an Add On instance.

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetAddonInstanceRequest": { + "type": "structure", + "members": { + "AddonInstanceId": { + "target": "com.amazonaws.mailmanager#AddonInstanceId", + "traits": { + "smithy.api#documentation": "

The Add On instance ID to retrieve information for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetAddonInstanceResponse": { + "type": "structure", + "members": { + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The subscription ID associated with the instance.

" + } + }, + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName", + "traits": { + "smithy.api#documentation": "

The name of the Add On provider associated with the subscription of the instance.

" + } + }, + "AddonInstanceArn": { + "target": "com.amazonaws.mailmanager#AddonInstanceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Add On instance.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Add On instance was created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetAddonSubscription": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetAddonSubscriptionRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetAddonSubscriptionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets detailed information about an Add On subscription.

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetAddonSubscriptionRequest": { + "type": "structure", + "members": { + "AddonSubscriptionId": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionId", + "traits": { + "smithy.api#documentation": "

The Add On subscription ID to retrieve information for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetAddonSubscriptionResponse": { + "type": "structure", + "members": { + "AddonName": { + "target": "com.amazonaws.mailmanager#AddonName", + "traits": { + "smithy.api#documentation": "

The name of the Add On for the subscription.

" + } + }, + "AddonSubscriptionArn": { + "target": "com.amazonaws.mailmanager#AddonSubscriptionArn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) for the subscription.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the Add On subscription was created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetArchive": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetArchiveRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetArchiveResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the full details and current state of a specified email archive.

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveExport": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetArchiveExportRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetArchiveExportResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the details and current status of a specific email archive export job.

" + } + }, + "com.amazonaws.mailmanager#GetArchiveExportRequest": { + "type": "structure", + "members": { + "ExportId": { + "target": "com.amazonaws.mailmanager#ExportId", + "traits": { + "smithy.api#documentation": "

The identifier of the export job to get details for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to retrieve details of a specific archive export job.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveExportResponse": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveId", + "traits": { + "smithy.api#documentation": "

The identifier of the archive the email export was performed from.

" + } + }, + "Filters": { + "target": "com.amazonaws.mailmanager#ArchiveFilters", + "traits": { + "smithy.api#documentation": "

The criteria used to filter emails included in the export.

" + } + }, + "FromTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the timestamp range the exported emails cover.

" + } + }, + "ToTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the date range the exported emails cover.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.mailmanager#ExportMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of email items included in the export.

" + } + }, + "ExportDestinationConfiguration": { + "target": "com.amazonaws.mailmanager#ExportDestinationConfiguration", + "traits": { + "smithy.api#documentation": "

Where the exported emails are being delivered.

" + } + }, + "Status": { + "target": "com.amazonaws.mailmanager#ExportStatus", + "traits": { + "smithy.api#documentation": "

The current status of the export job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing details of the specified archive export job.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveMessage": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetArchiveMessageRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetArchiveMessageResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a pre-signed URL that provides temporary download access to the specific email message stored in\n the archive.\n

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveMessageContent": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetArchiveMessageContentRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetArchiveMessageContentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the textual content of a specific email message stored in the archive. Attachments are not\n included.\n

" + } + }, + "com.amazonaws.mailmanager#GetArchiveMessageContentRequest": { + "type": "structure", + "members": { + "ArchivedMessageId": { + "target": "com.amazonaws.mailmanager#ArchivedMessageId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the archived email message.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to get the textual content of a specific email message stored in an archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveMessageContentResponse": { + "type": "structure", + "members": { + "Body": { + "target": "com.amazonaws.mailmanager#MessageBody", + "traits": { + "smithy.api#documentation": "

The textual body content of the email message.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing the textual content of the requested archived email message.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveMessageRequest": { + "type": "structure", + "members": { + "ArchivedMessageId": { + "target": "com.amazonaws.mailmanager#ArchivedMessageId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the archived email message.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to get details of a specific email message stored in an archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveMessageResponse": { + "type": "structure", + "members": { + "MessageDownloadLink": { + "target": "com.amazonaws.mailmanager#S3PresignedURL", + "traits": { + "smithy.api#documentation": "

A pre-signed URL to temporarily download the full message content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing details about the requested archived email message.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString", + "traits": { + "smithy.api#documentation": "

The identifier of the archive to retrieve.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to retrieve details of an email archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveResponse": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString", + "traits": { + "smithy.api#documentation": "

The unique identifier of the archive.

", + "smithy.api#required": {} + } + }, + "ArchiveName": { + "target": "com.amazonaws.mailmanager#ArchiveNameString", + "traits": { + "smithy.api#documentation": "

The unique name assigned to the archive.

", + "smithy.api#required": {} + } + }, + "ArchiveArn": { + "target": "com.amazonaws.mailmanager#ArchiveArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the archive.

", + "smithy.api#required": {} + } + }, + "ArchiveState": { + "target": "com.amazonaws.mailmanager#ArchiveState", + "traits": { + "smithy.api#documentation": "

The current state of the archive:

  • ACTIVE – The archive is ready and available for use.

  • PENDING_DELETION – The archive has been marked for deletion and will be permanently deleted in 30 days. No further modifications can be made in this state.

", + "smithy.api#required": {} + } + }, + "Retention": { + "target": "com.amazonaws.mailmanager#ArchiveRetention", + "traits": { + "smithy.api#documentation": "

The retention period for emails in this archive.

", + "smithy.api#required": {} + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the archive was created.

" + } + }, + "LastUpdatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the archive was modified.

" + } + }, + "KmsKeyArn": { + "target": "com.amazonaws.mailmanager#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the archive.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing details of the requested archive.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveSearch": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetArchiveSearchRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetArchiveSearchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the details and current status of a specific email archive search job.

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveSearchRequest": { + "type": "structure", + "members": { + "SearchId": { + "target": "com.amazonaws.mailmanager#SearchId", + "traits": { + "smithy.api#documentation": "

The identifier of the search job to get details for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to retrieve details of a specific archive search job.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveSearchResponse": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveId", + "traits": { + "smithy.api#documentation": "

The identifier of the archive the email search was performed in.

" + } + }, + "Filters": { + "target": "com.amazonaws.mailmanager#ArchiveFilters", + "traits": { + "smithy.api#documentation": "

The criteria used to filter emails included in the search.

" + } + }, + "FromTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start timestamp of the range the searched emails cover.

" + } + }, + "ToTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end timestamp of the range the searched emails cover.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.mailmanager#SearchMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of search results to return.

" + } + }, + "Status": { + "target": "com.amazonaws.mailmanager#SearchStatus", + "traits": { + "smithy.api#documentation": "

The current status of the search job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing details of the specified archive search job.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveSearchResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetArchiveSearchResultsRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetArchiveSearchResultsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the results of a completed email archive search job.
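Sketch of reading back the rows of a completed search with the assumed Soto client; the function name and search ID are illustrative only.

    import SotoMailManager   // module name assumed

    // Rough sketch: count the rows returned by a finished archive search job.
    func printSearchResults(mailManager: MailManager) async throws {
        let results = try await mailManager.getArchiveSearchResults(.init(searchId: "search-123"))
        print("Matched emails:", results.rows?.count ?? 0)
    }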

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveSearchResultsRequest": { + "type": "structure", + "members": { + "SearchId": { + "target": "com.amazonaws.mailmanager#SearchId", + "traits": { + "smithy.api#documentation": "

The identifier of the completed search job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to retrieve results from a completed archive search job.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetArchiveSearchResultsResponse": { + "type": "structure", + "members": { + "Rows": { + "target": "com.amazonaws.mailmanager#RowsList", + "traits": { + "smithy.api#documentation": "

The list of email result objects matching the search criteria.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing search results from a completed archive search.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetIngressPoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetIngressPointRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetIngressPointResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Fetch ingress endpoint resource attributes.
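The Smithy examples below show the wire-level input and output; as a rough Soto counterpart, the following Swift sketch (assumed module, client, and camelCase member mapping) fetches the same ingress point.

    import SotoMailManager   // module name assumed

    // Rough sketch mirroring the "Get Open IngressPoint" example below.
    func describeIngressPoint(mailManager: MailManager) async throws {
        let ingress = try await mailManager.getIngressPoint(.init(ingressPointId: "inp-12345"))
        // The response also carries the DNS A record to use as the MX target for your domain.
        print(ingress.ingressPointName, String(describing: ingress.status))
    }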

", + "smithy.api#examples": [ + { + "title": "Get Open IngressPoint", + "input": { + "IngressPointId": "inp-12345" + }, + "output": { + "IngressPointId": "inp-12345", + "IngressPointName": "ingressPointName", + "IngressPointArn": "arn:aws:ses:us-east-1:123456789012:mailmanager-ingress-point/inp-12345", + "Status": "ACTIVE", + "Type": "OPEN", + "ARecord": "abcde123.prod.us-east-1.email-border.ses.aws.a2z.com" + } + }, + { + "title": "Get Auth IngressPoint", + "input": { + "IngressPointId": "inp-12345" + }, + "output": { + "IngressPointId": "inp-12345", + "IngressPointName": "ingressPointName", + "IngressPointArn": "arn:aws:ses:us-east-1:123456789012:mailmanager-ingress-point/inp-12345", + "Status": "ACTIVE", + "IngressPointAuthConfiguration": { + "SecretArn": "arn:aws:secretsmanager:us-west-2:123456789012:secret:abcde" + }, + "Type": "AUTH", + "ARecord": "abcde123.prod.us-east-1.email-border.ses.aws.a2z.com" + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetIngressPointRequest": { + "type": "structure", + "members": { + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The identifier of an ingress endpoint.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetIngressPointResponse": { + "type": "structure", + "members": { + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The identifier of an ingress endpoint resource.

", + "smithy.api#required": {} + } + }, + "IngressPointName": { + "target": "com.amazonaws.mailmanager#IngressPointName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the ingress endpoint.

", + "smithy.api#required": {} + } + }, + "IngressPointArn": { + "target": "com.amazonaws.mailmanager#IngressPointArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the ingress endpoint resource.

" + } + }, + "Status": { + "target": "com.amazonaws.mailmanager#IngressPointStatus", + "traits": { + "smithy.api#documentation": "

The status of the ingress endpoint resource.

" + } + }, + "Type": { + "target": "com.amazonaws.mailmanager#IngressPointType", + "traits": { + "smithy.api#documentation": "

The type of ingress endpoint.

" + } + }, + "ARecord": { + "target": "com.amazonaws.mailmanager#IngressPointARecord", + "traits": { + "smithy.api#documentation": "

\n The DNS A Record that identifies your ingress endpoint. Configure your DNS Mail Exchange (MX) record with this value to route emails to Mail Manager.\n

" + } + }, + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of a rule set resource associated with the ingress endpoint.

" + } + }, + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy resource associated with the ingress\n endpoint.

" + } + }, + "IngressPointAuthConfiguration": { + "target": "com.amazonaws.mailmanager#IngressPointAuthConfiguration", + "traits": { + "smithy.api#documentation": "

The authentication configuration of the ingress endpoint resource.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the ingress endpoint was created.

" + } + }, + "LastUpdatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the ingress endpoint was last updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetRelay": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetRelayRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetRelayResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Fetch the relay resource and its attributes.
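A short, assumption-laden Soto sketch of fetching a relay; the client parameter and relay ID are placeholders, and member names follow Soto's usual camelCase mapping.

    import SotoMailManager   // module name assumed

    // Rough sketch: fetch a relay and print its destination server and port.
    func describeRelay(mailManager: MailManager) async throws {
        let relay = try await mailManager.getRelay(.init(relayId: "relay-123"))
        print(relay.relayName ?? "-", relay.serverName ?? "-", relay.serverPort ?? 0)
    }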

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetRelayRequest": { + "type": "structure", + "members": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId", + "traits": { + "smithy.api#documentation": "

A unique relay identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetRelayResponse": { + "type": "structure", + "members": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId", + "traits": { + "smithy.api#documentation": "

The unique relay identifier.

", + "smithy.api#required": {} + } + }, + "RelayArn": { + "target": "com.amazonaws.mailmanager#RelayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the relay.

" + } + }, + "RelayName": { + "target": "com.amazonaws.mailmanager#RelayName", + "traits": { + "smithy.api#documentation": "

The unique name of the relay.

" + } + }, + "ServerName": { + "target": "com.amazonaws.mailmanager#RelayServerName", + "traits": { + "smithy.api#documentation": "

The destination relay server address.

" + } + }, + "ServerPort": { + "target": "com.amazonaws.mailmanager#RelayServerPort", + "traits": { + "smithy.api#documentation": "

The destination relay server port.

" + } + }, + "Authentication": { + "target": "com.amazonaws.mailmanager#RelayAuthentication", + "traits": { + "smithy.api#documentation": "

The authentication attribute, which contains the secret ARN where the customer relay\n server credentials are stored.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the relay was created.

" + } + }, + "LastModifiedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the relay was last updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetRuleSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetRuleSetRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetRuleSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Fetch attributes of a rule set.

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetRuleSetRequest": { + "type": "structure", + "members": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of an existing rule set to be retrieved.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetRuleSetResponse": { + "type": "structure", + "members": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of the rule set resource.

", + "smithy.api#required": {} + } + }, + "RuleSetArn": { + "target": "com.amazonaws.mailmanager#RuleSetArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the rule set resource.

", + "smithy.api#required": {} + } + }, + "RuleSetName": { + "target": "com.amazonaws.mailmanager#RuleSetName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the rule set resource.

", + "smithy.api#required": {} + } + }, + "CreatedDate": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date of when the rule set was created.

", + "smithy.api#required": {} + } + }, + "LastModificationDate": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date of when the rule set was last modified.

", + "smithy.api#required": {} + } + }, + "Rules": { + "target": "com.amazonaws.mailmanager#Rules", + "traits": { + "smithy.api#documentation": "

The rules contained in the rule set.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#GetTrafficPolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#GetTrafficPolicyRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#GetTrafficPolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Fetch attributes of a traffic policy resource.
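The Smithy example that follows shows the raw request and response; a hedged, Soto-flavoured Swift equivalent might be the sketch below (module, client, and policy ID are assumptions).

    import SotoMailManager   // module name assumed

    // Rough sketch mirroring the "Get TrafficPolicy" example below.
    func describeTrafficPolicy(mailManager: MailManager) async throws {
        let policy = try await mailManager.getTrafficPolicy(.init(trafficPolicyId: "tp-12345"))
        print(policy.trafficPolicyName, "default action:", String(describing: policy.defaultAction))
    }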

", + "smithy.api#examples": [ + { + "title": "Get TrafficPolicy", + "input": { + "TrafficPolicyId": "tp-12345" + }, + "output": { + "TrafficPolicyName": "trafficPolicyName", + "TrafficPolicyId": "tp-12345", + "TrafficPolicyArn": "arn:aws:ses:us-east-1:123456789012:mailmanager-traffic-policy/tp-12345", + "PolicyStatements": [ + { + "Conditions": [ + { + "StringExpression": { + "Evaluate": { + "Attribute": "RECIPIENT" + }, + "Operator": "EQUALS", + "Values": [ + "example@amazon.com", + "example@gmail.com" + ] + } + } + ], + "Action": "ALLOW" + } + ], + "DefaultAction": "DENY", + "MaxMessageSizeBytes": 1000 + } + } + ], + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#GetTrafficPolicyRequest": { + "type": "structure", + "members": { + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#GetTrafficPolicyResponse": { + "type": "structure", + "members": { + "TrafficPolicyName": { + "target": "com.amazonaws.mailmanager#TrafficPolicyName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the traffic policy resource.

", + "smithy.api#required": {} + } + }, + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy resource.

", + "smithy.api#required": {} + } + }, + "TrafficPolicyArn": { + "target": "com.amazonaws.mailmanager#TrafficPolicyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the traffic policy resource.

" + } + }, + "PolicyStatements": { + "target": "com.amazonaws.mailmanager#PolicyStatementList", + "traits": { + "smithy.api#documentation": "

The list of conditions which are in the traffic policy resource.

" + } + }, + "MaxMessageSizeBytes": { + "target": "com.amazonaws.mailmanager#MaxMessageSizeBytes", + "traits": { + "smithy.api#documentation": "

The maximum message size in bytes of email that this traffic\n policy allows in; anything larger will be blocked.

" + } + }, + "DefaultAction": { + "target": "com.amazonaws.mailmanager#AcceptAction", + "traits": { + "smithy.api#documentation": "

The default action of the traffic policy.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the traffic policy was created.

" + } + }, + "LastUpdatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the traffic policy was last updated.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#HeaderName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[xX]\\-[a-zA-Z0-9\\-]+$" + } + }, + "com.amazonaws.mailmanager#HeaderValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.mailmanager#IamRoleArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "type": "AWS::IAM::Role" + }, + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_/+=,@.#-]+$" + } + }, + "com.amazonaws.mailmanager#IdOrArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_/+=,@.#-]+$" + } + }, + "com.amazonaws.mailmanager#IdempotencyToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.mailmanager#IngressAnalysis": { + "type": "structure", + "members": { + "Analyzer": { + "target": "com.amazonaws.mailmanager#AnalyzerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Add On.

", + "smithy.api#required": {} + } + }, + "ResultField": { + "target": "com.amazonaws.mailmanager#ResultField", + "traits": { + "smithy.api#documentation": "

The returned value from an Add On.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The Add On ARN and its returned value that is evaluated in a policy statement's\n conditional expression to either deny or block the incoming email.

" + } + }, + "com.amazonaws.mailmanager#IngressBooleanExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#IngressBooleanToEvaluate", + "traits": { + "smithy.api#documentation": "

The operand on which to perform a boolean condition operation.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#IngressBooleanOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for a boolean condition expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure for a boolean condition matching on the incoming mail.

" + } + }, + "com.amazonaws.mailmanager#IngressBooleanOperator": { + "type": "enum", + "members": { + "IS_TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS_TRUE" + } + }, + "IS_FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS_FALSE" + } + } + } + }, + "com.amazonaws.mailmanager#IngressBooleanToEvaluate": { + "type": "union", + "members": { + "Analysis": { + "target": "com.amazonaws.mailmanager#IngressAnalysis", + "traits": { + "smithy.api#documentation": "

The structure type for a boolean condition stating the Add On ARN and its returned\n value.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The union type representing the allowed types of operands for a boolean\n condition.

" + } + }, + "com.amazonaws.mailmanager#IngressIpOperator": { + "type": "enum", + "members": { + "CIDR_MATCHES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CIDR_MATCHES" + } + }, + "NOT_CIDR_MATCHES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_CIDR_MATCHES" + } + } + } + }, + "com.amazonaws.mailmanager#IngressIpToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#IngressIpv4Attribute", + "traits": { + "smithy.api#documentation": "

An enum type representing the allowed attribute types for an IP condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure for an IP based condition matching on the incoming mail.

" + } + }, + "com.amazonaws.mailmanager#IngressIpv4Attribute": { + "type": "enum", + "members": { + "SENDER_IP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SENDER_IP" + } + } + } + }, + "com.amazonaws.mailmanager#IngressIpv4Expression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#IngressIpToEvaluate", + "traits": { + "smithy.api#documentation": "

The left hand side argument of an IP condition expression.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#IngressIpOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for an IP condition expression.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#Ipv4Cidrs", + "traits": { + "smithy.api#documentation": "

The right hand side argument of an IP condition expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The union type representing the allowed types for the left hand side of an IP\n condition.

" + } + }, + "com.amazonaws.mailmanager#IngressPoint": { + "type": "structure", + "members": { + "IngressPointName": { + "target": "com.amazonaws.mailmanager#IngressPointName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the ingress endpoint resource.

", + "smithy.api#required": {} + } + }, + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The identifier of the ingress endpoint resource.

", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.mailmanager#IngressPointStatus", + "traits": { + "smithy.api#documentation": "

The status of the ingress endpoint resource.

", + "smithy.api#required": {} + } + }, + "Type": { + "target": "com.amazonaws.mailmanager#IngressPointType", + "traits": { + "smithy.api#documentation": "

The type of ingress endpoint resource.

", + "smithy.api#required": {} + } + }, + "ARecord": { + "target": "com.amazonaws.mailmanager#IngressPointARecord", + "traits": { + "smithy.api#documentation": "

\n The DNS A Record that identifies your ingress endpoint. Configure your DNS Mail Exchange (MX) record with this value to route emails to Mail Manager.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure of an ingress endpoint resource.

" + } + }, + "com.amazonaws.mailmanager#IngressPointARecord": { + "type": "string" + }, + "com.amazonaws.mailmanager#IngressPointArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#IngressPointAuthConfiguration": { + "type": "structure", + "members": { + "IngressPointPasswordConfiguration": { + "target": "com.amazonaws.mailmanager#IngressPointPasswordConfiguration", + "traits": { + "smithy.api#documentation": "

The ingress endpoint password configuration for the ingress endpoint resource.

" + } + }, + "SecretArn": { + "target": "com.amazonaws.mailmanager#SecretArn", + "traits": { + "smithy.api#documentation": "

The ingress endpoint SecretsManager::Secret ARN configuration for the ingress endpoint\n resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The authentication configuration for the ingress endpoint resource.

" + } + }, + "com.amazonaws.mailmanager#IngressPointConfiguration": { + "type": "union", + "members": { + "SmtpPassword": { + "target": "com.amazonaws.mailmanager#SmtpPassword", + "traits": { + "smithy.api#documentation": "

The password of the ingress endpoint resource.

" + } + }, + "SecretArn": { + "target": "com.amazonaws.mailmanager#SecretArn", + "traits": { + "smithy.api#documentation": "

The SecretsManager::Secret ARN of the ingress endpoint resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the ingress endpoint resource.

" + } + }, + "com.amazonaws.mailmanager#IngressPointId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.mailmanager#IngressPointName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[A-Za-z0-9_\\-]+$" + } + }, + "com.amazonaws.mailmanager#IngressPointPasswordConfiguration": { + "type": "structure", + "members": { + "SmtpPasswordVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The current password version of the ingress endpoint resource.

" + } + }, + "PreviousSmtpPasswordVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The previous password version of the ingress endpoint resource.

" + } + }, + "PreviousSmtpPasswordExpiryTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The previous password expiry timestamp of the ingress endpoint resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The password configuration of the ingress endpoint resource.

" + } + }, + "com.amazonaws.mailmanager#IngressPointResource": { + "type": "resource", + "identifiers": { + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId" + } + }, + "put": { + "target": "com.amazonaws.mailmanager#CreateIngressPoint" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetIngressPoint" + }, + "update": { + "target": "com.amazonaws.mailmanager#UpdateIngressPoint" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteIngressPoint" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListIngressPoints" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerIngressPoint" + }, + "smithy.api#noReplace": {} + } + }, + "com.amazonaws.mailmanager#IngressPointStatus": { + "type": "enum", + "members": { + "PROVISIONING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROVISIONING" + } + }, + "DEPROVISIONING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEPROVISIONING" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UPDATING" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "CLOSED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLOSED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, + "com.amazonaws.mailmanager#IngressPointStatusToUpdate": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "CLOSED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLOSED" + } + } + } + }, + "com.amazonaws.mailmanager#IngressPointType": { + "type": "enum", + "members": { + "OPEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPEN" + } + }, + "AUTH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTH" + } + } + } + }, + "com.amazonaws.mailmanager#IngressPointsList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#IngressPoint" + } + }, + "com.amazonaws.mailmanager#IngressStringEmailAttribute": { + "type": "enum", + "members": { + "RECIPIENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RECIPIENT" + } + } + } + }, + "com.amazonaws.mailmanager#IngressStringExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#IngressStringToEvaluate", + "traits": { + "smithy.api#documentation": "

The left hand side argument of a string condition expression.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#IngressStringOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for a string condition expression.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#StringList", + "traits": { + "smithy.api#documentation": "

The right hand side argument of a string condition expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure for a string based condition matching on the incoming mail.

" + } + }, + "com.amazonaws.mailmanager#IngressStringOperator": { + "type": "enum", + "members": { + "EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS" + } + }, + "NOT_EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS" + } + }, + "STARTS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STARTS_WITH" + } + }, + "ENDS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENDS_WITH" + } + }, + "CONTAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS" + } + } + } + }, + "com.amazonaws.mailmanager#IngressStringToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#IngressStringEmailAttribute", + "traits": { + "smithy.api#documentation": "

The enum type representing the allowed attribute types for a string condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The union type representing the allowed types for the left hand side of a string\n condition.

" + } + }, + "com.amazonaws.mailmanager#IngressTlsAttribute": { + "type": "enum", + "members": { + "TLS_PROTOCOL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS_PROTOCOL" + } + } + } + }, + "com.amazonaws.mailmanager#IngressTlsProtocolAttribute": { + "type": "enum", + "members": { + "TLS1_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS1_2" + } + }, + "TLS1_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS1_3" + } + } + } + }, + "com.amazonaws.mailmanager#IngressTlsProtocolExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#IngressTlsProtocolToEvaluate", + "traits": { + "smithy.api#documentation": "

The left hand side argument of a TLS condition expression.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#IngressTlsProtocolOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for a TLS condition expression.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.mailmanager#IngressTlsProtocolAttribute", + "traits": { + "smithy.api#documentation": "

The right hand side argument of a TLS condition expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure for a TLS related condition matching on the incoming mail.

" + } + }, + "com.amazonaws.mailmanager#IngressTlsProtocolOperator": { + "type": "enum", + "members": { + "MINIMUM_TLS_VERSION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MINIMUM_TLS_VERSION" + } + }, + "IS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS" + } + } + } + }, + "com.amazonaws.mailmanager#IngressTlsProtocolToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#IngressTlsAttribute", + "traits": { + "smithy.api#documentation": "

The enum type representing the allowed attribute types for the TLS condition.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The union type representing the allowed types for the left hand side of a TLS\n condition.

" + } + }, + "com.amazonaws.mailmanager#Ipv4Cidr": { + "type": "string", + "traits": { + "smithy.api#pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/([0-9]|[12][0-9]|3[0-2])$" + } + }, + "com.amazonaws.mailmanager#Ipv4Cidrs": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Ipv4Cidr" + } + }, + "com.amazonaws.mailmanager#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):kms:[a-z0-9-]{1,20}:[0-9]{12}:(key|alias)/.+$" + } + }, + "com.amazonaws.mailmanager#KmsKeyId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-:/]+$" + } + }, + "com.amazonaws.mailmanager#ListAddonInstances": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListAddonInstancesRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListAddonInstancesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all Add On instances in your account.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "AddonInstances", + "pageSize": "PageSize" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListAddonInstancesRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + } + }, + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of Add On instances that are returned per call. You can\n use NextToken to obtain further Add On instances.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListAddonInstancesResponse": { + "type": "structure", + "members": { + "AddonInstances": { + "target": "com.amazonaws.mailmanager#AddonInstances", + "traits": { + "smithy.api#documentation": "

The list of Add On instances.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListAddonSubscriptions": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListAddonSubscriptionsRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListAddonSubscriptionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all Add On subscriptions in your account.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "AddonSubscriptions", + "pageSize": "PageSize" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListAddonSubscriptionsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + } + }, + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of Add On subscriptions that are returned per call. You can\n use NextToken to obtain further Add On subscriptions.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListAddonSubscriptionsResponse": { + "type": "structure", + "members": { + "AddonSubscriptions": { + "target": "com.amazonaws.mailmanager#AddonSubscriptions", + "traits": { + "smithy.api#documentation": "

The list of Add On subscriptions.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListArchiveExports": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListArchiveExportsRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListArchiveExportsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of email archive export jobs.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize", + "items": "Exports" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListArchiveExportsRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveId", + "traits": { + "smithy.api#documentation": "

The identifier of the archive.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination\n token for each page. Make the call again using the returned token to retrieve the next page.\n

" + } + }, + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of archive export jobs that are returned per call. You can use NextToken to obtain\n further pages of archives.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to list archive export jobs in your account.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListArchiveExportsResponse": { + "type": "structure", + "members": { + "Exports": { + "target": "com.amazonaws.mailmanager#ExportSummaryList", + "traits": { + "smithy.api#documentation": "

The list of export job identifiers and statuses.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If present, use to retrieve the next page of results.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing a list of archive export jobs and their statuses.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListArchiveSearches": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListArchiveSearchesRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListArchiveSearchesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of email archive search jobs.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize", + "items": "Searches" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListArchiveSearchesRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveId", + "traits": { + "smithy.api#documentation": "

The identifier of the archive.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination\n token for each page. Make the call again using the returned token to retrieve the next page.\n

" + } + }, + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of archive search jobs that are returned per call. You can use NextToken to obtain\n further pages of archives.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to list archive search jobs in your account.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListArchiveSearchesResponse": { + "type": "structure", + "members": { + "Searches": { + "target": "com.amazonaws.mailmanager#SearchSummaryList", + "traits": { + "smithy.api#documentation": "

The list of search job identifiers and statuses.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If present, use to retrieve the next page of results.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing a list of archive search jobs and their statuses.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListArchives": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListArchivesRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListArchivesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of all email archives in your account.
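Because this operation is marked smithy.api#paginated, Soto normally also emits a paginator; the sketch below leans on that assumption (the paginator name and the client parameter are not confirmed by this model).

    import SotoMailManager   // module name assumed

    // Rough sketch: walk every page of archives via the generated AsyncSequence paginator.
    func printAllArchives(mailManager: MailManager) async throws {
        for try await page in mailManager.listArchivesPaginator(.init()) {
            for archive in page.archives {
                print(archive)
            }
        }
    }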

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize", + "items": "Archives" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListArchivesRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is\n a unique pagination token for each page. Make the call again using the returned token to\n retrieve the next page.

" + } + }, + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of archives that are returned per call. You can use NextToken to\n obtain further pages of archives.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to list email archives in your account.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListArchivesResponse": { + "type": "structure", + "members": { + "Archives": { + "target": "com.amazonaws.mailmanager#ArchivesList", + "traits": { + "smithy.api#documentation": "

The list of archive details.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If present, use to retrieve the next page of results.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response containing a list of your email archives.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListIngressPoints": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListIngressPointsRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListIngressPointsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List all ingress endpoint resources.

", + "smithy.api#examples": [ + { + "title": "List IngressPoints", + "input": {}, + "output": { + "IngressPoints": [ + { + "IngressPointId": "inp-12345", + "IngressPointName": "ingressPointName", + "Status": "ACTIVE", + "Type": "OPEN", + "ARecord": "abcde123.prod.us-east-1.email-border.ses.aws.a2z.com" + } + ] + } + }, + { + "title": "List IngressPoints with PageSize", + "input": { + "PageSize": 10 + }, + "output": { + "IngressPoints": [ + { + "IngressPointId": "inp-12345", + "IngressPointName": "ingressPointName", + "Status": "ACTIVE", + "Type": "OPEN", + "ARecord": "abcde123.prod.us-east-1.email-border.ses.aws.a2z.com" + } + ] + } + }, + { + "title": "List IngressPoints with NextToken", + "input": { + "NextToken": "nextToken" + }, + "output": { + "IngressPoints": [ + { + "IngressPointId": "inp-12345", + "IngressPointName": "ingressPointName", + "Status": "ACTIVE", + "Type": "OPEN", + "ARecord": "abcde123.prod.us-east-1.email-border.ses.aws.a2z.com" + } + ] + } + } + ], + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize", + "items": "IngressPoints" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListIngressPointsRequest": { + "type": "structure", + "members": { + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of ingress endpoint resources that are returned per call. You can\n use NextToken to obtain further ingress endpoints.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListIngressPointsResponse": { + "type": "structure", + "members": { + "IngressPoints": { + "target": "com.amazonaws.mailmanager#IngressPointsList", + "traits": { + "smithy.api#documentation": "

The list of ingress endpoints.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListRelays": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListRelaysRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListRelaysResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all the existing relay resources.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize", + "items": "Relays" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListRelaysRequest": { + "type": "structure", + "members": { + "PageSize": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of relays to be returned in one request.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListRelaysResponse": { + "type": "structure", + "members": { + "Relays": { + "target": "com.amazonaws.mailmanager#Relays", + "traits": { + "smithy.api#documentation": "

The list of returned relays.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListRuleSets": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListRuleSetsRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListRuleSetsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List rule sets for this account.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "RuleSets", + "pageSize": "PageSize" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListRuleSetsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + } + }, + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of rule set resources that are returned per call. You can use\n NextToken to obtain further rule sets.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListRuleSetsResponse": { + "type": "structure", + "members": { + "RuleSets": { + "target": "com.amazonaws.mailmanager#RuleSets", + "traits": { + "smithy.api#documentation": "

The list of rule sets.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the list of tags (keys and values) assigned to the resource.

", + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.mailmanager#TaggableResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to retrieve tags from.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ListTrafficPolicies": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#ListTrafficPoliciesRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#ListTrafficPoliciesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List traffic policy resources.

", + "smithy.api#examples": [ + { + "title": "List TrafficPolicies", + "input": {}, + "output": { + "TrafficPolicies": [ + { + "TrafficPolicyId": "tp-12345", + "TrafficPolicyName": "trafficPolicyName", + "DefaultAction": "DENY" + } + ] + } + }, + { + "title": "List TrafficPolicies with PageSize", + "input": { + "PageSize": 10 + }, + "output": { + "TrafficPolicies": [ + { + "TrafficPolicyId": "tp-12345", + "TrafficPolicyName": "trafficPolicyName", + "DefaultAction": "DENY" + } + ] + } + }, + { + "title": "List TrafficPolicies with NextToken", + "input": { + "NextToken": "nextToken" + }, + "output": { + "TrafficPolicies": [ + { + "TrafficPolicyId": "tp-12345", + "TrafficPolicyName": "trafficPolicyName", + "DefaultAction": "DENY" + } + ] + } + } + ], + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize", + "items": "TrafficPolicies" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.mailmanager#ListTrafficPoliciesRequest": { + "type": "structure", + "members": { + "PageSize": { + "target": "com.amazonaws.mailmanager#PageSize", + "traits": { + "smithy.api#documentation": "

The maximum number of traffic policy resources that are returned per call. You can use\n NextToken to obtain further traffic policies.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If you received a pagination token from a previous call to this API, you can provide it here to continue paginating through the next page of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#ListTrafficPoliciesResponse": { + "type": "structure", + "members": { + "TrafficPolicies": { + "target": "com.amazonaws.mailmanager#TrafficPolicyList", + "traits": { + "smithy.api#documentation": "

The list of traffic policies.

" + } + }, + "NextToken": { + "target": "com.amazonaws.mailmanager#PaginationToken", + "traits": { + "smithy.api#documentation": "

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.
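The NextToken contract described above is the standard request/response pagination loop. A rough Soto-style sketch follows (manual loop shown to make the protocol explicit; the generated client also ships paginator helpers, and the shape and property names here are assumed from the model above, not verified):

```swift
import SotoMailManager  // assumed module name

// Page through all traffic policies by feeding NextToken back until it is absent.
func allTrafficPolicies(using mailManager: MailManager) async throws -> [MailManager.TrafficPolicy] {
    var policies: [MailManager.TrafficPolicy] = []
    var nextToken: String? = nil
    repeat {
        let response = try await mailManager.listTrafficPolicies(
            .init(nextToken: nextToken, pageSize: 10)
        )
        policies.append(contentsOf: response.trafficPolicies ?? [])
        nextToken = response.nextToken
    } while nextToken != nil
    return policies
}
```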

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#MailFrom": { + "type": "enum", + "members": { + "REPLACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLACE" + } + }, + "PRESERVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESERVE" + } + } + } + }, + "com.amazonaws.mailmanager#MailManagerSvc": { + "type": "service", + "version": "2023-10-17", + "operations": [ + { + "target": "com.amazonaws.mailmanager#GetArchiveExport" + }, + { + "target": "com.amazonaws.mailmanager#GetArchiveMessage" + }, + { + "target": "com.amazonaws.mailmanager#GetArchiveMessageContent" + }, + { + "target": "com.amazonaws.mailmanager#GetArchiveSearch" + }, + { + "target": "com.amazonaws.mailmanager#GetArchiveSearchResults" + }, + { + "target": "com.amazonaws.mailmanager#ListArchiveExports" + }, + { + "target": "com.amazonaws.mailmanager#ListArchiveSearches" + }, + { + "target": "com.amazonaws.mailmanager#ListTagsForResource" + }, + { + "target": "com.amazonaws.mailmanager#StartArchiveExport" + }, + { + "target": "com.amazonaws.mailmanager#StartArchiveSearch" + }, + { + "target": "com.amazonaws.mailmanager#StopArchiveExport" + }, + { + "target": "com.amazonaws.mailmanager#StopArchiveSearch" + }, + { + "target": "com.amazonaws.mailmanager#TagResource" + }, + { + "target": "com.amazonaws.mailmanager#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.mailmanager#AddonInstanceResource" + }, + { + "target": "com.amazonaws.mailmanager#AddonSubscriptionResource" + }, + { + "target": "com.amazonaws.mailmanager#ArchiveResource" + }, + { + "target": "com.amazonaws.mailmanager#IngressPointResource" + }, + { + "target": "com.amazonaws.mailmanager#RelayResource" + }, + { + "target": "com.amazonaws.mailmanager#RuleSetResource" + }, + { + "target": "com.amazonaws.mailmanager#TrafficPolicyResource" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "MailManager", + "cloudFormationName": "SES", + "endpointPrefix": "mail-manager", + "arnNamespace": "ses" + }, + "aws.auth#sigv4": { + "name": "ses" + }, + "aws.protocols#awsJson1_0": {}, + "smithy.api#cors": {}, + "smithy.api#documentation": "AWS SES Mail Manager API\n

\n The AWS SES Mail Manager API contains operations and data types\n that comprise the Mail Manager feature of Amazon Simple Email Service.


\n

Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen\n your organization's email infrastructure, simplify email workflow management, and\n streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer\n Guide.

", + "smithy.api#externalDocumentation": { + "API Reference": "https://w.amazon.com/bin/view/AWS/Border" + }, + "smithy.api#title": "MailManager", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mail-manager-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + 
"conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mail-manager-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mail-manager.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mail-manager.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", 
+ "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://mail-manager.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.mailmanager#MaxMessageSizeBytes": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.mailmanager#MessageBody": { + "type": "structure", + "members": { + "Text": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "
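For the standard partitions, the rule set and test cases above reduce to picking a host prefix based on UseFIPS and a DNS suffix based on UseDualStack. A purely illustrative sketch of that happy path (the real resolver is generated from the rule set and also covers custom endpoints, non-standard partitions, and the error branches):

```swift
// Illustrative only: the aws-partition happy path of the endpoint rule set above.
func mailManagerEndpoint(region: String, useFIPS: Bool, useDualStack: Bool) -> String {
    let host = useFIPS ? "mail-manager-fips" : "mail-manager"
    let suffix = useDualStack ? "api.aws" : "amazonaws.com"  // dualStackDnsSuffix vs dnsSuffix
    return "https://\(host).\(region).\(suffix)"
}

// Matches the test cases above, e.g.
// mailManagerEndpoint(region: "us-east-1", useFIPS: true, useDualStack: false)
//   == "https://mail-manager-fips.us-east-1.amazonaws.com"
```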

The plain text body content of the message.

" + } + }, + "Html": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The HTML body content of the message.

" + } + }, + "MessageMalformed": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

A flag indicating if the email was malformed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The textual body content of an email message.

" + } + }, + "com.amazonaws.mailmanager#NameOrArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_/+=,@.#-]+$" + } + }, + "com.amazonaws.mailmanager#NoAuthentication": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

Explicitly indicate that the relay destination server does not require SMTP credential authentication.

" + } + }, + "com.amazonaws.mailmanager#PageSize": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.mailmanager#PaginationToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.mailmanager#PolicyCondition": { + "type": "union", + "members": { + "StringExpression": { + "target": "com.amazonaws.mailmanager#IngressStringExpression", + "traits": { + "smithy.api#documentation": "

This represents a string-based condition matching on the incoming mail. It performs\n the string operation configured in 'Operator' and evaluates the 'Protocol' object\n against the 'Value'.

" + } + }, + "IpExpression": { + "target": "com.amazonaws.mailmanager#IngressIpv4Expression", + "traits": { + "smithy.api#documentation": "

This represents an IP-based condition matching on the incoming mail. It performs the\n operation configured in 'Operator' and evaluates the 'Protocol' object against the\n 'Value'.

" + } + }, + "TlsExpression": { + "target": "com.amazonaws.mailmanager#IngressTlsProtocolExpression", + "traits": { + "smithy.api#documentation": "

This represents a TLS-based condition matching on the incoming mail. It performs the\n operation configured in 'Operator' and evaluates the 'Protocol' object against the\n 'Value'.

" + } + }, + "BooleanExpression": { + "target": "com.amazonaws.mailmanager#IngressBooleanExpression", + "traits": { + "smithy.api#documentation": "

This represents a boolean-type condition matching on the incoming mail. It performs\n the boolean operation configured in 'Operator' and evaluates the 'Protocol' object\n against the 'Value'.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The email traffic filtering conditions which are contained in a traffic policy\n resource.

" + } + }, + "com.amazonaws.mailmanager#PolicyConditions": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#PolicyCondition" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.mailmanager#PolicyStatement": { + "type": "structure", + "members": { + "Conditions": { + "target": "com.amazonaws.mailmanager#PolicyConditions", + "traits": { + "smithy.api#documentation": "

The list of conditions to apply to incoming messages for filtering email\n traffic.

", + "smithy.api#required": {} + } + }, + "Action": { + "target": "com.amazonaws.mailmanager#AcceptAction", + "traits": { + "smithy.api#documentation": "

The action that informs a traffic policy resource to either allow or block the email\n if it matches a condition in the policy statement.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure containing traffic policy conditions and actions.
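In the generated Swift client, a policy statement pairs one action with a list of conditions. A hypothetical sketch follows, assuming Soto's usual enum-with-associated-value rendering of Smithy unions and that IngressStringExpression mirrors the rule-level string expression (its members are not shown in this excerpt, so every label here is an assumption):

```swift
// Hypothetical sketch: deny mail addressed to recipients at example.invalid.
// The .stringExpression / .attribute spellings, the backticked `operator` label,
// and the IngressStringExpression member names are assumptions, not verified
// against the generated code.
let statement = MailManager.PolicyStatement(
    action: .deny,
    conditions: [
        .stringExpression(
            MailManager.IngressStringExpression(
                evaluate: .attribute(.recipient),
                `operator`: .endsWith,
                values: ["@example.invalid"]
            )
        )
    ]
)
```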

" + } + }, + "com.amazonaws.mailmanager#PolicyStatementList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#PolicyStatement" + } + }, + "com.amazonaws.mailmanager#Recipients": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#EmailAddress" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#uniqueItems": {} + } + }, + "com.amazonaws.mailmanager#Relay": { + "type": "structure", + "members": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId", + "traits": { + "smithy.api#documentation": "

The unique relay identifier.

" + } + }, + "RelayName": { + "target": "com.amazonaws.mailmanager#RelayName", + "traits": { + "smithy.api#documentation": "

The unique relay name.

" + } + }, + "LastModifiedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the relay was last modified.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The relay resource that can be used as a rule to relay received emails to the destination\n relay server.

" + } + }, + "com.amazonaws.mailmanager#RelayAction": { + "type": "structure", + "members": { + "ActionFailurePolicy": { + "target": "com.amazonaws.mailmanager#ActionFailurePolicy", + "traits": { + "smithy.api#documentation": "

A policy that states what to do in the case of failure. The action will fail if there\n are configuration errors. For example, the specified relay has been deleted.

" + } + }, + "Relay": { + "target": "com.amazonaws.mailmanager#IdOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the relay resource to be used when relaying an email.

", + "smithy.api#required": {} + } + }, + "MailFrom": { + "target": "com.amazonaws.mailmanager#MailFrom", + "traits": { + "smithy.api#documentation": "

This action specifies whether to preserve or replace the original mail from address while\n relaying received emails to a destination server.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The action relays the email via SMTP to another specific SMTP server.

" + } + }, + "com.amazonaws.mailmanager#RelayArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#RelayAuthentication": { + "type": "union", + "members": { + "SecretArn": { + "target": "com.amazonaws.mailmanager#SecretArn", + "traits": { + "smithy.api#documentation": "

The ARN of the secret created in Secrets Manager where the relay server's SMTP credentials are stored.

" + } + }, + "NoAuthentication": { + "target": "com.amazonaws.mailmanager#NoAuthentication", + "traits": { + "smithy.api#documentation": "

Keep an empty structure if the relay destination server does not require SMTP credential authentication.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Authentication for the relay destination server: specify the secret ARN where the SMTP credentials are stored,\n or specify an empty NoAuthentication structure if the relay destination server does not require SMTP credential authentication.
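In the generated Swift client this union would surface as two alternatives. A small sketch, assuming Soto's usual enum-with-associated-value spelling of Smithy unions and using a placeholder secret ARN:

```swift
// Either point at a Secrets Manager secret holding the SMTP credentials...
let withCredentials: MailManager.RelayAuthentication =
    .secretArn("arn:aws:secretsmanager:us-east-1:111122223333:secret:relay-smtp-abc123")  // placeholder ARN

// ...or state explicitly that the destination server needs no authentication.
let withoutCredentials: MailManager.RelayAuthentication =
    .noAuthentication(MailManager.NoAuthentication())
```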

" + } + }, + "com.amazonaws.mailmanager#RelayId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-]+$" + } + }, + "com.amazonaws.mailmanager#RelayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-_]+$" + } + }, + "com.amazonaws.mailmanager#RelayResource": { + "type": "resource", + "identifiers": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId" + } + }, + "properties": { + "RelayArn": { + "target": "com.amazonaws.mailmanager#RelayArn" + }, + "RelayName": { + "target": "com.amazonaws.mailmanager#RelayName" + }, + "ServerName": { + "target": "com.amazonaws.mailmanager#RelayServerName" + }, + "ServerPort": { + "target": "com.amazonaws.mailmanager#RelayServerPort" + }, + "Authentication": { + "target": "com.amazonaws.mailmanager#RelayAuthentication" + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp" + }, + "LastModifiedTimestamp": { + "target": "smithy.api#Timestamp" + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList" + } + }, + "create": { + "target": "com.amazonaws.mailmanager#CreateRelay" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetRelay" + }, + "update": { + "target": "com.amazonaws.mailmanager#UpdateRelay" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteRelay" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListRelays" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerRelay" + } + } + }, + "com.amazonaws.mailmanager#RelayServerName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-\\.]+$" + } + }, + "com.amazonaws.mailmanager#RelayServerPort": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 65535 + } + } + }, + "com.amazonaws.mailmanager#Relays": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Relay" + } + }, + "com.amazonaws.mailmanager#ReplaceRecipientAction": { + "type": "structure", + "members": { + "ReplaceWith": { + "target": "com.amazonaws.mailmanager#Recipients", + "traits": { + "smithy.api#documentation": "

This action specifies the replacement recipient email addresses to insert.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This action replaces the email envelope recipients with the given list of recipients.\n If the condition of this action applies only to a subset of recipients, only those\n recipients are replaced with the recipients specified in the action. The message\n contents and headers are unaffected by this action, only the envelope recipients are\n updated.

" + } + }, + "com.amazonaws.mailmanager#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.mailmanager#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Occurs when a requested resource is not found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.mailmanager#ResultField": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[\\sa-zA-Z0-9_]+$" + } + }, + "com.amazonaws.mailmanager#RetentionPeriod": { + "type": "enum", + "members": { + "THREE_MONTHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THREE_MONTHS" + } + }, + "SIX_MONTHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SIX_MONTHS" + } + }, + "NINE_MONTHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NINE_MONTHS" + } + }, + "ONE_YEAR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ONE_YEAR" + } + }, + "EIGHTEEN_MONTHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EIGHTEEN_MONTHS" + } + }, + "TWO_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TWO_YEARS" + } + }, + "THIRTY_MONTHS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THIRTY_MONTHS" + } + }, + "THREE_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "THREE_YEARS" + } + }, + "FOUR_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FOUR_YEARS" + } + }, + "FIVE_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FIVE_YEARS" + } + }, + "SIX_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SIX_YEARS" + } + }, + "SEVEN_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SEVEN_YEARS" + } + }, + "EIGHT_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EIGHT_YEARS" + } + }, + "NINE_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NINE_YEARS" + } + }, + "TEN_YEARS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TEN_YEARS" + } + }, + "PERMANENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PERMANENT" + } + } + } + }, + "com.amazonaws.mailmanager#Row": { + "type": "structure", + "members": { + "ArchivedMessageId": { + "target": "com.amazonaws.mailmanager#ArchivedMessageId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the archived message.

" + } + }, + "ReceivedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the email was received.

" + } + }, + "Date": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The date the email was sent.

" + } + }, + "To": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The email addresses in the To header.

" + } + }, + "From": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The email address of the sender.

" + } + }, + "Cc": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The email addresses in the CC header.

" + } + }, + "Subject": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The subject header value of the email.

" + } + }, + "MessageId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique message ID of the email.

" + } + }, + "HasAttachments": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

A flag indicating if the email has attachments.

" + } + }, + "ReceivedHeaders": { + "target": "com.amazonaws.mailmanager#EmailReceivedHeadersList", + "traits": { + "smithy.api#documentation": "

The received headers from the email delivery path.

" + } + }, + "InReplyTo": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The email message ID this is a reply to.

" + } + }, + "XMailer": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The user agent that sent the email.

" + } + }, + "XOriginalMailer": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The original user agent that sent the email.

" + } + }, + "XPriority": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The priority level of the email.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A result row containing metadata for an archived email message.

" + } + }, + "com.amazonaws.mailmanager#RowsList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Row" + } + }, + "com.amazonaws.mailmanager#Rule": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.mailmanager#RuleName", + "traits": { + "smithy.api#documentation": "

The user-friendly name of the rule.

" + } + }, + "Conditions": { + "target": "com.amazonaws.mailmanager#RuleConditions", + "traits": { + "smithy.api#documentation": "

The conditions of this rule. All conditions must match the email for the actions to be\n executed. An empty list of conditions means that all emails match, but are still subject\n to any \"unless conditions\".

" + } + }, + "Unless": { + "target": "com.amazonaws.mailmanager#RuleConditions", + "traits": { + "smithy.api#documentation": "

The \"unless conditions\" of this rule. None of the conditions can match the\n email for the actions to be executed. If any of these conditions do match the email,\n then the actions are not executed.

" + } + }, + "Actions": { + "target": "com.amazonaws.mailmanager#RuleActions", + "traits": { + "smithy.api#documentation": "

The list of actions to execute when the conditions match the incoming email, and none\n of the \"unless conditions\" match.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A rule contains conditions, \"unless conditions\" and actions. For each\n envelope recipient of an email, if all conditions match and none of the \"unless\n conditions\" match, then all of the actions are executed sequentially. If no\n conditions are provided, the rule always applies and the actions are implicitly\n executed. If only \"unless conditions\" are provided, the rule applies if the\n email does not match the evaluation of the \"unless conditions\".
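To make the conditions / "unless conditions" / actions split concrete, here is a hedged Soto-style sketch of one rule, using attribute and operator values listed in this model. The enum-case spellings, the backticked `operator` label, and the assumption that MESSAGE_SIZE is measured in bytes are mine, not taken from the generated code:

```swift
// Sketch: drop messages larger than roughly 1 MB unless they come from ops@example.com.
let rule = MailManager.Rule(
    actions: [
        .drop(MailManager.DropAction())
    ],
    conditions: [
        .numberExpression(MailManager.RuleNumberExpression(
            evaluate: .attribute(.messageSize),
            `operator`: .greaterThan,
            value: 1_048_576          // assumes MESSAGE_SIZE is in bytes
        ))
    ],
    name: "drop-large-messages",
    unless: [
        .stringExpression(MailManager.RuleStringExpression(
            evaluate: .attribute(.mailFrom),
            `operator`: .equals,
            values: ["ops@example.com"]
        ))
    ]
)
```

If the size condition matches and the MAIL FROM "unless condition" does not, the Drop action ends evaluation of the rule set for that recipient, as described above.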

" + } + }, + "com.amazonaws.mailmanager#RuleAction": { + "type": "union", + "members": { + "Drop": { + "target": "com.amazonaws.mailmanager#DropAction", + "traits": { + "smithy.api#documentation": "

This action terminates the evaluation of rules in the rule set.

" + } + }, + "Relay": { + "target": "com.amazonaws.mailmanager#RelayAction", + "traits": { + "smithy.api#documentation": "

This action relays the email to another SMTP server.

" + } + }, + "Archive": { + "target": "com.amazonaws.mailmanager#ArchiveAction", + "traits": { + "smithy.api#documentation": "

This action archives the email. This can be used to deliver an email to an\n archive.

" + } + }, + "WriteToS3": { + "target": "com.amazonaws.mailmanager#S3Action", + "traits": { + "smithy.api#documentation": "

This action writes the MIME content of the email to an S3 bucket.

" + } + }, + "Send": { + "target": "com.amazonaws.mailmanager#SendAction", + "traits": { + "smithy.api#documentation": "

This action sends the email to the internet.

" + } + }, + "AddHeader": { + "target": "com.amazonaws.mailmanager#AddHeaderAction", + "traits": { + "smithy.api#documentation": "

This action adds a header. This can be used to add arbitrary email headers.

" + } + }, + "ReplaceRecipient": { + "target": "com.amazonaws.mailmanager#ReplaceRecipientAction", + "traits": { + "smithy.api#documentation": "

The action replaces certain or all recipients with a different set of\n recipients.

" + } + }, + "DeliverToMailbox": { + "target": "com.amazonaws.mailmanager#DeliverToMailboxAction", + "traits": { + "smithy.api#documentation": "

This action delivers an email to a WorkMail mailbox.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The action for a rule to take. Only one of the contained actions can be set.

" + } + }, + "com.amazonaws.mailmanager#RuleActions": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleAction" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#RuleBooleanEmailAttribute": { + "type": "enum", + "members": { + "READ_RECEIPT_REQUESTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "READ_RECEIPT_REQUESTED" + } + }, + "TLS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS" + } + }, + "TLS_WRAPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TLS_WRAPPED" + } + } + } + }, + "com.amazonaws.mailmanager#RuleBooleanExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#RuleBooleanToEvaluate", + "traits": { + "smithy.api#documentation": "

The operand on which to perform a boolean condition operation.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#RuleBooleanOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for a boolean condition expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A boolean expression to be used in a rule condition.

" + } + }, + "com.amazonaws.mailmanager#RuleBooleanOperator": { + "type": "enum", + "members": { + "IS_TRUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS_TRUE" + } + }, + "IS_FALSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IS_FALSE" + } + } + } + }, + "com.amazonaws.mailmanager#RuleBooleanToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#RuleBooleanEmailAttribute", + "traits": { + "smithy.api#documentation": "

The boolean type representing the allowed attribute types for an email.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The union type representing the allowed types of operands for a boolean\n condition.

" + } + }, + "com.amazonaws.mailmanager#RuleCondition": { + "type": "union", + "members": { + "BooleanExpression": { + "target": "com.amazonaws.mailmanager#RuleBooleanExpression", + "traits": { + "smithy.api#documentation": "

The condition applies to a boolean expression passed in this field.

" + } + }, + "StringExpression": { + "target": "com.amazonaws.mailmanager#RuleStringExpression", + "traits": { + "smithy.api#documentation": "

The condition applies to a string expression passed in this field.

" + } + }, + "NumberExpression": { + "target": "com.amazonaws.mailmanager#RuleNumberExpression", + "traits": { + "smithy.api#documentation": "

The condition applies to a number expression passed in this field.

" + } + }, + "IpExpression": { + "target": "com.amazonaws.mailmanager#RuleIpExpression", + "traits": { + "smithy.api#documentation": "

The condition applies to an IP address expression passed in this field.

" + } + }, + "VerdictExpression": { + "target": "com.amazonaws.mailmanager#RuleVerdictExpression", + "traits": { + "smithy.api#documentation": "

The condition applies to a verdict expression passed in this field.

" + } + }, + "DmarcExpression": { + "target": "com.amazonaws.mailmanager#RuleDmarcExpression", + "traits": { + "smithy.api#documentation": "

The condition applies to a DMARC policy expression passed in this field.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The conditional expression used to evaluate an email for determining if a rule action\n should be taken.

" + } + }, + "com.amazonaws.mailmanager#RuleConditions": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleCondition" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#RuleDmarcExpression": { + "type": "structure", + "members": { + "Operator": { + "target": "com.amazonaws.mailmanager#RuleDmarcOperator", + "traits": { + "smithy.api#documentation": "

The operator to apply to the DMARC policy of the incoming email.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#RuleDmarcValueList", + "traits": { + "smithy.api#documentation": "

The values to use for the given DMARC policy operator. For the operator EQUALS, if\n multiple values are given, they are evaluated as an OR. That is, if any of the given\n values match, the condition is deemed to match. For the operator NOT_EQUALS, if multiple\n values are given, they are evaluated as an AND. That is, the condition is deemed to match\n only if the email's DMARC policy is not equal to any of the given values.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A DMARC policy expression. The condition matches if the given DMARC policy matches\n that of the incoming email.

" + } + }, + "com.amazonaws.mailmanager#RuleDmarcOperator": { + "type": "enum", + "members": { + "EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS" + } + }, + "NOT_EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS" + } + } + } + }, + "com.amazonaws.mailmanager#RuleDmarcPolicy": { + "type": "enum", + "members": { + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + }, + "QUARANTINE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUARANTINE" + } + }, + "REJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REJECT" + } + } + } + }, + "com.amazonaws.mailmanager#RuleDmarcValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleDmarcPolicy" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#RuleIpEmailAttribute": { + "type": "enum", + "members": { + "SOURCE_IP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SOURCE_IP" + } + } + } + }, + "com.amazonaws.mailmanager#RuleIpExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#RuleIpToEvaluate", + "traits": { + "smithy.api#documentation": "

The IP address to evaluate in this condition.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#RuleIpOperator", + "traits": { + "smithy.api#documentation": "

The operator to evaluate the IP address.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#RuleIpValueList", + "traits": { + "smithy.api#documentation": "

The IP CIDR blocks in the format \"x.y.z.w/n\" (e.g. 10.0.0.0/8) to match with the\n email's IP address. For the operator CIDR_MATCHES, if multiple values are given, they\n are evaluated as an OR. That is, if the IP address is contained within any of the given\n CIDR ranges, the condition is deemed to match. For NOT_CIDR_MATCHES, if multiple CIDR\n ranges are given, the condition is deemed to match if the IP address is not contained in\n any of the given CIDR ranges.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An IP address expression matching certain IP addresses within a given range of IP\n addresses.

" + } + }, + "com.amazonaws.mailmanager#RuleIpOperator": { + "type": "enum", + "members": { + "CIDR_MATCHES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CIDR_MATCHES" + } + }, + "NOT_CIDR_MATCHES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_CIDR_MATCHES" + } + } + } + }, + "com.amazonaws.mailmanager#RuleIpStringValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 18 + }, + "smithy.api#pattern": "^(([0-9]|.|/)*)$" + } + }, + "com.amazonaws.mailmanager#RuleIpToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#RuleIpEmailAttribute", + "traits": { + "smithy.api#documentation": "

The attribute of the email to evaluate.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The IP address to evaluate for this condition.

" + } + }, + "com.amazonaws.mailmanager#RuleIpValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleIpStringValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#RuleName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" + } + }, + "com.amazonaws.mailmanager#RuleNumberEmailAttribute": { + "type": "enum", + "members": { + "MESSAGE_SIZE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MESSAGE_SIZE" + } + } + } + }, + "com.amazonaws.mailmanager#RuleNumberExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#RuleNumberToEvaluate", + "traits": { + "smithy.api#documentation": "

The number to evaluate in a numeric condition expression.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#RuleNumberOperator", + "traits": { + "smithy.api#documentation": "

The operator for a numeric condition expression.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The value to evaluate in a numeric condition expression.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A number expression to match numeric conditions with integers from the incoming\n email.

" + } + }, + "com.amazonaws.mailmanager#RuleNumberOperator": { + "type": "enum", + "members": { + "EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS" + } + }, + "NOT_EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS" + } + }, + "LESS_THAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESS_THAN" + } + }, + "GREATER_THAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_THAN" + } + }, + "LESS_THAN_OR_EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LESS_THAN_OR_EQUAL" + } + }, + "GREATER_THAN_OR_EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GREATER_THAN_OR_EQUAL" + } + } + } + }, + "com.amazonaws.mailmanager#RuleNumberToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#RuleNumberEmailAttribute", + "traits": { + "smithy.api#documentation": "

An email attribute that is used as the number to evaluate.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The number to evaluate in a numeric condition expression.

" + } + }, + "com.amazonaws.mailmanager#RuleSet": { + "type": "structure", + "members": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of the rule set.

" + } + }, + "RuleSetName": { + "target": "com.amazonaws.mailmanager#RuleSetName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the rule set.

" + } + }, + "LastModificationDate": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last modification date of the rule set.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A rule set contains a list of rules that are evaluated in order. Each rule is\n evaluated sequentially for each email.

" + } + }, + "com.amazonaws.mailmanager#RuleSetArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#RuleSetId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.mailmanager#RuleSetName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" + } + }, + "com.amazonaws.mailmanager#RuleSetResource": { + "type": "resource", + "identifiers": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId" + } + }, + "properties": { + "RuleSetArn": { + "target": "com.amazonaws.mailmanager#RuleSetArn" + }, + "RuleSetName": { + "target": "com.amazonaws.mailmanager#RuleSetName" + }, + "CreatedDate": { + "target": "smithy.api#Timestamp" + }, + "LastModificationDate": { + "target": "smithy.api#Timestamp" + }, + "Rules": { + "target": "com.amazonaws.mailmanager#Rules" + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList" + } + }, + "create": { + "target": "com.amazonaws.mailmanager#CreateRuleSet" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetRuleSet" + }, + "update": { + "target": "com.amazonaws.mailmanager#UpdateRuleSet" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteRuleSet" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListRuleSets" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerRuleSet" + } + } + }, + "com.amazonaws.mailmanager#RuleSets": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleSet" + } + }, + "com.amazonaws.mailmanager#RuleStringEmailAttribute": { + "type": "enum", + "members": { + "MAIL_FROM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MAIL_FROM" + } + }, + "HELO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HELO" + } + }, + "RECIPIENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RECIPIENT" + } + }, + "SENDER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SENDER" + } + }, + "FROM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FROM" + } + }, + "SUBJECT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBJECT" + } + }, + "TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TO" + } + }, + "CC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CC" + } + } + } + }, + "com.amazonaws.mailmanager#RuleStringExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#RuleStringToEvaluate", + "traits": { + "smithy.api#documentation": "

The string to evaluate in a string condition expression.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#RuleStringOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for a string condition expression.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#RuleStringList", + "traits": { + "smithy.api#documentation": "

The string(s) to be evaluated in a string condition expression. For all operators\n except NOT_EQUALS, if multiple values are given, the values are processed as an OR.\n That is, if any of the values match the email's string using the given operator, the\n condition is deemed to match. However, for NOT_EQUALS, the condition is only deemed to\n match if none of the given strings match the email's string.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A string expression is evaluated against strings or substrings of the email.

" + } + }, + "com.amazonaws.mailmanager#RuleStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleStringValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#RuleStringOperator": { + "type": "enum", + "members": { + "EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS" + } + }, + "NOT_EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS" + } + }, + "STARTS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STARTS_WITH" + } + }, + "ENDS_WITH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENDS_WITH" + } + }, + "CONTAINS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONTAINS" + } + } + } + }, + "com.amazonaws.mailmanager#RuleStringToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#RuleStringEmailAttribute", + "traits": { + "smithy.api#documentation": "

The email attribute to evaluate in a string condition expression.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The string to evaluate in a string condition expression.

" + } + }, + "com.amazonaws.mailmanager#RuleStringValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + } + } + }, + "com.amazonaws.mailmanager#RuleVerdict": { + "type": "enum", + "members": { + "PASS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PASS" + } + }, + "FAIL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAIL" + } + }, + "GRAY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GRAY" + } + }, + "PROCESSING_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROCESSING_FAILED" + } + } + } + }, + "com.amazonaws.mailmanager#RuleVerdictAttribute": { + "type": "enum", + "members": { + "SPF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SPF" + } + }, + "DKIM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DKIM" + } + } + } + }, + "com.amazonaws.mailmanager#RuleVerdictExpression": { + "type": "structure", + "members": { + "Evaluate": { + "target": "com.amazonaws.mailmanager#RuleVerdictToEvaluate", + "traits": { + "smithy.api#documentation": "

The verdict to evaluate in a verdict condition expression.

", + "smithy.api#required": {} + } + }, + "Operator": { + "target": "com.amazonaws.mailmanager#RuleVerdictOperator", + "traits": { + "smithy.api#documentation": "

The matching operator for a verdict condition expression.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.mailmanager#RuleVerdictValueList", + "traits": { + "smithy.api#documentation": "

The values to match with the email's verdict using the given operator. For the EQUALS\n operator, if multiple values are given, the condition is deemed to match if any of the\n given verdicts match that of the email. For the NOT_EQUALS operator, if multiple values\n are given, the condition is deemed to match if none of the given verdicts match the\n verdict of the email.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A verdict expression is evaluated against verdicts of the email.

" + } + }, + "com.amazonaws.mailmanager#RuleVerdictOperator": { + "type": "enum", + "members": { + "EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EQUALS" + } + }, + "NOT_EQUALS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_EQUALS" + } + } + } + }, + "com.amazonaws.mailmanager#RuleVerdictToEvaluate": { + "type": "union", + "members": { + "Attribute": { + "target": "com.amazonaws.mailmanager#RuleVerdictAttribute", + "traits": { + "smithy.api#documentation": "

The email verdict attribute to evaluate in a string verdict expression.

" + } + }, + "Analysis": { + "target": "com.amazonaws.mailmanager#Analysis", + "traits": { + "smithy.api#documentation": "

The Add On ARN and its returned value to evaluate in a verdict condition\n expression.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The verdict to evaluate in a verdict condition expression.

" + } + }, + "com.amazonaws.mailmanager#RuleVerdictValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#RuleVerdict" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#Rules": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Rule" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 40 + } + } + }, + "com.amazonaws.mailmanager#S3Action": { + "type": "structure", + "members": { + "ActionFailurePolicy": { + "target": "com.amazonaws.mailmanager#ActionFailurePolicy", + "traits": { + "smithy.api#documentation": "

A policy that states what to do in the case of failure. The action will fail if there\n are configuration errors. For example, the specified bucket has been deleted.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.mailmanager#IamRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM Role to use while writing to S3. This role must have access to\n the s3:PutObject, kms:Encrypt, and kms:GenerateDataKey APIs for the given bucket.

", + "smithy.api#required": {} + } + }, + "S3Bucket": { + "target": "com.amazonaws.mailmanager#S3Bucket", + "traits": { + "smithy.api#documentation": "

The bucket name of the S3 bucket to write to.

", + "smithy.api#required": {} + } + }, + "S3Prefix": { + "target": "com.amazonaws.mailmanager#S3Prefix", + "traits": { + "smithy.api#documentation": "

The S3 prefix to use when writing to the S3 bucket.

" + } + }, + "S3SseKmsKeyId": { + "target": "com.amazonaws.mailmanager#KmsKeyId", + "traits": { + "smithy.api#documentation": "

The KMS Key ID to use to encrypt the message in S3.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Writes the MIME content of the email to an S3 bucket.
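A hedged sketch of the corresponding rule action follows; the role, bucket, and prefix are placeholders, and the role must carry the s3:PutObject, kms:Encrypt, and kms:GenerateDataKey permissions called out above when a KMS key is configured:

```swift
// Sketch: write the raw MIME message to an S3 bucket under a prefix.
// Member labels follow Soto's usual lowercased spellings of the shape members above,
// but are not verified against the generated code.
let writeToS3: MailManager.RuleAction = .writeToS3(
    MailManager.S3Action(
        roleArn: "arn:aws:iam::111122223333:role/mail-manager-archive",  // placeholder role
        s3Bucket: "example-incoming-mail",                               // placeholder bucket
        s3Prefix: "inbound/"
    )
)
```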

" + } + }, + "com.amazonaws.mailmanager#S3Bucket": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 62 + }, + "smithy.api#pattern": "^[a-zA-Z0-9.-]+$" + } + }, + "com.amazonaws.mailmanager#S3ExportDestinationConfiguration": { + "type": "structure", + "members": { + "S3Location": { + "target": "com.amazonaws.mailmanager#S3Location", + "traits": { + "smithy.api#documentation": "

The S3 location to deliver the exported email data.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for exporting email data to an Amazon S3 bucket.

" + } + }, + "com.amazonaws.mailmanager#S3Location": { + "type": "string", + "traits": { + "smithy.api#pattern": "^s3://[a-zA-Z0-9.-]{3,63}(/[a-zA-Z0-9!_.*'()/-]*)*$" + } + }, + "com.amazonaws.mailmanager#S3Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 62 + }, + "smithy.api#pattern": "^[a-zA-Z0-9!_.*'()/-]+$" + } + }, + "com.amazonaws.mailmanager#S3PresignedURL": { + "type": "string" + }, + "com.amazonaws.mailmanager#SearchId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.mailmanager#SearchMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1000 + } + } + }, + "com.amazonaws.mailmanager#SearchState": { + "type": "enum", + "members": { + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "CANCELLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CANCELLED" + } + } + } + }, + "com.amazonaws.mailmanager#SearchStatus": { + "type": "structure", + "members": { + "SubmissionTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the search was submitted.

" + } + }, + "CompletionTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the search completed (if finished).

" + } + }, + "State": { + "target": "com.amazonaws.mailmanager#SearchState", + "traits": { + "smithy.api#documentation": "

The current state of the search job.

" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.mailmanager#ErrorMessage", + "traits": { + "smithy.api#documentation": "

An error message if the search failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The current status of an archive search job.

" + } + }, + "com.amazonaws.mailmanager#SearchSummary": { + "type": "structure", + "members": { + "SearchId": { + "target": "com.amazonaws.mailmanager#SearchId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the search job.

" + } + }, + "Status": { + "target": "com.amazonaws.mailmanager#SearchStatus", + "traits": { + "smithy.api#documentation": "

The current status of the search job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary details of an archive search job.

" + } + }, + "com.amazonaws.mailmanager#SearchSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#SearchSummary" + } + }, + "com.amazonaws.mailmanager#SecretArn": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "type": "AWS::SecretsManager::Secret" + }, + "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):secretsmanager:[a-z0-9-]+:\\d{12}:secret:[a-zA-Z0-9/_+=,.@-]+$" + } + }, + "com.amazonaws.mailmanager#SendAction": { + "type": "structure", + "members": { + "ActionFailurePolicy": { + "target": "com.amazonaws.mailmanager#ActionFailurePolicy", + "traits": { + "smithy.api#documentation": "

A policy that states what to do in the case of failure. The action will fail if there are configuration errors. For example, the caller does not have permission to call the sendRawEmail API.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.mailmanager#IamRoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the role to use for this action. This role must have access to the\n ses:SendRawEmail API.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Sends the email to the internet using the ses:SendRawEmail API.

" + } + }, + "com.amazonaws.mailmanager#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.mailmanager#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Occurs when an operation exceeds a predefined service quota or limit.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.mailmanager#SmtpPassword": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 8, + "max": 64 + }, + "smithy.api#pattern": "^[A-Za-z0-9!@#$%^&*()_+\\-=\\[\\]{}|.,?]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.mailmanager#StartArchiveExport": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#StartArchiveExportRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#StartArchiveExportResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Initiates an export of emails from the specified archive.
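
As an illustrative sketch, a StartArchiveExport request could be shaped as follows. The archive identifier, timestamps, and S3 location are hypothetical values, and the assumption that ExportDestinationConfiguration wraps the S3ExportDestinationConfiguration defined in this model under an S3 member is ours, not something stated in this excerpt.

  {
    "ArchiveId": "a-1234567890abcdef",
    "FromTimestamp": "2024-06-01T00:00:00Z",
    "ToTimestamp": "2024-06-02T00:00:00Z",
    "ExportDestinationConfiguration": {
      "S3": {
        "S3Location": "s3://amzn-s3-demo-bucket/mail-exports/"
      }
    }
  }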

" + } + }, + "com.amazonaws.mailmanager#StartArchiveExportRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveId", + "traits": { + "smithy.api#documentation": "

The identifier of the archive to export emails from.

", + "smithy.api#required": {} + } + }, + "Filters": { + "target": "com.amazonaws.mailmanager#ArchiveFilters", + "traits": { + "smithy.api#documentation": "

Criteria to filter which emails are included in the export.

" + } + }, + "FromTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start of the timestamp range to include emails from.

", + "smithy.api#required": {} + } + }, + "ToTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end of the timestamp range to include emails from.

", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.mailmanager#ExportMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of email items to include in the export.

" + } + }, + "ExportDestinationConfiguration": { + "target": "com.amazonaws.mailmanager#ExportDestinationConfiguration", + "traits": { + "smithy.api#documentation": "

Details on where to deliver the exported email data.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to initiate an export of emails from an archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#StartArchiveExportResponse": { + "type": "structure", + "members": { + "ExportId": { + "target": "com.amazonaws.mailmanager#ExportId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the initiated export job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response from initiating an archive export.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#StartArchiveSearch": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#StartArchiveSearchRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#StartArchiveSearchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Initiates a search across emails in the specified archive.
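
A minimal StartArchiveSearch request, sketched with hypothetical values and without the optional Filters member, might look like:

  {
    "ArchiveId": "a-1234567890abcdef",
    "FromTimestamp": "2024-06-01T00:00:00Z",
    "ToTimestamp": "2024-06-02T00:00:00Z",
    "MaxResults": 100
  }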

" + } + }, + "com.amazonaws.mailmanager#StartArchiveSearchRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveId", + "traits": { + "smithy.api#documentation": "

The identifier of the archive to search emails in.

", + "smithy.api#required": {} + } + }, + "Filters": { + "target": "com.amazonaws.mailmanager#ArchiveFilters", + "traits": { + "smithy.api#documentation": "

Criteria to filter which emails are included in the search results.

" + } + }, + "FromTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The start timestamp of the range to search emails from.

", + "smithy.api#required": {} + } + }, + "ToTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The end timestamp of the range to search emails from.

", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.mailmanager#SearchMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of search results to return.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to initiate a search across emails in an archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#StartArchiveSearchResponse": { + "type": "structure", + "members": { + "SearchId": { + "target": "com.amazonaws.mailmanager#SearchId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the initiated search job.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response from initiating an archive search.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#StopArchiveExport": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#StopArchiveExportRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#StopArchiveExportResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Stops an in-progress export of emails from an archive.

" + } + }, + "com.amazonaws.mailmanager#StopArchiveExportRequest": { + "type": "structure", + "members": { + "ExportId": { + "target": "com.amazonaws.mailmanager#ExportId", + "traits": { + "smithy.api#documentation": "

The identifier of the export job to stop.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to stop an in-progress archive export job.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#StopArchiveExportResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The response indicating if the request to stop the export job succeeded.

\n

On success, returns an HTTP 200 status code. On failure, returns an error message.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#StopArchiveSearch": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#StopArchiveSearchRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#StopArchiveSearchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Stops an in-progress archive search job.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#StopArchiveSearchRequest": { + "type": "structure", + "members": { + "SearchId": { + "target": "com.amazonaws.mailmanager#SearchId", + "traits": { + "smithy.api#documentation": "

The identifier of the search job to stop.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to stop an in-progress archive search job.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#StopArchiveSearchResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The response indicating if the request to stop the search job succeeded.

\n

On success, returns an HTTP 200 status code. On failure, returns an error message.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#StringList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.mailmanager#StringValueList": { + "type": "list", + "member": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.mailmanager#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.mailmanager#TagKey", + "traits": { + "smithy.api#documentation": "

The key of the key-value tag.

", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.mailmanager#TagValue", + "traits": { + "smithy.api#documentation": "

The value of the key-value tag.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A key-value pair (the value is optional) that you can define and assign to Amazon Web Services resources.

" + } + }, + "com.amazonaws.mailmanager#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9/_\\+=\\.:@\\-]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.mailmanager#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.mailmanager#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.mailmanager#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Adds one or more tags (keys and values) to a specified resource.
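
For example, a TagResource request might be shaped as follows. The resource ARN and tag values are hypothetical placeholders; Tags is a list of Key/Value pairs as defined by the Tag structure above.

  {
    "ResourceArn": "arn:aws:ses:us-east-1:123456789012:mailmanager-archive/a-1234567890abcdef",
    "Tags": [
      { "Key": "environment", "Value": "production" },
      { "Key": "team", "Value": "mail-ops" }
    ]
  }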

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.mailmanager#TaggableResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource that you want to tag.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.mailmanager#TagList", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for the resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9/_\\+=\\.:@\\-]*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.mailmanager#TaggableResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 1011 + }, + "smithy.api#pattern": "^arn:aws(|-cn|-us-gov):ses:[a-z0-9-]{1,20}:[0-9]{12}:(mailmanager-|addon-).+$" + } + }, + "com.amazonaws.mailmanager#ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.mailmanager#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Occurs when a service's request rate limit is exceeded, resulting in throttling of further requests.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.mailmanager#TrafficPolicy": { + "type": "structure", + "members": { + "TrafficPolicyName": { + "target": "com.amazonaws.mailmanager#TrafficPolicyName", + "traits": { + "smithy.api#documentation": "

A user-friendly name of the traffic policy resource.

", + "smithy.api#required": {} + } + }, + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy resource.

", + "smithy.api#required": {} + } + }, + "DefaultAction": { + "target": "com.amazonaws.mailmanager#AcceptAction", + "traits": { + "smithy.api#documentation": "

The default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of, or are not addressed by, the conditions of your policy statements.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The structure of a traffic policy resource, which is a container for policy statements.

" + } + }, + "com.amazonaws.mailmanager#TrafficPolicyArn": { + "type": "string", + "traits": { + "aws.api#arnReference": {} + } + }, + "com.amazonaws.mailmanager#TrafficPolicyId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.mailmanager#TrafficPolicyList": { + "type": "list", + "member": { + "target": "com.amazonaws.mailmanager#TrafficPolicy" + } + }, + "com.amazonaws.mailmanager#TrafficPolicyName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[A-Za-z0-9_\\-]+$" + } + }, + "com.amazonaws.mailmanager#TrafficPolicyResource": { + "type": "resource", + "identifiers": { + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId" + } + }, + "put": { + "target": "com.amazonaws.mailmanager#CreateTrafficPolicy" + }, + "read": { + "target": "com.amazonaws.mailmanager#GetTrafficPolicy" + }, + "update": { + "target": "com.amazonaws.mailmanager#UpdateTrafficPolicy" + }, + "delete": { + "target": "com.amazonaws.mailmanager#DeleteTrafficPolicy" + }, + "list": { + "target": "com.amazonaws.mailmanager#ListTrafficPolicies" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "MailManagerTrafficPolicy" + }, + "smithy.api#noReplace": {} + } + }, + "com.amazonaws.mailmanager#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Remove one or more tags (keys and values) from a specified resource.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.mailmanager#TaggableResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource that you want to untag.

", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.mailmanager#TagKeyList", + "traits": { + "smithy.api#documentation": "

The keys of the key-value pairs for the tag or tags you want to remove from the\n specified resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#UntagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#UpdateArchive": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#UpdateArchiveRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#UpdateArchiveResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#AccessDeniedException" + }, + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.mailmanager#ThrottlingException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the attributes of an existing email archive.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#UpdateArchiveRequest": { + "type": "structure", + "members": { + "ArchiveId": { + "target": "com.amazonaws.mailmanager#ArchiveIdString", + "traits": { + "smithy.api#documentation": "

The identifier of the archive to update.

", + "smithy.api#required": {} + } + }, + "ArchiveName": { + "target": "com.amazonaws.mailmanager#ArchiveNameString", + "traits": { + "smithy.api#documentation": "

A new, unique name for the archive.

" + } + }, + "Retention": { + "target": "com.amazonaws.mailmanager#ArchiveRetention", + "traits": { + "smithy.api#documentation": "

A new retention period for emails in the archive.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request to update properties of an existing email archive.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#UpdateArchiveResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The response indicating if the archive update succeeded or failed.

\n

On success, returns an HTTP 200 status code. On failure, returns an error\n message.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#UpdateIngressPoint": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#UpdateIngressPointRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#UpdateIngressPointResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Update attributes of a provisioned ingress endpoint resource.

", + "smithy.api#examples": [ + { + "title": "Update Open/Auth IngressPoint with new Name", + "input": { + "IngressPointId": "inp-12345", + "IngressPointName": "ingressPointNewName" + }, + "output": {} + }, + { + "title": "Update Open/Auth IngressPoint with new RuleSetId / TrafficPolicyId", + "input": { + "IngressPointId": "inp-12345", + "RuleSetId": "rs-12345", + "TrafficPolicyId": "tp-12345" + }, + "output": {} + }, + { + "title": "Update Auth IngressPoint with new SmtpPassword", + "input": { + "IngressPointId": "inp-12345", + "IngressPointConfiguration": { + "SmtpPassword": "newSmtpPassword" + } + }, + "output": {} + }, + { + "title": "Update Auth IngressPoint with new SecretArn", + "input": { + "IngressPointId": "inp-12345", + "IngressPointConfiguration": { + "SecretArn": "arn:aws:secretsmanager:us-west-2:123456789012:secret:abcde" + } + }, + "output": {} + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#UpdateIngressPointRequest": { + "type": "structure", + "members": { + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The identifier for the ingress endpoint you want to update.

", + "smithy.api#required": {} + } + }, + "IngressPointName": { + "target": "com.amazonaws.mailmanager#IngressPointName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the ingress endpoint resource.

" + } + }, + "StatusToUpdate": { + "target": "com.amazonaws.mailmanager#IngressPointStatusToUpdate", + "traits": { + "smithy.api#documentation": "

The update status of an ingress endpoint.

" + } + }, + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of an existing rule set that you attach to an ingress endpoint\n resource.

" + } + }, + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of an existing traffic policy that you attach to an ingress endpoint\n resource.

" + } + }, + "IngressPointConfiguration": { + "target": "com.amazonaws.mailmanager#IngressPointConfiguration", + "traits": { + "smithy.api#documentation": "

If you choose an Authenticated ingress endpoint, you must configure either an SMTP password or a secret\n ARN.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#UpdateIngressPointResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#UpdateRelay": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#UpdateRelayRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#UpdateRelayResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the attributes of an existing relay resource.
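
As a sketch, an UpdateRelay request that points the relay at a new server and rotates its credentials might look like the following. The identifiers and secret ARN are hypothetical, and the SecretArn member of the Authentication union is assumed from the description below rather than defined in this excerpt.

  {
    "RelayId": "r-1234567890abcdef",
    "ServerName": "smtp.example.com",
    "ServerPort": 587,
    "Authentication": {
      "SecretArn": "arn:aws:secretsmanager:us-east-1:123456789012:secret:relay-credentials-AbCdEf"
    }
  }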

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#UpdateRelayRequest": { + "type": "structure", + "members": { + "RelayId": { + "target": "com.amazonaws.mailmanager#RelayId", + "traits": { + "smithy.api#documentation": "

The unique relay identifier.

", + "smithy.api#required": {} + } + }, + "RelayName": { + "target": "com.amazonaws.mailmanager#RelayName", + "traits": { + "smithy.api#documentation": "

The name of the relay resource.

" + } + }, + "ServerName": { + "target": "com.amazonaws.mailmanager#RelayServerName", + "traits": { + "smithy.api#documentation": "

The destination relay server address.

" + } + }, + "ServerPort": { + "target": "com.amazonaws.mailmanager#RelayServerPort", + "traits": { + "smithy.api#documentation": "

The destination relay server port.

" + } + }, + "Authentication": { + "target": "com.amazonaws.mailmanager#RelayAuthentication", + "traits": { + "smithy.api#documentation": "

Authentication for the relay destination server. Specify the secret ARN where the SMTP credentials are stored.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#UpdateRelayResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#UpdateRuleSet": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#UpdateRuleSetRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#UpdateRuleSetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Update attributes of an already provisioned rule set.

", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#UpdateRuleSetRequest": { + "type": "structure", + "members": { + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The identifier of a rule set you want to update.

", + "smithy.api#required": {} + } + }, + "RuleSetName": { + "target": "com.amazonaws.mailmanager#RuleSetName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the rule set resource.

" + } + }, + "Rules": { + "target": "com.amazonaws.mailmanager#Rules", + "traits": { + "smithy.api#documentation": "

A new set of rules to replace the current rules of the rule set. These rules will override all the rules of the rule set.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#UpdateRuleSetResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#UpdateTrafficPolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.mailmanager#UpdateTrafficPolicyRequest" + }, + "output": { + "target": "com.amazonaws.mailmanager#UpdateTrafficPolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mailmanager#ConflictException" + }, + { + "target": "com.amazonaws.mailmanager#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.mailmanager#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Update attributes of an already provisioned traffic policy resource.

", + "smithy.api#examples": [ + { + "title": "Update TrafficPolicy with new Name", + "input": { + "TrafficPolicyId": "tp-12345", + "TrafficPolicyName": "trafficPolicyNewName" + }, + "output": {} + }, + { + "title": "Update TrafficPolicy with new PolicyStatements", + "input": { + "TrafficPolicyId": "tp-12345", + "PolicyStatements": [ + { + "Conditions": [ + { + "StringExpression": { + "Evaluate": { + "Attribute": "RECIPIENT" + }, + "Operator": "EQUALS", + "Values": [ + "example@amazon.com", + "example@gmail.com" + ] + } + } + ], + "Action": "ALLOW" + } + ] + }, + "output": {} + }, + { + "title": "Update TrafficPolicy with new DefaultAction", + "input": { + "TrafficPolicyId": "tp-12345", + "DefaultAction": "ALLOW" + }, + "output": {} + } + ], + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.mailmanager#UpdateTrafficPolicyRequest": { + "type": "structure", + "members": { + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The identifier of the traffic policy that you want to update.

", + "smithy.api#required": {} + } + }, + "TrafficPolicyName": { + "target": "com.amazonaws.mailmanager#TrafficPolicyName", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the traffic policy resource.

" + } + }, + "PolicyStatements": { + "target": "com.amazonaws.mailmanager#PolicyStatementList", + "traits": { + "smithy.api#documentation": "

The list of conditions to be updated for filtering email traffic.

" + } + }, + "DefaultAction": { + "target": "com.amazonaws.mailmanager#AcceptAction", + "traits": { + "smithy.api#documentation": "

The default action instructs the traffic policy to either Allow or Deny (block) messages that fall outside of, or are not addressed by, the conditions of your policy statements.

" + } + }, + "MaxMessageSizeBytes": { + "target": "com.amazonaws.mailmanager#MaxMessageSizeBytes", + "traits": { + "smithy.api#documentation": "

The maximum message size, in bytes, of email that this traffic policy allows. Anything larger will be blocked.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mailmanager#UpdateTrafficPolicyResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.mailmanager#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.mailmanager#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request validation has failed. For details, see the accompanying error message.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} \ No newline at end of file diff --git a/models/managedblockchain.json b/models/managedblockchain.json index 708517c9ab..a98e670fd0 100644 --- a/models/managedblockchain.json +++ b/models/managedblockchain.json @@ -350,7 +350,7 @@ "NetworkType": { "target": "com.amazonaws.managedblockchain#AccessorNetworkType", "traits": { - "smithy.api#documentation": "

The blockchain network that the Accessor token is created for.

\n \n

We recommend using the appropriate networkType \n value for the blockchain network that you are creating the Accessor \n token for. You cannnot use the value ETHEREUM_MAINNET_AND_GOERLI to \n specify a networkType for your Accessor token.

\n

The default value of ETHEREUM_MAINNET_AND_GOERLI is only applied:

\n
    \n
  • \n

    when the CreateAccessor action does not set a networkType.

    \n
  • \n
  • \n

    to all existing Accessor tokens that were created before the networkType property was introduced.\n

    \n
  • \n
\n
" + "smithy.api#documentation": "

The blockchain network that the Accessor token is created for.

  • Use the actual networkType value for the blockchain network that you are creating the Accessor token for.

  • With the shut down of the Ethereum Goerli and Polygon Mumbai Testnet networks, the following networkType values are no longer available for selection and use:

      • ETHEREUM_MAINNET_AND_GOERLI

      • ETHEREUM_GOERLI

      • POLYGON_MUMBAI

    However, your existing Accessor tokens with these networkType values will remain unchanged.
" } } }, @@ -671,7 +671,7 @@ "NetworkId": { "target": "com.amazonaws.managedblockchain#ResourceIdString", "traits": { - "smithy.api#documentation": "

The unique identifier of the network for the node.

\n

Ethereum public networks have the following NetworkIds:

\n
    \n
  • \n

    \n n-ethereum-mainnet\n

    \n
  • \n
  • \n

    \n n-ethereum-goerli\n

    \n
  • \n
", + "smithy.api#documentation": "

The unique identifier of the network for the node.

Ethereum public networks have the following NetworkIds:

  • n-ethereum-mainnet
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -986,7 +986,7 @@ "NetworkId": { "target": "com.amazonaws.managedblockchain#ResourceIdString", "traits": { - "smithy.api#documentation": "

The unique identifier of the network that the node is on.

\n

Ethereum public networks have the following NetworkIds:

\n
    \n
  • \n

    \n n-ethereum-mainnet\n

    \n
  • \n
  • \n

    \n n-ethereum-goerli\n

    \n
  • \n
", + "smithy.api#documentation": "

The unique identifier of the network that the node is on.

Ethereum public networks have the following NetworkIds:

  • n-ethereum-mainnet
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2760,7 +2760,7 @@ "ChainId": { "target": "com.amazonaws.managedblockchain#String", "traits": { - "smithy.api#documentation": "

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

\n
    \n
  • \n

    mainnet = 1\n

    \n
  • \n
  • \n

    goerli = 5\n

    \n
  • \n
" + "smithy.api#documentation": "

The Ethereum CHAIN_ID associated with the Ethereum network. Chain IDs are as follows:

  • mainnet = 1
" } } }, diff --git a/models/mediaconvert.json b/models/mediaconvert.json index f895d5fced..7c304a5553 100644 --- a/models/mediaconvert.json +++ b/models/mediaconvert.json @@ -3893,6 +3893,13 @@ "smithy.api#jsonName": "codecSpecification" } }, + "DashIFrameTrickPlayNameModifier": { + "target": "com.amazonaws.mediaconvert#__stringMin1Max256", + "traits": { + "smithy.api#documentation": "Specify whether MediaConvert generates I-frame only video segments for DASH trick play, also known as trick mode. When specified, the I-frame only video segments are included within an additional AdaptationSet in your DASH output manifest. To generate I-frame only video segments: Enter a name as a text string, up to 256 character long. This name is appended to the end of this output group's base filename, that you specify as part of your destination URI, and used for the I-frame only video segment files. You may also include format identifiers. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html#using-settings-variables-with-streaming-outputs To not generate I-frame only video segments: Leave blank.", + "smithy.api#jsonName": "dashIFrameTrickPlayNameModifier" + } + }, "DashManifestStyle": { "target": "com.amazonaws.mediaconvert#DashManifestStyle", "traits": { @@ -5841,6 +5848,13 @@ "smithy.api#jsonName": "baseUrl" } }, + "DashIFrameTrickPlayNameModifier": { + "target": "com.amazonaws.mediaconvert#__stringMin1Max256", + "traits": { + "smithy.api#documentation": "Specify whether MediaConvert generates I-frame only video segments for DASH trick play, also known as trick mode. When specified, the I-frame only video segments are included within an additional AdaptationSet in your DASH output manifest. To generate I-frame only video segments: Enter a name as a text string, up to 256 character long. This name is appended to the end of this output group's base filename, that you specify as part of your destination URI, and used for the I-frame only video segment files. You may also include format identifiers. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/using-variables-in-your-job-settings.html#using-settings-variables-with-streaming-outputs To not generate I-frame only video segments: Leave blank.", + "smithy.api#jsonName": "dashIFrameTrickPlayNameModifier" + } + }, "DashManifestStyle": { "target": "com.amazonaws.mediaconvert#DashManifestStyle", "traits": { @@ -6682,7 +6696,7 @@ "smithy.api#deprecated": { "message": "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead." }, - "smithy.api#documentation": "Send an request with an empty body to the regional API endpoint to get your account API endpoint.", + "smithy.api#documentation": "Send a request with an empty body to the regional API endpoint to get your account API endpoint. Note that DescribeEndpoints is no longer required. We recommend that you send your requests directly to the regional endpoint instead.", "smithy.api#http": { "method": "POST", "uri": "/2017-08-29/endpoints", @@ -13043,14 +13057,14 @@ "type": "structure", "members": { "EndTimecode": { - "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092", + "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092090909", "traits": { "smithy.api#documentation": "Set End timecode to the end of the portion of the input you are clipping. 
The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00.", "smithy.api#jsonName": "endTimecode" } }, "StartTimecode": { - "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092", + "target": "com.amazonaws.mediaconvert#__stringPattern010920405090509092090909", "traits": { "smithy.api#documentation": "Set Start timecode to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00.", "smithy.api#jsonName": "startTimecode" @@ -13471,12 +13485,40 @@ "com.amazonaws.mediaconvert#InputVideoGenerator": { "type": "structure", "members": { + "Channels": { + "target": "com.amazonaws.mediaconvert#__integerMin1Max32", + "traits": { + "smithy.api#documentation": "Specify the number of audio channels to include in your video generator input. MediaConvert creates these audio channels as silent audio within a single audio track. Enter an integer from 1 to 32.", + "smithy.api#jsonName": "channels" + } + }, "Duration": { "target": "com.amazonaws.mediaconvert#__integerMin50Max86400000", "traits": { - "smithy.api#documentation": "Specify an integer value for Black video duration from 50 to 86400000 to generate a black video input for that many milliseconds. Required when you include Video generator.", + "smithy.api#documentation": "Specify the duration, in milliseconds, for your video generator input.\nEnter an integer from 50 to 86400000.", "smithy.api#jsonName": "duration" } + }, + "FramerateDenominator": { + "target": "com.amazonaws.mediaconvert#__integerMin1Max1001", + "traits": { + "smithy.api#documentation": "Specify the denominator of the fraction that represents the frame rate for your video generator input. When you do, you must also specify a value for Frame rate numerator. MediaConvert uses a default frame rate of 29.97 when you leave Frame rate numerator and Frame rate denominator blank.", + "smithy.api#jsonName": "framerateDenominator" + } + }, + "FramerateNumerator": { + "target": "com.amazonaws.mediaconvert#__integerMin1Max60000", + "traits": { + "smithy.api#documentation": "Specify the numerator of the fraction that represents the frame rate for your video generator input. When you do, you must also specify a value for Frame rate denominator. 
MediaConvert uses a default frame rate of 29.97 when you leave Frame rate numerator and Frame rate denominator blank.", + "smithy.api#jsonName": "framerateNumerator" + } + }, + "SampleRate": { + "target": "com.amazonaws.mediaconvert#__integerMin32000Max48000", + "traits": { + "smithy.api#documentation": "Specify the audio sample rate, in Hz, for the silent audio in your video generator input.\nEnter an integer from 32000 to 48000.", + "smithy.api#jsonName": "sampleRate" + } } }, "traits": { @@ -16953,6 +16995,9 @@ { "target": "com.amazonaws.mediaconvert#PutPolicy" }, + { + "target": "com.amazonaws.mediaconvert#SearchJobs" + }, { "target": "com.amazonaws.mediaconvert#TagResource" }, @@ -16981,6 +17026,9 @@ "name": "mediaconvert" }, "aws.protocols#restJson1": {}, + "smithy.api#auth": [ + "aws.auth#sigv4" + ], "smithy.api#documentation": "AWS Elemental MediaConvert", "smithy.api#title": "AWS Elemental MediaConvert", "smithy.rules#endpointRuleSet": { @@ -20255,7 +20303,7 @@ } }, "NameModifier": { - "target": "com.amazonaws.mediaconvert#__stringMin1", + "target": "com.amazonaws.mediaconvert#__stringMin1Max256", "traits": { "smithy.api#documentation": "Use Name modifier to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group.", "smithy.api#jsonName": "nameModifier" @@ -21886,6 +21934,121 @@ "smithy.api#documentation": "Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html." } }, + "com.amazonaws.mediaconvert#SearchJobs": { + "type": "operation", + "input": { + "target": "com.amazonaws.mediaconvert#SearchJobsRequest" + }, + "output": { + "target": "com.amazonaws.mediaconvert#SearchJobsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.mediaconvert#BadRequestException" + }, + { + "target": "com.amazonaws.mediaconvert#ConflictException" + }, + { + "target": "com.amazonaws.mediaconvert#ForbiddenException" + }, + { + "target": "com.amazonaws.mediaconvert#InternalServerErrorException" + }, + { + "target": "com.amazonaws.mediaconvert#NotFoundException" + }, + { + "target": "com.amazonaws.mediaconvert#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieve a JSON array that includes job details for up to twenty of your most recent jobs. Optionally filter results further according to input file, queue, or status. To retrieve the twenty next most recent jobs, use the nextToken string returned with the array.", + "smithy.api#http": { + "method": "GET", + "uri": "/2017-08-29/search", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Jobs", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.mediaconvert#SearchJobsRequest": { + "type": "structure", + "members": { + "InputFile": { + "target": "com.amazonaws.mediaconvert#__string", + "traits": { + "smithy.api#documentation": "Optional. Provide your input file URL or your partial input file name. 
The maximum length for an input file is 300 characters.", + "smithy.api#httpQuery": "inputFile" + } + }, + "MaxResults": { + "target": "com.amazonaws.mediaconvert#__integerMin1Max20", + "traits": { + "smithy.api#documentation": "Optional. Number of jobs, up to twenty, that will be returned at one time.", + "smithy.api#httpQuery": "maxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.mediaconvert#__string", + "traits": { + "smithy.api#documentation": "Optional. Use this string, provided with the response to a previous request, to request the next batch of jobs.", + "smithy.api#httpQuery": "nextToken" + } + }, + "Order": { + "target": "com.amazonaws.mediaconvert#Order", + "traits": { + "smithy.api#documentation": "Optional. When you request lists of resources, you can specify whether they are sorted in ASCENDING or DESCENDING order. Default varies by resource.", + "smithy.api#httpQuery": "order" + } + }, + "Queue": { + "target": "com.amazonaws.mediaconvert#__string", + "traits": { + "smithy.api#documentation": "Optional. Provide a queue name, or a queue ARN, to return only jobs from that queue.", + "smithy.api#httpQuery": "queue" + } + }, + "Status": { + "target": "com.amazonaws.mediaconvert#JobStatus", + "traits": { + "smithy.api#documentation": "Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR.", + "smithy.api#httpQuery": "status" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.mediaconvert#SearchJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "target": "com.amazonaws.mediaconvert#__listOfJob", + "traits": { + "smithy.api#documentation": "List of jobs.", + "smithy.api#jsonName": "jobs" + } + }, + "NextToken": { + "target": "com.amazonaws.mediaconvert#__string", + "traits": { + "smithy.api#documentation": "Use this string to request the next batch of jobs.", + "smithy.api#jsonName": "nextToken" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.mediaconvert#SimulateReservedQueue": { "type": "enum", "members": { @@ -26964,6 +27127,12 @@ "smithy.api#pattern": "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}$" } }, + "com.amazonaws.mediaconvert#__stringPattern010920405090509092090909": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([01][0-9]|2[0-4]):[0-5][0-9]:[0-5][0-9][:;][0-9]{2}(@[0-9]+(\\.[0-9]+)?(:[0-9]+)?)?$" + } + }, "com.amazonaws.mediaconvert#__stringPattern01D20305D205D": { "type": "string", "traits": { diff --git a/models/medialive.json b/models/medialive.json index 08e24986a1..319afa1374 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -1545,6 +1545,13 @@ "smithy.api#documentation": "Controls how SCTE-35 messages create cues. Splice Insert mode treats all segmentation signals traditionally. With Time Signal APOS mode only Time Signal Placement Opportunity and Break messages create segment breaks. With ESAM mode, signals are forwarded to an ESAM server for possible update.", "smithy.api#jsonName": "availSettings" } + }, + "Scte35SegmentationScope": { + "target": "com.amazonaws.medialive#Scte35SegmentationScope", + "traits": { + "smithy.api#documentation": "Configures whether SCTE 35 passthrough triggers segment breaks in all output groups that use segmented outputs. Insertion of a SCTE 35 message typically results in a segment break, in addition to the regular cadence of breaks. The segment breaks appear in video outputs, audio outputs, and captions outputs (if any).\n\nALL_OUTPUT_GROUPS: Default. 
Insert the segment break in in all output groups that have segmented outputs. This is the legacy behavior.\nSCTE35_ENABLED_OUTPUT_GROUPS: Insert the segment break only in output groups that have SCTE 35 passthrough enabled. This is the recommended value, because it reduces unnecessary segment breaks.", + "smithy.api#jsonName": "scte35SegmentationScope" + } } }, "traits": { @@ -25567,6 +25574,26 @@ "smithy.api#documentation": "Corresponds to SCTE-35 segmentation_descriptor." } }, + "com.amazonaws.medialive#Scte35SegmentationScope": { + "type": "enum", + "members": { + "ALL_OUTPUT_GROUPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_OUTPUT_GROUPS" + } + }, + "SCTE35_ENABLED_OUTPUT_GROUPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SCTE35_ENABLED_OUTPUT_GROUPS" + } + } + }, + "traits": { + "smithy.api#documentation": "Scte35 Segmentation Scope" + } + }, "com.amazonaws.medialive#Scte35SpliceInsert": { "type": "structure", "members": { diff --git a/models/mediapackagev2.json b/models/mediapackagev2.json index 96dbab3844..74d29edc6f 100644 --- a/models/mediapackagev2.json +++ b/models/mediapackagev2.json @@ -180,6 +180,12 @@ "traits": { "smithy.api#documentation": "

Any descriptive information that you want to add to the channel for future identification purposes.

" } + }, + "InputType": { + "target": "com.amazonaws.mediapackagev2#InputType", + "traits": { + "smithy.api#documentation": "

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).
" + } } }, "traits": { @@ -388,6 +394,7 @@ "input": { "ChannelGroupName": "exampleChannelGroup", "ChannelName": "exampleChannel", + "InputType": "HLS", "Description": "Description for exampleChannel", "Tags": { "key1": "value1", @@ -401,6 +408,7 @@ "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannel", "CreatedAt": "2022-10-18T09:36:00.00Z", "ModifiedAt": "2022-10-18T09:36:00.00Z", + "InputType": "HLS", "IngestEndpoints": [ { "Id": "1", @@ -623,6 +631,12 @@ "smithy.api#idempotencyToken": {} } }, + "InputType": { + "target": "com.amazonaws.mediapackagev2#InputType", + "traits": { + "smithy.api#documentation": "

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).
" + } + }, "Description": { "target": "com.amazonaws.mediapackagev2#ResourceDescription", "traits": { @@ -689,6 +703,12 @@ "IngestEndpoints": { "target": "com.amazonaws.mediapackagev2#IngestEndpointList" }, + "InputType": { + "target": "com.amazonaws.mediapackagev2#InputType", + "traits": { + "smithy.api#documentation": "

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).
" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -953,6 +973,14 @@ "ContainerType": "TS", "Description": "Description for exampleOriginEndpointTS", "StartoverWindowSeconds": 300, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 6, "SegmentName": "segmentName", @@ -1040,6 +1068,14 @@ "ModifiedAt": "2022-10-18T09:36:00.00Z", "ContainerType": "TS", "StartoverWindowSeconds": 300, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 6, "SegmentName": "segmentName", @@ -1131,6 +1167,14 @@ "OriginEndpointName": "exampleOriginEndpointCMAF", "ContainerType": "CMAF", "StartoverWindowSeconds": 300, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 6, "SegmentName": "segmentName", @@ -1251,6 +1295,14 @@ "ModifiedAt": "2022-10-18T09:36:00.00Z", "ContainerType": "CMAF", "StartoverWindowSeconds": 300, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 6, "SegmentName": "segmentName", @@ -1462,6 +1514,12 @@ "smithy.api#documentation": "

A DASH manifest configuration.

" } }, + "ForceEndpointErrorConfiguration": { + "target": "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration", + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } + }, "Tags": { "target": "com.amazonaws.mediapackagev2#TagMap", "traits": { @@ -1563,6 +1621,12 @@ "smithy.api#documentation": "

A DASH manifest configuration.

" } }, + "ForceEndpointErrorConfiguration": { + "target": "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration", + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -2221,6 +2285,41 @@ "smithy.api#documentation": "

The encryption type.

" } }, + "com.amazonaws.mediapackagev2#EndpointErrorCondition": { + "type": "enum", + "members": { + "STALE_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STALE_MANIFEST" + } + }, + "INCOMPLETE_MANIFEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCOMPLETE_MANIFEST" + } + }, + "MISSING_DRM_KEY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISSING_DRM_KEY" + } + }, + "SLATE_INPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SLATE_INPUT" + } + } + } + }, + "com.amazonaws.mediapackagev2#EndpointErrorConditions": { + "type": "list", + "member": { + "target": "com.amazonaws.mediapackagev2#EndpointErrorCondition" + } + }, "com.amazonaws.mediapackagev2#EntityTag": { "type": "string", "traits": { @@ -2271,6 +2370,20 @@ "smithy.api#documentation": "

Filter configuration includes settings for manifest filtering, start and end times, and time delay that apply to all of your egress requests for this manifest.

" } }, + "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration": { + "type": "structure", + "members": { + "EndpointErrorConditions": { + "target": "com.amazonaws.mediapackagev2#EndpointErrorConditions", + "traits": { + "smithy.api#documentation": "

The failover conditions for the endpoint. The options are:

  • STALE_MANIFEST - The manifest stalled and there are no new segments or parts.

  • INCOMPLETE_MANIFEST - There is a gap in the manifest.

  • MISSING_DRM_KEY - Key rotation is enabled but we're unable to fetch the key for the current key period.

  • SLATE_INPUT - The segments which contain slate content are considered to be missing content.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } + }, "com.amazonaws.mediapackagev2#GetChannel": { "type": "operation", "input": { @@ -2313,6 +2426,7 @@ "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannel", "CreatedAt": "2022-10-18T09:36:00.00Z", "ModifiedAt": "2022-10-18T09:36:00.00Z", + "InputType": "HLS", "IngestEndpoints": [ { "Id": "1", @@ -2647,6 +2761,12 @@ "IngestEndpoints": { "target": "com.amazonaws.mediapackagev2#IngestEndpointList" }, + "InputType": { + "target": "com.amazonaws.mediapackagev2#InputType", + "traits": { + "smithy.api#documentation": "

The input type will be an immutable field which will be used to define whether the channel will allow CMAF ingest or HLS ingest. If unprovided, it will default to HLS to preserve current behavior.

The allowed values are:

  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).

  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).
" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -2901,6 +3021,14 @@ "ModifiedAt": "2022-10-18T09:36:00.00Z", "ContainerType": "TS", "StartoverWindowSeconds": 300, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 6, "SegmentName": "segmentName", @@ -3225,6 +3353,18 @@ "smithy.api#documentation": "

A low-latency HLS manifest configuration.

" } }, + "DashManifests": { + "target": "com.amazonaws.mediapackagev2#GetDashManifests", + "traits": { + "smithy.api#documentation": "

A DASH manifest configuration.

" + } + }, + "ForceEndpointErrorConfiguration": { + "target": "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration", + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -3238,12 +3378,6 @@ "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The comma-separated list of tag key:value pairs assigned to the origin endpoint.

" } - }, - "DashManifests": { - "target": "com.amazonaws.mediapackagev2#GetDashManifests", - "traits": { - "smithy.api#documentation": "

A DASH manifest configuration.

" - } } }, "traits": { @@ -3289,6 +3423,23 @@ "smithy.api#documentation": "

The list of ingest endpoints.

" } }, + "com.amazonaws.mediapackagev2#InputType": { + "type": "enum", + "members": { + "HLS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HLS" + } + }, + "CMAF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CMAF" + } + } + } + }, "com.amazonaws.mediapackagev2#InternalServerException": { "type": "structure", "members": { @@ -3666,6 +3817,14 @@ "CreatedAt": "2022-10-18T09:36:00.00Z", "ModifiedAt": "2022-10-18T09:36:00.00Z", "ContainerType": "TS", + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "HlsManifests": [ { "ManifestName": "exampleManifest1", @@ -3699,6 +3858,14 @@ "CreatedAt": "2022-10-18T09:36:00.00Z", "ModifiedAt": "2022-10-18T09:36:00.00Z", "ContainerType": "CMAF", + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "HlsManifests": [ { "ManifestName": "exampleManifest1", @@ -3972,6 +4139,12 @@ "traits": { "smithy.api#documentation": "

A DASH manifest configuration.

" } + }, + "ForceEndpointErrorConfiguration": { + "target": "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration", + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } } }, "traits": { @@ -4885,6 +5058,7 @@ "Arn": "arn:aws:mediapackagev2:us-west-2:123456789012:channelGroup/exampleChannelGroup/channel/exampleChannel", "CreatedAt": "2022-10-18T09:36:00.00Z", "ModifiedAt": "2022-10-18T10:36:00.00Z", + "InputType": "HLS", "IngestEndpoints": [ { "Id": "1", @@ -5151,6 +5325,12 @@ "IngestEndpoints": { "target": "com.amazonaws.mediapackagev2#IngestEndpointList" }, + "InputType": { + "target": "com.amazonaws.mediapackagev2#InputType", + "traits": { + "smithy.api#documentation": "

The input type is an immutable field that defines whether the channel allows CMAF ingest or HLS ingest. If not provided, it defaults to HLS to preserve current behavior.

The allowed values are:
  • HLS - The HLS streaming specification (which defines M3U8 manifests and TS segments).
  • CMAF - The DASH-IF CMAF Ingest specification (which defines CMAF segments with optional DASH manifests).
" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -5215,6 +5395,14 @@ "ContainerType": "TS", "Description": "Updated description for exampleOriginEndpointTS", "StartoverWindowSeconds": 600, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 7, "SegmentName": "segmentName2", @@ -5297,6 +5485,14 @@ "ModifiedAt": "2022-10-18T09:36:00.00Z", "ContainerType": "TS", "StartoverWindowSeconds": 600, + "ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": [ + "STALE_MANIFEST", + "INCOMPLETE_MANIFEST", + "MISSING_DRM_KEY", + "SLATE_INPUT" + ] + }, "Segment": { "SegmentDurationSeconds": 7, "SegmentName": "segmentName2", @@ -5465,6 +5661,12 @@ "smithy.api#documentation": "

A DASH manifest configuration.

" } }, + "ForceEndpointErrorConfiguration": { + "target": "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration", + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -5561,6 +5763,12 @@ "smithy.api#documentation": "

A low-latency HLS manifest configuration.

" } }, + "ForceEndpointErrorConfiguration": { + "target": "com.amazonaws.mediapackagev2#ForceEndpointErrorConfiguration", + "traits": { + "smithy.api#documentation": "

The failover settings for the endpoint.

" + } + }, "ETag": { "target": "com.amazonaws.mediapackagev2#EntityTag", "traits": { @@ -5877,6 +6085,18 @@ "traits": { "smithy.api#enumValue": "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS" } + }, + "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION" + } + }, + "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY" + } } } }, diff --git a/models/medical-imaging.json b/models/medical-imaging.json index 36b404b30f..4d0db1b204 100644 --- a/models/medical-imaging.json +++ b/models/medical-imaging.json @@ -770,6 +770,16 @@ "smithy.api#pattern": "^arn:aws((-us-gov)|(-iso)|(-iso-b)|(-cn))?:medical-imaging:[a-z0-9-]+:[0-9]{12}:datastore/[0-9a-z]{32}(/imageset/[0-9a-z]{32})?$" } }, + "com.amazonaws.medicalimaging#AwsAccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 12 + }, + "smithy.api#pattern": "^\\d+$" + } + }, "com.amazonaws.medicalimaging#ClientToken": { "type": "string", "traits": { @@ -3409,7 +3419,7 @@ } ], "traits": { - "smithy.api#documentation": "

Search image sets based on defined input attributes.

\n \n

\n SearchImageSets accepts a single search \n query parameter and returns a paginated response of all image sets that have the \n matching criteria. All range queries must be input as (lowerBound, upperBound).

\n

\n SearchImageSets uses the updatedAt field for sorting \n in decreasing order from latest to oldest.

\n
", + "smithy.api#documentation": "

Search image sets based on defined input attributes.

\n \n

\n SearchImageSets accepts a single search query parameter and returns a paginated\n response of all image sets that have the matching criteria. All date range queries must be input\n as (lowerBound, upperBound).

\n

By default, SearchImageSets uses the updatedAt field for sorting \n in descending order from newest to oldest.

\n
", "smithy.api#endpoint": { "hostPrefix": "runtime-" }, @@ -3656,6 +3666,12 @@ "smithy.api#documentation": "

The output prefix of the S3 bucket to upload the results of the DICOM import job.

", "smithy.api#required": {} } + }, + "inputOwnerAccountId": { + "target": "com.amazonaws.medicalimaging#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the source S3 bucket owner.

" + } } }, "traits": { diff --git a/models/mwaa.json b/models/mwaa.json index 7e15e90fa9..8f7136ac42 100644 --- a/models/mwaa.json +++ b/models/mwaa.json @@ -1115,7 +1115,7 @@ "EnvironmentClass": { "target": "com.amazonaws.mwaa#EnvironmentClass", "traits": { - "smithy.api#documentation": "

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class.

" + "smithy.api#documentation": "

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

" } }, "MaxWorkers": { @@ -1169,7 +1169,7 @@ "Schedulers": { "target": "com.amazonaws.mwaa#Schedulers", "traits": { - "smithy.api#documentation": "

The number of Apache Airflow schedulers to run in your environment. Valid values:

  • v2 - Accepts between 2 to 5. Defaults to 2.
  • v1 - Accepts 1.
" + "smithy.api#documentation": "

The number of Apache Airflow schedulers to run in your environment. Valid values:

  • v2 - Accepts between 2 to 5. Defaults to 2.
  • v1 - Accepts 1.
" } }, "EndpointManagement": { @@ -1177,6 +1177,18 @@ "traits": { "smithy.api#documentation": "

Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in\n your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints for your VPC. If you choose to create an environment in a shared VPC, you must set this value to CUSTOMER.\n In a shared VPC deployment, the environment will remain in PENDING status until you create the VPC endpoints. If you do not take action to\n create the endpoints within 72 hours, the status will change to CREATE_FAILED. You can delete the failed environment and create a new one.

" } + }, + "MinWebservers": { + "target": "com.amazonaws.mwaa#MinWebservers", + "traits": { + "smithy.api#documentation": "

The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. As the transaction-per-second rate and the network load decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

\n

Valid values: Accepts between 2 and 5. Defaults to 2.

" + } + }, + "MaxWebservers": { + "target": "com.amazonaws.mwaa#MaxWebservers", + "traits": { + "smithy.api#documentation": "

The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

\n

Valid values: Accepts between 2 and 5. Defaults to 2.

" + } } }, "traits": { @@ -1428,7 +1440,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version on your environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2.

" + "smithy.api#documentation": "

The Apache Airflow version on your environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1.

" } }, "SourceBucketArn": { @@ -1488,7 +1500,7 @@ "EnvironmentClass": { "target": "com.amazonaws.mwaa#EnvironmentClass", "traits": { - "smithy.api#documentation": "

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class.

" + "smithy.api#documentation": "

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

" } }, "MaxWorkers": { @@ -1568,6 +1580,18 @@ "traits": { "smithy.api#documentation": "

Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to SERVICE, Amazon MWAA will create and manage the required VPC endpoints in\n your VPC. If set to CUSTOMER, you must create, and manage, the VPC endpoints in your VPC.

" } + }, + "MinWebservers": { + "target": "com.amazonaws.mwaa#MinWebservers", + "traits": { + "smithy.api#documentation": "

The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. As the transaction-per-second rate and the network load decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

\n

Valid values: Accepts between 2 and 5. Defaults to 2.

" + } + }, + "MaxWebservers": { + "target": "com.amazonaws.mwaa#MaxWebservers", + "traits": { + "smithy.api#documentation": "

The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

\n

Valid values: Accepts between 2 and 5. Defaults to 2.

" + } } }, "traits": { @@ -2049,6 +2073,14 @@ ] } }, + "com.amazonaws.mwaa#MaxWebservers": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 2 + } + } + }, "com.amazonaws.mwaa#MaxWorkers": { "type": "integer", "traits": { @@ -2117,6 +2149,14 @@ "smithy.api#documentation": "

\n Internal only. Collects Apache Airflow metrics. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

" } }, + "com.amazonaws.mwaa#MinWebservers": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 2 + } + } + }, "com.amazonaws.mwaa#MinWorkers": { "type": "integer", "traits": { @@ -2728,7 +2768,7 @@ "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

\n

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating\n your resources, see Upgrading an Amazon MWAA environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2.

" + "smithy.api#documentation": "

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

\n

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating\n your resources, see Upgrading an Amazon MWAA environment.

\n

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1.

" } }, "SourceBucketArn": { @@ -2788,7 +2828,7 @@ "EnvironmentClass": { "target": "com.amazonaws.mwaa#EnvironmentClass", "traits": { - "smithy.api#documentation": "

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large. For more information, see Amazon MWAA environment class.

" + "smithy.api#documentation": "

The environment class type. Valid values: mw1.small, mw1.medium, mw1.large, mw1.xlarge, and mw1.2xlarge. For more information, see Amazon MWAA environment class.

" } }, "MaxWorkers": { @@ -2832,6 +2872,18 @@ "traits": { "smithy.api#documentation": "

The number of Apache Airflow schedulers to run in your Amazon MWAA environment.

" } + }, + "MinWebservers": { + "target": "com.amazonaws.mwaa#MinWebservers", + "traits": { + "smithy.api#documentation": "

The minimum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. As the transaction-per-second rate and the network load decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

\n

Valid values: Accepts between 2 and 5. Defaults to 2.

" + } + }, + "MaxWebservers": { + "target": "com.amazonaws.mwaa#MaxWebservers", + "traits": { + "smithy.api#documentation": "

The maximum number of web servers that you want to run in your environment. Amazon MWAA scales the number of Apache Airflow web servers up to the number you specify for MaxWebservers when you interact with your Apache Airflow environment using the Apache Airflow REST API or the Apache Airflow CLI. For example, in scenarios where your workload requires network calls to the Apache Airflow REST API with a high transaction-per-second (TPS) rate, Amazon MWAA will increase the number of web servers up to the number set in MaxWebservers. As TPS rates decrease, Amazon MWAA disposes of the additional web servers and scales down to the number set in MinWebservers.

\n

Valid values: Accepts between 2 and 5. Defaults to 2.

" + } } } }, diff --git a/models/networkmanager.json b/models/networkmanager.json index 96701f32c9..42f5b4a817 100644 --- a/models/networkmanager.json +++ b/models/networkmanager.json @@ -603,6 +603,12 @@ "smithy.api#documentation": "

The name of the segment attachment.

" } }, + "NetworkFunctionGroupName": { + "target": "com.amazonaws.networkmanager#NetworkFunctionGroupName", + "traits": { + "smithy.api#documentation": "

The name of the network function group.

" + } + }, "Tags": { "target": "com.amazonaws.networkmanager#TagList", "traits": { @@ -615,6 +621,12 @@ "smithy.api#documentation": "

The attachment to move from one segment to another.

" } }, + "ProposedNetworkFunctionGroupChange": { + "target": "com.amazonaws.networkmanager#ProposedNetworkFunctionGroupChange", + "traits": { + "smithy.api#documentation": "

Describes a proposed change to a network function group associated with the attachment.

" + } + }, "CreatedAt": { "target": "com.amazonaws.networkmanager#DateTime", "traits": { @@ -626,12 +638,115 @@ "traits": { "smithy.api#documentation": "

The timestamp when the attachment was last updated.

" } + }, + "LastModificationErrors": { + "target": "com.amazonaws.networkmanager#AttachmentErrorList", + "traits": { + "smithy.api#documentation": "

Describes the error associated with the attachment request.

" + } } }, "traits": { "smithy.api#documentation": "

Describes a core network attachment.

" } }, + "com.amazonaws.networkmanager#AttachmentError": { + "type": "structure", + "members": { + "Code": { + "target": "com.amazonaws.networkmanager#AttachmentErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for the attachment request.

" + } + }, + "Message": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The message associated with the error code.

" + } + }, + "ResourceArn": { + "target": "com.amazonaws.networkmanager#ResourceArn", + "traits": { + "smithy.api#documentation": "

The ARN of the requested attachment resource.

" + } + }, + "RequestId": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The ID of the attachment request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the error associated with an attachment request.

" + } + }, + "com.amazonaws.networkmanager#AttachmentErrorCode": { + "type": "enum", + "members": { + "VPC_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VPC_NOT_FOUND" + } + }, + "SUBNET_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBNET_NOT_FOUND" + } + }, + "SUBNET_DUPLICATED_IN_AVAILABILITY_ZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBNET_DUPLICATED_IN_AVAILABILITY_ZONE" + } + }, + "SUBNET_NO_FREE_ADDRESSES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBNET_NO_FREE_ADDRESSES" + } + }, + "SUBNET_UNSUPPORTED_AVAILABILITY_ZONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBNET_UNSUPPORTED_AVAILABILITY_ZONE" + } + }, + "SUBNET_NO_IPV6_CIDRS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBNET_NO_IPV6_CIDRS" + } + }, + "VPN_CONNECTION_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VPN_CONNECTION_NOT_FOUND" + } + }, + "MAXIMUM_NO_ENCAP_LIMIT_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MAXIMUM_NO_ENCAP_LIMIT_EXCEEDED" + } + } + } + }, + "com.amazonaws.networkmanager#AttachmentErrorList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#AttachmentError" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.networkmanager#AttachmentId": { "type": "string", "traits": { @@ -878,6 +993,12 @@ "smithy.api#enumValue": "CORE_NETWORK_SEGMENT" } }, + "NETWORK_FUNCTION_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NETWORK_FUNCTION_GROUP" + } + }, "CORE_NETWORK_EDGE": { "target": "smithy.api#Unit", "traits": { @@ -1062,7 +1183,13 @@ "SubnetArn": { "target": "com.amazonaws.networkmanager#SubnetArn", "traits": { - "smithy.api#documentation": "

The subnet ARN for the Connect peer.

" + "smithy.api#documentation": "

The subnet ARN for the Connect peer. This applies only when the protocol is NO_ENCAP.

" + } + }, + "LastModificationErrors": { + "target": "com.amazonaws.networkmanager#ConnectPeerErrorList", + "traits": { + "smithy.api#documentation": "

Describes the error associated with the Connect peer request.

" } } }, @@ -1219,6 +1346,91 @@ "smithy.api#documentation": "

Describes a core network Connect peer configuration.

" } }, + "com.amazonaws.networkmanager#ConnectPeerError": { + "type": "structure", + "members": { + "Code": { + "target": "com.amazonaws.networkmanager#ConnectPeerErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for the Connect peer request.

" + } + }, + "Message": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The message associated with the error code.

" + } + }, + "ResourceArn": { + "target": "com.amazonaws.networkmanager#ResourceArn", + "traits": { + "smithy.api#documentation": "

The ARN of the requested Connect peer resource.

" + } + }, + "RequestId": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The ID of the Connect peer request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an error associated with a Connect peer request.

" + } + }, + "com.amazonaws.networkmanager#ConnectPeerErrorCode": { + "type": "enum", + "members": { + "EDGE_LOCATION_NO_FREE_IPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EDGE_LOCATION_NO_FREE_IPS" + } + }, + "EDGE_LOCATION_PEER_DUPLICATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EDGE_LOCATION_PEER_DUPLICATE" + } + }, + "SUBNET_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUBNET_NOT_FOUND" + } + }, + "IP_OUTSIDE_SUBNET_CIDR_RANGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IP_OUTSIDE_SUBNET_CIDR_RANGE" + } + }, + "INVALID_INSIDE_CIDR_BLOCK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_INSIDE_CIDR_BLOCK" + } + }, + "NO_ASSOCIATED_CIDR_BLOCK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_ASSOCIATED_CIDR_BLOCK" + } + } + } + }, + "com.amazonaws.networkmanager#ConnectPeerErrorList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#ConnectPeerError" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.networkmanager#ConnectPeerId": { "type": "string", "traits": { @@ -1582,6 +1794,12 @@ "smithy.api#documentation": "

The segments within a core network.

" } }, + "NetworkFunctionGroups": { + "target": "com.amazonaws.networkmanager#CoreNetworkNetworkFunctionGroupList", + "traits": { + "smithy.api#documentation": "

The network function groups associated with a core network.

" + } + }, "Edges": { "target": "com.amazonaws.networkmanager#CoreNetworkEdgeList", "traits": { @@ -1718,6 +1936,12 @@ "smithy.api#documentation": "

The segment name if the change event is associated with a segment.

" } }, + "NetworkFunctionGroupName": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The changed network function group name.

" + } + }, "AttachmentId": { "target": "com.amazonaws.networkmanager#AttachmentId", "traits": { @@ -1750,6 +1974,12 @@ "smithy.api#documentation": "

The names of the segments in a core network.

" } }, + "NetworkFunctionGroupName": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The network function group name if the change event is associated with a network function group.

" + } + }, "EdgeLocations": { "target": "com.amazonaws.networkmanager#ExternalRegionCodeList", "traits": { @@ -1785,6 +2015,12 @@ "traits": { "smithy.api#documentation": "

The shared segments for a core network change value.

" } + }, + "ServiceInsertionActions": { + "target": "com.amazonaws.networkmanager#ServiceInsertionActionList", + "traits": { + "smithy.api#documentation": "

Describes the service insertion action.

" + } } }, "traits": { @@ -1833,6 +2069,64 @@ "smithy.api#pattern": "^core-network-([0-9a-f]{8,17})$" } }, + "com.amazonaws.networkmanager#CoreNetworkNetworkFunctionGroup": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The name of the network function group.

" + } + }, + "EdgeLocations": { + "target": "com.amazonaws.networkmanager#ExternalRegionCodeList", + "traits": { + "smithy.api#documentation": "

The core network edge locations.

" + } + }, + "Segments": { + "target": "com.amazonaws.networkmanager#ServiceInsertionSegments", + "traits": { + "smithy.api#documentation": "

The segments associated with the network function group.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a network function group.

" + } + }, + "com.amazonaws.networkmanager#CoreNetworkNetworkFunctionGroupIdentifier": { + "type": "structure", + "members": { + "CoreNetworkId": { + "target": "com.amazonaws.networkmanager#CoreNetworkId", + "traits": { + "smithy.api#documentation": "

The ID of the core network.

" + } + }, + "NetworkFunctionGroupName": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The network function group name.

" + } + }, + "EdgeLocation": { + "target": "com.amazonaws.networkmanager#ExternalRegionCode", + "traits": { + "smithy.api#documentation": "

The location for the core network edge.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Identifies a network function group within a core network.

" + } + }, + "com.amazonaws.networkmanager#CoreNetworkNetworkFunctionGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#CoreNetworkNetworkFunctionGroup" + } + }, "com.amazonaws.networkmanager#CoreNetworkPolicy": { "type": "structure", "members": { @@ -2315,7 +2609,7 @@ "CoreNetworkAddress": { "target": "com.amazonaws.networkmanager#IPAddress", "traits": { - "smithy.api#documentation": "

A Connect peer core network address.

" + "smithy.api#documentation": "

A Connect peer core network address. This applies only when the protocol is GRE.

" } }, "PeerAddress": { @@ -2328,7 +2622,7 @@ "BgpOptions": { "target": "com.amazonaws.networkmanager#BgpOptions", "traits": { - "smithy.api#documentation": "

The Connect peer BGP options.

" + "smithy.api#documentation": "

The Connect peer BGP options. This applies only when the protocol is GRE.

" } }, "InsideCidrBlocks": { @@ -2353,7 +2647,7 @@ "SubnetArn": { "target": "com.amazonaws.networkmanager#SubnetArn", "traits": { - "smithy.api#documentation": "

The subnet ARN for the Connect peer.

" + "smithy.api#documentation": "

The subnet ARN for the Connect peer. This applies only when the protocol is NO_ENCAP.

" } } }, @@ -4806,6 +5100,38 @@ "smithy.api#output": {} } }, + "com.amazonaws.networkmanager#EdgeOverride": { + "type": "structure", + "members": { + "EdgeSets": { + "target": "com.amazonaws.networkmanager#EdgeSetList", + "traits": { + "smithy.api#documentation": "

The list of edge locations.

" + } + }, + "UseEdge": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The edge that should be used when overriding the current edge order.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the edge that's used for the override.

" + } + }, + "com.amazonaws.networkmanager#EdgeSet": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#ConstrainedString" + } + }, + "com.amazonaws.networkmanager#EdgeSetList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#EdgeSet" + } + }, "com.amazonaws.networkmanager#ExceptionContextKey": { "type": "string" }, @@ -6087,7 +6413,7 @@ "ResourceType": { "target": "com.amazonaws.networkmanager#ConstrainedString", "traits": { - "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • connection
  • device
  • link
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
", + "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • attachment
  • connect-peer
  • connection
  • core-network
  • device
  • link
  • peering
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
", "smithy.api#httpQuery": "resourceType" } }, @@ -6212,7 +6538,7 @@ "ResourceType": { "target": "com.amazonaws.networkmanager#ConstrainedString", "traits": { - "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • connection
  • device
  • link
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
", + "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • attachment
  • connect-peer
  • connection
  • core-network
  • device
  • link
  • peering
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
", "smithy.api#httpQuery": "resourceType" } }, @@ -6344,7 +6670,7 @@ "ResourceType": { "target": "com.amazonaws.networkmanager#ConstrainedString", "traits": { - "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:

The following are the supported resource types for Network Manager:
  • connection - The definition model is Connection.
  • device - The definition model is Device.
  • link - The definition model is Link.
  • site - The definition model is Site.

The following are the supported resource types for Amazon VPC:
\n ", + "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • attachment
  • connect-peer
  • connection
  • core-network
  • device
  • link
  • peering
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
", "smithy.api#httpQuery": "resourceType" } }, @@ -6619,7 +6945,7 @@ "ResourceType": { "target": "com.amazonaws.networkmanager#ConstrainedString", "traits": { - "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • connection
  • device
  • link
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
", + "smithy.api#documentation": "

The resource type. The following are the supported resource types:

  • connect-peer
  • transit-gateway-connect-peer
  • vpn-connection
", "smithy.api#httpQuery": "resourceType" } }, @@ -8333,6 +8659,32 @@ } } }, + "com.amazonaws.networkmanager#NetworkFunctionGroup": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The name of the network function group.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a network function group for service insertion.

" + } + }, + "com.amazonaws.networkmanager#NetworkFunctionGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#NetworkFunctionGroup" + } + }, + "com.amazonaws.networkmanager#NetworkFunctionGroupName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, "com.amazonaws.networkmanager#NetworkManager": { "type": "service", "version": "2019-07-05", @@ -9431,7 +9783,7 @@ "ResourceType": { "target": "com.amazonaws.networkmanager#ConstrainedString", "traits": { - "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • connection
  • device
  • link
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
" + "smithy.api#documentation": "

The resource type.

The following are the supported resource types for Direct Connect:
  • dxcon
  • dx-gateway
  • dx-vif

The following are the supported resource types for Network Manager:
  • attachment
  • connect-peer
  • connection
  • core-network
  • device
  • link
  • peering
  • site

The following are the supported resource types for Amazon VPC:
  • customer-gateway
  • transit-gateway
  • transit-gateway-attachment
  • transit-gateway-connect-peer
  • transit-gateway-route-table
  • vpn-connection
" } }, "ResourceId": { @@ -9620,6 +9972,12 @@ "smithy.api#documentation": "

The name of the segment.

" } }, + "NetworkFunctionGroupName": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The network function group name associated with the destination.

" + } + }, "EdgeLocation": { "target": "com.amazonaws.networkmanager#ExternalRegionCode", "traits": { @@ -9879,12 +10237,109 @@ "traits": { "smithy.api#documentation": "

The timestamp when the attachment peer was created.

" } + }, + "LastModificationErrors": { + "target": "com.amazonaws.networkmanager#PeeringErrorList", + "traits": { + "smithy.api#documentation": "

Describes the error associated with the peering request.

" + } } }, "traits": { "smithy.api#documentation": "

Describes a peering connection.

" } }, + "com.amazonaws.networkmanager#PeeringError": { + "type": "structure", + "members": { + "Code": { + "target": "com.amazonaws.networkmanager#PeeringErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for the peering request.

" + } + }, + "Message": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The message associated with the error code.

" + } + }, + "ResourceArn": { + "target": "com.amazonaws.networkmanager#ResourceArn", + "traits": { + "smithy.api#documentation": "

The ARN of the requested peering resource.

" + } + }, + "RequestId": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The ID of the Peering request.

" + } + }, + "MissingPermissionsContext": { + "target": "com.amazonaws.networkmanager#PermissionsErrorContext", + "traits": { + "smithy.api#documentation": "

Provides additional information about missing permissions for the peering\n error.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an error associated with a peering request.

" + } + }, + "com.amazonaws.networkmanager#PeeringErrorCode": { + "type": "enum", + "members": { + "TRANSIT_GATEWAY_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRANSIT_GATEWAY_NOT_FOUND" + } + }, + "TRANSIT_GATEWAY_PEERS_LIMIT_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRANSIT_GATEWAY_PEERS_LIMIT_EXCEEDED" + } + }, + "MISSING_REQUIRED_PERMISSIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISSING_PERMISSIONS" + } + }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_ERROR" + } + }, + "EDGE_LOCATION_PEER_DUPLICATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EDGE_LOCATION_PEER_DUPLICATE" + } + }, + "INVALID_TRANSIT_GATEWAY_STATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_TRANSIT_GATEWAY_STATE" + } + } + } + }, + "com.amazonaws.networkmanager#PeeringErrorList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#PeeringError" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.networkmanager#PeeringId": { "type": "string", "traits": { @@ -9941,6 +10396,46 @@ } } }, + "com.amazonaws.networkmanager#PermissionsErrorContext": { + "type": "structure", + "members": { + "MissingPermission": { + "target": "com.amazonaws.networkmanager#ServerSideString", + "traits": { + "smithy.api#documentation": "

The missing permissions.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes additional information about missing permissions.

" + } + }, + "com.amazonaws.networkmanager#ProposedNetworkFunctionGroupChange": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.networkmanager#TagList", + "traits": { + "smithy.api#documentation": "

The list of proposed changes to the key-value tags associated with the network function group.

" + } + }, + "AttachmentPolicyRuleNumber": { + "target": "com.amazonaws.networkmanager#Integer", + "traits": { + "smithy.api#documentation": "

The proposed new attachment policy rule number for the network function group.

" + } + }, + "NetworkFunctionGroupName": { + "target": "com.amazonaws.networkmanager#ConstrainedString", + "traits": { + "smithy.api#documentation": "

The proposed name change for the network function group name.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes proposed changes to a network function group.

" + } + }, "com.amazonaws.networkmanager#ProposedSegmentChange": { "type": "structure", "members": { @@ -10766,6 +11261,12 @@ "traits": { "smithy.api#documentation": "

The segment edge in a core network.

" } + }, + "CoreNetworkNetworkFunctionGroup": { + "target": "com.amazonaws.networkmanager#CoreNetworkNetworkFunctionGroupIdentifier", + "traits": { + "smithy.api#documentation": "

The route table identifier associated with the network function group.

" + } } }, "traits": { @@ -10786,6 +11287,12 @@ "traits": { "smithy.api#enumValue": "CORE_NETWORK_SEGMENT" } + }, + "NETWORK_FUNCTION_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NETWORK_FUNCTION_GROUP" + } } } }, @@ -10821,6 +11328,40 @@ } } }, + "com.amazonaws.networkmanager#SegmentActionServiceInsertion": { + "type": "enum", + "members": { + "SEND_VIA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "send-via" + } + }, + "SEND_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "send-to" + } + } + } + }, + "com.amazonaws.networkmanager#SendViaMode": { + "type": "enum", + "members": { + "DUAL_HOP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dual-hop" + } + }, + "SINGLE_HOP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "single-hop" + } + } + } + }, "com.amazonaws.networkmanager#ServerSideString": { "type": "string", "traits": { @@ -10831,6 +11372,64 @@ "smithy.api#pattern": "^[\\s\\S]*$" } }, + "com.amazonaws.networkmanager#ServiceInsertionAction": { + "type": "structure", + "members": { + "Action": { + "target": "com.amazonaws.networkmanager#SegmentActionServiceInsertion", + "traits": { + "smithy.api#documentation": "

The action the service insertion takes for traffic. send-via sends east-west traffic between attachments. send-to sends north-south traffic to the security appliance, and then from that to either the Internet or to an on-premises location.

" + } + }, + "Mode": { + "target": "com.amazonaws.networkmanager#SendViaMode", + "traits": { + "smithy.api#documentation": "

Describes the mode packets take for the send-via action. This is not used when the action is send-to. For dual-hop, packets traverse attachments in both the source and destination core network edges. This mode requires an inspection attachment to be present in all Regions of the service insertion-enabled segments. For single-hop, packets traverse a single intermediate inserted attachment. You can use EdgeOverride to specify a specific edge to use.

" + } + }, + "WhenSentTo": { + "target": "com.amazonaws.networkmanager#WhenSentTo", + "traits": { + "smithy.api#documentation": "

The list of destination segments if the service insertion action is send-via.

" + } + }, + "Via": { + "target": "com.amazonaws.networkmanager#Via", + "traits": { + "smithy.api#documentation": "

The list of network function groups and any edge overrides for the chosen service insertion action. Used for both the send-to and send-via actions.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the action that the service insertion will take for any segments associated with it.

" + } + }, + "com.amazonaws.networkmanager#ServiceInsertionActionList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#ServiceInsertionAction" + } + }, + "com.amazonaws.networkmanager#ServiceInsertionSegments": { + "type": "structure", + "members": { + "SendVia": { + "target": "com.amazonaws.networkmanager#ConstrainedStringList", + "traits": { + "smithy.api#documentation": "

The list of segments associated with the send-via action.

" + } + }, + "SendTo": { + "target": "com.amazonaws.networkmanager#ConstrainedStringList", + "traits": { + "smithy.api#documentation": "

The list of segments associated with the send-to action.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the segments associated with the service insertion action.

" + } + }, "com.amazonaws.networkmanager#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -12527,6 +13126,26 @@ } } }, + "com.amazonaws.networkmanager#Via": { + "type": "structure", + "members": { + "NetworkFunctionGroups": { + "target": "com.amazonaws.networkmanager#NetworkFunctionGroupList", + "traits": { + "smithy.api#documentation": "

The list of network function groups associated with the service insertion action.

" + } + }, + "WithEdgeOverrides": { + "target": "com.amazonaws.networkmanager#WithEdgeOverridesList", + "traits": { + "smithy.api#documentation": "

Describes any edge overrides. An edge override is a specific edge to be used for traffic.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The list of network function groups and edge overrides for the service insertion\n action. Used for both the send-to and send-via actions.

" + } + }, "com.amazonaws.networkmanager#VpcArn": { "type": "string", "traits": { @@ -12594,6 +13213,32 @@ }, "smithy.api#pattern": "^arn:[^:]{1,63}:ec2:[^:]{0,63}:[^:]{0,63}:vpn-connection\\/vpn-[0-9a-f]{8,17}$" } + }, + "com.amazonaws.networkmanager#WhenSentTo": { + "type": "structure", + "members": { + "WhenSentToSegmentsList": { + "target": "com.amazonaws.networkmanager#WhenSentToSegmentsList", + "traits": { + "smithy.api#documentation": "

The list of destination segments when the service insertion action is send-to.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Displays a list of the destination segments. Used only when the service insertion\n action is send-to.

" + } + }, + "com.amazonaws.networkmanager#WhenSentToSegmentsList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#ConstrainedString" + } + }, + "com.amazonaws.networkmanager#WithEdgeOverridesList": { + "type": "list", + "member": { + "target": "com.amazonaws.networkmanager#EdgeOverride" + } } } } diff --git a/models/opensearch.json b/models/opensearch.json index 078f76fc48..a04daec615 100644 --- a/models/opensearch.json +++ b/models/opensearch.json @@ -29,6 +29,51 @@ ] }, "shapes": { + "com.amazonaws.opensearch#AIMLOptionsInput": { + "type": "structure", + "members": { + "NaturalLanguageQueryGenerationOptions": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsInput", + "traits": { + "smithy.api#documentation": "

Container for parameters required for natural language query generation on the specified domain.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable all machine learning features.

" + } + }, + "com.amazonaws.opensearch#AIMLOptionsOutput": { + "type": "structure", + "members": { + "NaturalLanguageQueryGenerationOptions": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsOutput", + "traits": { + "smithy.api#documentation": "

Container for parameters required for natural language query generation on the specified domain.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters representing the state of machine learning features on the specified domain.

" + } + }, + "com.amazonaws.opensearch#AIMLOptionsStatus": { + "type": "structure", + "members": { + "Options": { + "target": "com.amazonaws.opensearch#AIMLOptionsOutput", + "traits": { + "smithy.api#documentation": "

Machine learning options on the specified domain.

" + } + }, + "Status": { + "target": "com.amazonaws.opensearch#OptionStatus" + } + }, + "traits": { + "smithy.api#documentation": "

The status of machine learning options on the specified domain.

" + } + }, "com.amazonaws.opensearch#ARN": { "type": "string", "traits": { @@ -485,6 +530,12 @@ "smithy.api#documentation": "

Container for information about the SAML configuration for OpenSearch Dashboards.

" } }, + "JWTOptions": { + "target": "com.amazonaws.opensearch#JWTOptionsOutput", + "traits": { + "smithy.api#documentation": "

Container for information about the JWT configuration of the Amazon OpenSearch Service.

" + } + }, "AnonymousAuthDisableDate": { "target": "com.amazonaws.opensearch#DisableTimestamp", "traits": { @@ -529,6 +580,12 @@ "smithy.api#documentation": "

Container for information about the SAML configuration for OpenSearch Dashboards.

" } }, + "JWTOptions": { + "target": "com.amazonaws.opensearch#JWTOptionsInput", + "traits": { + "smithy.api#documentation": "

Container for information about the JWT configuration of the Amazon OpenSearch Service.

" + } + }, "AnonymousAuthEnabled": { "target": "com.amazonaws.opensearch#Boolean", "traits": { @@ -3306,6 +3363,12 @@ "traits": { "smithy.api#documentation": "

Software update options for the domain.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsInput", + "traits": { + "smithy.api#documentation": "

Options for all machine learning features for the specified domain.

" + } } }, "traits": { @@ -3669,6 +3732,12 @@ "traits": { "smithy.api#documentation": "

A description of the data source.

" } + }, + "Status": { + "target": "com.amazonaws.opensearch#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

" + } } }, "traits": { @@ -3691,6 +3760,23 @@ "smithy.api#pattern": "^[a-z][a-z0-9_]+$" } }, + "com.amazonaws.opensearch#DataSourceStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.opensearch#DataSourceType": { "type": "union", "members": { @@ -5675,6 +5761,12 @@ "traits": { "smithy.api#documentation": "

Information about the domain properties that are currently being modified.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsStatus", + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable all machine learning features.

" + } } }, "traits": { @@ -6210,7 +6302,7 @@ "DomainEndpointV2HostedZoneId": { "target": "com.amazonaws.opensearch#HostedZoneId", "traits": { - "smithy.api#documentation": "

The DualStack Hosted Zone Id for the domain.

" + "smithy.api#documentation": "

The dual stack hosted zone ID for the domain.

" } }, "Processing": { @@ -6351,6 +6443,12 @@ "traits": { "smithy.api#documentation": "

Information about the domain properties that are currently being modified.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsOutput", + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable all machine learning features.

" + } } }, "traits": { @@ -6856,6 +6954,12 @@ "traits": { "smithy.api#documentation": "

A description of the data source.

" } + }, + "Status": { + "target": "com.amazonaws.opensearch#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source.

" + } } }, "traits": { @@ -7612,6 +7716,70 @@ "target": "com.amazonaws.opensearch#Issue" } }, + "com.amazonaws.opensearch#JWTOptionsInput": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.opensearch#Boolean", + "traits": { + "smithy.api#documentation": "

True to enable JWT authentication and authorization for a domain.

" + } + }, + "SubjectKey": { + "target": "com.amazonaws.opensearch#SubjectKey", + "traits": { + "smithy.api#documentation": "

Element of the JWT assertion to use for the user name.

" + } + }, + "RolesKey": { + "target": "com.amazonaws.opensearch#RolesKey", + "traits": { + "smithy.api#documentation": "

Element of the JWT assertion to use for roles.

" + } + }, + "PublicKey": { + "target": "com.amazonaws.opensearch#String", + "traits": { + "smithy.api#documentation": "

Element of the JWT assertion used by the cluster to verify JWT signatures.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The JWT authentication and authorization configuration for an Amazon OpenSearch Service domain.

" + } + }, + "com.amazonaws.opensearch#JWTOptionsOutput": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.opensearch#Boolean", + "traits": { + "smithy.api#documentation": "

True if JWT use is enabled.

" + } + }, + "SubjectKey": { + "target": "com.amazonaws.opensearch#String", + "traits": { + "smithy.api#documentation": "

The key used for matching the JWT subject attribute.

" + } + }, + "RolesKey": { + "target": "com.amazonaws.opensearch#String", + "traits": { + "smithy.api#documentation": "

The key used for matching the JWT roles attribute.

" + } + }, + "PublicKey": { + "target": "com.amazonaws.opensearch#String", + "traits": { + "smithy.api#documentation": "

The key used to verify the signature of incoming JWT requests.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the JWT options configured for the domain.
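For reference, assuming Soto's generator surfaces these new Smithy shapes with its usual nested types and lowerCamelCase member names, enabling JWT authentication for a domain might look roughly like the sketch below; the member names come from the JWTOptionsInput shape above, while the exact generated initializer and the SotoOpenSearch module name are assumptions.

    import SotoOpenSearch

    // Sketch only: a placeholder PEM-encoded key the cluster would use to verify
    // JWT signatures (the PublicKey member above).
    let publicKeyPEM = "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----"

    // Members mirror the JWTOptionsInput shape: Enabled, PublicKey, RolesKey, SubjectKey.
    let jwtOptions = OpenSearch.JWTOptionsInput(
        enabled: true,
        publicKey: publicKeyPEM,
        rolesKey: "roles",    // JWT claim carrying the user's roles
        subjectKey: "sub"     // JWT claim used as the user name
    )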

" + } + }, "com.amazonaws.opensearch#KmsKeyId": { "type": "string", "traits": { @@ -8931,6 +9099,104 @@ "target": "com.amazonaws.opensearch#ModifyingProperties" } }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationCurrentState": { + "type": "enum", + "members": { + "NotEnabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_ENABLED" + } + }, + "EnableComplete": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_COMPLETE" + } + }, + "EnableInProgress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_IN_PROGRESS" + } + }, + "EnableFailed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLE_FAILED" + } + }, + "DisableComplete": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_COMPLETE" + } + }, + "DisableInProgress": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_IN_PROGRESS" + } + }, + "DisableFailed": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLE_FAILED" + } + } + } + }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationDesiredState": { + "type": "enum", + "members": { + "Enabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "Disabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsInput": { + "type": "structure", + "members": { + "DesiredState": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationDesiredState", + "traits": { + "smithy.api#documentation": "

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters required to enable the natural language query generation feature.

" + } + }, + "com.amazonaws.opensearch#NaturalLanguageQueryGenerationOptionsOutput": { + "type": "structure", + "members": { + "DesiredState": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationDesiredState", + "traits": { + "smithy.api#documentation": "

The desired state of the natural language query generation feature. Valid values are ENABLED and DISABLED.

" + } + }, + "CurrentState": { + "target": "com.amazonaws.opensearch#NaturalLanguageQueryGenerationCurrentState", + "traits": { + "smithy.api#documentation": "

The current state of the natural language query generation feature, indicating completion, in progress, or failure.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Container for parameters representing the state of the natural language query generation feature on the specified domain.
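Assuming the same generation conventions, opting a domain into natural language query generation might be expressed roughly as below; only the nested shape defined above is sketched, since the members of AIMLOptionsInput itself are not shown in this hunk, and the generated enum case names are assumptions.

    import SotoOpenSearch

    // Sketch only: ask for the feature to be enabled. Progress is then reported via
    // NaturalLanguageQueryGenerationCurrentState (ENABLE_IN_PROGRESS, ENABLE_COMPLETE,
    // ENABLE_FAILED, and the DISABLE_* counterparts).
    let nlqOptions = OpenSearch.NaturalLanguageQueryGenerationOptionsInput(
        desiredState: .enabled
    )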

" + } + }, "com.amazonaws.opensearch#NextToken": { "type": "string", "traits": { @@ -10925,6 +11191,15 @@ "smithy.api#pattern": "^arn:(aws|aws\\-cn|aws\\-us\\-gov|aws\\-iso|aws\\-iso\\-b):iam::[0-9]+:role\\/" } }, + "com.amazonaws.opensearch#RolesKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, "com.amazonaws.opensearch#RollbackOnDisable": { "type": "enum", "members": { @@ -11739,6 +12014,15 @@ "target": "com.amazonaws.opensearch#String" } }, + "com.amazonaws.opensearch#SubjectKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, "com.amazonaws.opensearch#TLSSecurityPolicy": { "type": "enum", "members": { @@ -11915,6 +12199,12 @@ "traits": { "smithy.api#documentation": "

A new description of the data source.

" } + }, + "Status": { + "target": "com.amazonaws.opensearch#DataSourceStatus", + "traits": { + "smithy.api#documentation": "

The status of the data source update.
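A minimal sketch of how the new Status member would surface in generated Swift, assuming Soto's usual enum generation (the case names are assumptions derived from the DataSourceStatus values above):

    import SotoOpenSearch

    // Sketch only: the Status member of the update request takes one of the
    // DataSourceStatus values (ACTIVE or DISABLED).
    let newStatus: OpenSearch.DataSourceStatus = .disabled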

" + } } }, "traits": { @@ -12092,6 +12382,12 @@ "traits": { "smithy.api#documentation": "

Service software update options for the domain.

" } + }, + "AIMLOptions": { + "target": "com.amazonaws.opensearch#AIMLOptionsInput", + "traits": { + "smithy.api#documentation": "

Options for all machine learning features for the specified domain.

" + } } }, "traits": { diff --git a/models/opsworks.json b/models/opsworks.json index 4398a58ebf..042be462c6 100644 --- a/models/opsworks.json +++ b/models/opsworks.json @@ -139,7 +139,7 @@ "Environment": { "target": "com.amazonaws.opsworks#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

An array of EnvironmentVariable objects that specify environment variables to be\n associated with the app. After you deploy the app, these variables are defined on the\n associated app server instances. For more information, see Environment Variables.

\n \n

There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variable names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases, but if you do exceed it, you will cause an exception (API) with an \"Environment: is too large (maximum is 20 KB)\" message.

\n
" + "smithy.api#documentation": "

An array of EnvironmentVariable objects that specify environment variables to be\n associated with the app. After you deploy the app, these variables are defined on the\n associated app server instances. For more information, see Environment Variables.

\n \n

There is no specific limit on the number of environment variables. \n However, the size of the associated data structure - which includes the variable names, values, and protected flag \n values - cannot exceed 20 KB. This limit should accommodate most if not all use cases, but if you do exceed it, you \n will cause an exception (API) with an \"Environment: is too large (maximum is 20 KB)\" message.

\n
" } } }, @@ -272,7 +272,7 @@ } ], "traits": { - "smithy.api#documentation": "

Assign a registered instance to a layer.

\n
    \n
  • \n

    You can assign registered on-premises instances to any layer type.

    \n
  • \n
  • \n

    You can assign registered Amazon EC2 instances only to custom layers.

    \n
  • \n
  • \n

    You cannot use this action with instances that were created with AWS OpsWorks Stacks.

    \n
  • \n
\n

\n Required Permissions: To use this action, an AWS Identity and Access Management\n (IAM) user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Assign a registered instance to a layer.

\n
    \n
  • \n

    You can assign registered on-premises instances to any layer type.

    \n
  • \n
  • \n

    You can assign registered Amazon EC2 instances only to custom layers.

    \n
  • \n
  • \n

    You cannot use this action with instances that were created with OpsWorks Stacks.

    \n
  • \n
\n

\n Required Permissions: To use this action, an Identity and Access Management\n (IAM) user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#AssignInstanceRequest": { @@ -314,7 +314,7 @@ } ], "traits": { - "smithy.api#documentation": "

Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must\n first be registered with the stack by calling RegisterVolume. After you register the\n volume, you must call UpdateVolume to specify a mount point before calling\n AssignVolume. For more information, see Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Assigns one of the stack's registered Amazon EBS volumes to a specified instance. The volume must\n first be registered with the stack by calling RegisterVolume. After you register the\n volume, you must call UpdateVolume to specify a mount point before calling\n AssignVolume. For more information, see Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage \n permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#AssignVolumeRequest": { @@ -355,7 +355,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates one of the stack's registered Elastic IP addresses with a specified instance. The\n address must first be registered with the stack by calling RegisterElasticIp. For more\n information, see Resource\n Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Associates one of the stack's registered Elastic IP addresses with a specified instance. The\n address must first be registered with the stack by calling RegisterElasticIp. For more\n information, see Resource\n Management.

\n

\n Required Permissions: To use this action, an IAM user must have a \n Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#AssociateElasticIpRequest": { @@ -396,7 +396,7 @@ } ], "traits": { - "smithy.api#documentation": "

Attaches an Elastic Load Balancing load balancer to a specified layer. AWS OpsWorks Stacks does not support \n Application Load Balancer. You can only use Classic Load Balancer with AWS OpsWorks Stacks. \n For more information, see Elastic Load\n Balancing.

\n \n

You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For\n more information, see Elastic\n Load Balancing Developer Guide.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Attaches an Elastic Load Balancing load balancer to a specified layer. OpsWorks Stacks does not support \n Application Load Balancer. You can only use Classic Load Balancer with OpsWorks Stacks. \n For more information, see Elastic Load\n Balancing.

\n \n

You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, or CLI. For\n more information, see the Elastic Load Balancing Developer Guide.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#AttachElasticLoadBalancerRequest": { @@ -439,7 +439,7 @@ "IgnoreMetricsTime": { "target": "com.amazonaws.opsworks#Minute", "traits": { - "smithy.api#documentation": "

The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks Stacks should ignore metrics\n and suppress additional scaling events. For example, AWS OpsWorks Stacks adds new instances following\n an upscaling event but the instances won't start reducing the load until they have been booted\n and configured. There is no point in raising additional scaling events during that operation,\n which typically takes several minutes. IgnoreMetricsTime allows you to direct\n AWS OpsWorks Stacks to suppress scaling events long enough to get the new instances online.

" + "smithy.api#documentation": "

The amount of time (in minutes) after a scaling event occurs that OpsWorks Stacks should ignore metrics\n and suppress additional scaling events. For example, OpsWorks Stacks adds new instances following\n an upscaling event but the instances won't start reducing the load until they have been booted\n and configured. There is no point in raising additional scaling events during that operation,\n which typically takes several minutes. IgnoreMetricsTime allows you to direct\n OpsWorks Stacks to suppress scaling events long enough to get the new instances online.

" } }, "CpuThreshold": { @@ -457,18 +457,18 @@ "LoadThreshold": { "target": "com.amazonaws.opsworks#Double", "traits": { - "smithy.api#documentation": "

The load threshold. A value of -1 disables the threshold. For more information about how load is computed, see Load (computing).

" + "smithy.api#documentation": "

The load threshold. A value of -1 disables the threshold. For more information about how load is computed, \n see Load (computing).

" } }, "Alarms": { "target": "com.amazonaws.opsworks#Strings", "traits": { - "smithy.api#documentation": "

Custom Cloudwatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names,\n which are case sensitive and must be in the same region as the stack.

\n \n

To use custom alarms, you must update your service role to allow\n cloudwatch:DescribeAlarms. You can either have AWS OpsWorks Stacks update the role for\n you when you first use this feature or you can edit the role manually. For more information,\n see Allowing AWS OpsWorks Stacks to Act on Your Behalf.

\n
" + "smithy.api#documentation": "

Custom CloudWatch auto scaling alarms, to be used as thresholds. This parameter takes a list of up to five alarm names,\n which are case sensitive and must be in the same region as the stack.

\n \n

To use custom alarms, you must update your service role to allow\n cloudwatch:DescribeAlarms. You can either have OpsWorks Stacks update the role for\n you when you first use this feature or you can edit the role manually. For more information,\n see Allowing OpsWorks Stacks to Act on Your Behalf.

\n
" } } }, "traits": { - "smithy.api#documentation": "

Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when AWS OpsWorks Stacks starts or stops load-based instances.

" + "smithy.api#documentation": "

Describes a load-based auto scaling upscaling or downscaling threshold configuration, which specifies when OpsWorks Stacks starts or \n stops load-based instances.
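A rough illustration of how these threshold members combine, using only the members visible in this hunk; the generated initializer and the SotoOpsWorks module name are assumptions based on Soto's usual conventions.

    import SotoOpsWorks

    // Sketch only: scale up on sustained CPU or load, honor one custom CloudWatch alarm
    // (hypothetical name), and ignore metrics for 5 minutes after a scaling event.
    let upscaling = OpsWorks.AutoScalingThresholds(
        alarms: ["my-high-load-alarm"],
        cpuThreshold: 80.0,
        ignoreMetricsTime: 5,
        loadThreshold: 10.0
    )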

" } }, "com.amazonaws.opsworks#AutoScalingType": { @@ -494,7 +494,7 @@ "DeviceName": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The device name that is exposed to the instance, such as /dev/sdh. For the root\n device, you can use the explicit device name or you can set this parameter to\n ROOT_DEVICE and AWS OpsWorks Stacks will provide the correct device name.

" + "smithy.api#documentation": "

The device name that is exposed to the instance, such as /dev/sdh. For the root\n device, you can use the explicit device name or you can set this parameter to\n ROOT_DEVICE and OpsWorks Stacks will provide the correct device name.

" } }, "NoDevice": { @@ -582,19 +582,19 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The cloned stack name.

" + "smithy.api#documentation": "

The cloned stack name. Stack names can be a maximum of 64 characters.

" } }, "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The cloned stack AWS region, such as \"ap-northeast-2\". For more information about AWS regions, see\n Regions and Endpoints.

" + "smithy.api#documentation": "

The cloned stack Amazon Web Services Region, such as ap-northeast-2. For more information \n about Amazon Web Services Regions, see\n Regions and Endpoints.

" } }, "VpcId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All\n instances are launched into this VPC, and you cannot change the ID later.

\n
    \n
  • \n

    If your account supports EC2 Classic, the default value is no VPC.

    \n
  • \n
  • \n

    If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

    \n
  • \n
\n

If the VPC ID corresponds to a default VPC and you have specified either the\n DefaultAvailabilityZone or the DefaultSubnetId parameter only,\n AWS OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets\n these parameters to the first valid Availability Zone for the specified region and the\n corresponding default VPC subnet ID, respectively.

\n

If you specify a nondefault VPC ID, note the following:

\n
    \n
  • \n

    It must belong to a VPC in your account that is in the specified region.

    \n
  • \n
  • \n

    You must specify a value for DefaultSubnetId.

    \n
  • \n
\n

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a\n VPC. For more information about default VPC and EC2 Classic, see Supported\n Platforms.

" + "smithy.api#documentation": "

The ID of the VPC that the cloned stack is to be launched into. It must be in the specified region. All\n instances are launched into this VPC, and you cannot change the ID later.

\n
    \n
  • \n

    If your account supports EC2 Classic, the default value is no VPC.

    \n
  • \n
  • \n

    If your account does not support EC2 Classic, the default value is the default VPC for the specified region.

    \n
  • \n
\n

If the VPC ID corresponds to a default VPC and you have specified either the\n DefaultAvailabilityZone or the DefaultSubnetId parameter only,\n OpsWorks Stacks infers the value of the other parameter. If you specify neither parameter, OpsWorks Stacks sets\n these parameters to the first valid Availability Zone for the specified region and the\n corresponding default VPC subnet ID, respectively.

\n

If you specify a nondefault VPC ID, note the following:

\n
    \n
  • \n

    It must belong to a VPC in your account that is in the specified region.

    \n
  • \n
  • \n

    You must specify a value for DefaultSubnetId.

    \n
  • \n
\n

For more information about how to use OpsWorks Stacks with a VPC, see Running a Stack in a\n VPC. For more information about default VPC and EC2 Classic, see Supported\n Platforms.

" } }, "Attributes": { @@ -606,20 +606,20 @@ "ServiceRoleArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS\n resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an\n existing IAM role. If you create a stack by using the AWS OpsWorks Stacks console, it creates the role for\n you. You can obtain an existing stack's IAM ARN programmatically by calling\n DescribePermissions. For more information about IAM ARNs, see Using\n Identifiers.

\n \n

You must set this parameter to a valid service role ARN or the action will fail; there is no default value. You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

\n
", + "smithy.api#documentation": "

The stack Identity and Access Management (IAM) role, which allows OpsWorks Stacks to work with Amazon Web Services\n resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an\n existing IAM role. If you create a stack by using the OpsWorks Stacks console, it creates the role for\n you. You can obtain an existing stack's IAM ARN programmatically by calling\n DescribePermissions. For more information about IAM ARNs, see \n Using\n Identifiers.

\n \n

You must set this parameter to a valid service role ARN or the action will fail; there is no default value. \n You can specify the source stack's service role ARN, if you prefer, but you must do so explicitly.

\n
", "smithy.api#required": {} } }, "DefaultInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's \n EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

" } }, "DefaultOs": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's operating system, which must be set to one of the following.

\n
    \n
  • \n

    A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux\n 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    \n
  • \n
  • \n

    A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    \n
  • \n
  • \n

    \n CentOS Linux 7\n

    \n
  • \n
  • \n

    \n Red Hat Enterprise Linux 7\n

    \n
  • \n
  • \n

    \n Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, \n Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    \n
  • \n
  • \n

    A custom AMI: Custom. You specify the custom AMI you want to use when\n you create instances. For more information about how to use custom AMIs with OpsWorks, see Using\n Custom AMIs.

    \n
  • \n
\n

The default option is the parent stack's operating system.\n For more information about supported operating systems,\n see AWS OpsWorks Stacks Operating Systems.

\n \n

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to Windows or Windows to Linux.

\n
" + "smithy.api#documentation": "

The stack's operating system, which must be set to one of the following.

\n
    \n
  • \n

    A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux\n 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    \n
  • \n
  • \n

    A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    \n
  • \n
  • \n

    \n CentOS Linux 7\n

    \n
  • \n
  • \n

    \n Red Hat Enterprise Linux 7\n

    \n
  • \n
  • \n

    \n Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, \n Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    \n
  • \n
  • \n

    A custom AMI: Custom. You specify the custom AMI you want to use when\n you create instances. For more information about how to use custom AMIs with OpsWorks, see Using\n Custom AMIs.

    \n
  • \n
\n

The default option is the parent stack's operating system.\n Not all operating systems are supported with all versions of Chef. For more information about supported operating systems,\n see OpsWorks Stacks Operating Systems.

\n \n

You can specify a different Linux operating system for the cloned stack, but you cannot change from Linux to \n Windows or Windows to Linux.

\n
" } }, "HostnameTheme": { @@ -643,13 +643,13 @@ "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration JSON values. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes\n

" + "smithy.api#documentation": "

A string that contains user-defined, custom JSON. It is used to override the corresponding default stack configuration \n JSON values. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes\n

" } }, "ConfigurationManager": { "target": "com.amazonaws.opsworks#StackConfigurationManager", "traits": { - "smithy.api#documentation": "

The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" + "smithy.api#documentation": "

The configuration manager. When you clone a stack we recommend that you use the configuration manager to specify the \n Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" } }, "ChefConfiguration": { @@ -667,19 +667,19 @@ "UseOpsworksSecurityGroups": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

\n

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are\n associated with layers by default. With UseOpsworksSecurityGroups you can instead\n provide your own custom security groups. UseOpsworksSecurityGroups has the\n following settings:

\n
    \n
  • \n

    True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot delete the built-in security group.

    \n
  • \n
  • \n

    False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon EC2) security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    \n
  • \n
\n

For more information, see Create a New\n Stack.

" + "smithy.api#documentation": "

Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers.

\n

OpsWorks Stacks provides a standard set of security groups, one for each layer, which are\n associated with layers by default. With UseOpsworksSecurityGroups you can instead\n provide your own custom security groups. UseOpsworksSecurityGroups has the\n following settings:

\n
    \n
  • \n

    True - OpsWorks Stacks automatically associates the appropriate built-in security group with \n each layer (default setting). You can associate additional security groups with a layer after you create it but you cannot \n delete the built-in security group.

    \n
  • \n
  • \n

    False - OpsWorks Stacks does not associate built-in security groups with layers. \n You must create appropriate Amazon EC2 security groups and associate a security group with each \n layer that you create. However, you can still manually associate a built-in security group with a layer on creation; \n custom security groups are required only for those layers that need custom settings.

    \n
  • \n
\n

For more information, see Create a New\n Stack.

" } }, "CustomCookbooksSource": { "target": "com.amazonaws.opsworks#Source", "traits": { - "smithy.api#documentation": "

Contains the information required to retrieve an app or cookbook from a repository. For more information, \n see Adding Apps or Cookbooks and Recipes.

" + "smithy.api#documentation": "

Contains the information required to retrieve an app or cookbook from a repository. For more information, \n see Adding Apps or \n Cookbooks and Recipes.

" } }, "DefaultSshKeyName": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS\n OpsWorks installs the public key on the instance and you can use the private key with an SSH\n client to log in to the instance. For more information, see Using SSH to\n Communicate with an Instance and Managing SSH\n Access. You can override this setting by specifying a different key pair, or no key\n pair, when you \n create an instance.

" + "smithy.api#documentation": "

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, \n OpsWorks installs the public key on the instance and you can use the private key with an SSH\n client to log in to the instance. For more information, see Using SSH to\n Communicate with an Instance and Managing SSH\n Access. You can override this setting by specifying a different key pair, or no key\n pair, when you \n create an instance.

" } }, "ClonePermissions": { @@ -703,7 +703,7 @@ "AgentVersion": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The default AWS OpsWorks Stacks agent version. You have the following options:

\n
    \n
  • \n

    Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks\n automatically installs new agent versions on the stack's instances as soon as\n they are available.

    \n
  • \n
  • \n

    Fixed version - Set this parameter to your preferred agent version. To update \n the agent version, you must edit the stack configuration and specify a new version. \n AWS OpsWorks Stacks then automatically installs that version on the stack's instances.

    \n
  • \n
\n

The default setting is LATEST. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

\n \n

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

\n
" + "smithy.api#documentation": "

The default OpsWorks Stacks agent version. You have the following options:

\n
    \n
  • \n

    Auto-update - Set this parameter to LATEST. OpsWorks Stacks\n automatically installs new agent versions on the stack's instances as soon as\n they are available.

    \n
  • \n
  • \n

    Fixed version - Set this parameter to your preferred agent version. To update \n the agent version, you must edit the stack configuration and specify a new version. \n OpsWorks Stacks automatically installs that version on the stack's instances.

    \n
  • \n
\n

The default setting is LATEST. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

\n \n

You can also specify an agent version when you create or update an instance, which overrides the stack's \n default setting.

\n
" } } }, @@ -743,7 +743,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the Amazon CloudWatch logs configuration for a layer.

" + "smithy.api#documentation": "

Describes the Amazon CloudWatch Logs configuration for a layer.

" } }, "com.amazonaws.opsworks#CloudWatchLogsEncoding": { @@ -1303,7 +1303,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the encoding of the log file so that the file can be read correctly. The default is utf_8. Encodings supported by Python codecs.decode() can be used here.

" + "smithy.api#documentation": "

Specifies the encoding of the log file so that the file can be read correctly. \n The default is utf_8. Encodings supported by Python codecs.decode() can be used here.

" } }, "com.amazonaws.opsworks#CloudWatchLogsInitialPosition": { @@ -1350,13 +1350,13 @@ "File": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

Specifies log files that you want to push to CloudWatch Logs.

\n

\n File can point to a specific file or multiple files (by using wild card characters such as /var/log/system.log*).\n Only the latest file is pushed to CloudWatch Logs, based on file modification time. We recommend that you use wild card characters to specify a series\n of files of the same type, such as access_log.2014-06-01-01, access_log.2014-06-01-02, and so on\n by using a pattern like access_log.*. Don't use a wildcard to match multiple file types,\n such as access_log_80 and access_log_443. To specify multiple, different file types, add another\n log stream entry to the configuration file, so that each log file type is stored in a different log group.

\n

Zipped files are not supported.

" + "smithy.api#documentation": "

Specifies log files that you want to push to CloudWatch Logs.

\n

\n File can point to a specific file or multiple files (by using wild card characters such \n as /var/log/system.log*).\n Only the latest file is pushed to CloudWatch Logs, based on file modification time. We recommend that you use \n wild card characters to specify a series\n of files of the same type, such as access_log.2014-06-01-01, access_log.2014-06-01-02, and so on\n by using a pattern like access_log.*. Don't use a wildcard to match multiple file types,\n such as access_log_80 and access_log_443. To specify multiple, different file types, add another\n log stream entry to the configuration file, so that each log file type is stored in a different log group.

\n

Zipped files are not supported.

" } }, "FileFingerprintLines": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, \n such as '1', '2-5'. The default value is '1', meaning the first line is used to calculate the fingerprint. Fingerprint lines are \n not sent to CloudWatch Logs unless all specified lines are available.

" + "smithy.api#documentation": "

Specifies the range of lines for identifying a file. The valid values are one number, or two dash-delimited numbers, \n such as '1', '2-5'. The default value is '1', meaning the first line is used to calculate the fingerprint. \n Fingerprint lines are \n not sent to CloudWatch Logs unless all specified lines are available.

" } }, "MultiLineStartPattern": { @@ -1397,7 +1397,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the Amazon CloudWatch logs configuration for a layer. For detailed information about members of this data type, see the CloudWatch Logs Agent Reference.

" + "smithy.api#documentation": "

Describes the CloudWatch Logs configuration for a layer. For detailed information about members of this data type, \n see the CloudWatch Logs Agent Reference.

" } }, "com.amazonaws.opsworks#CloudWatchLogsLogStreams": { @@ -1406,7 +1406,7 @@ "target": "com.amazonaws.opsworks#CloudWatchLogsLogStream" }, "traits": { - "smithy.api#documentation": "

Describes the Amazon CloudWatch logs configuration for a layer.

" + "smithy.api#documentation": "

Describes the Amazon CloudWatch Logs configuration for a layer.

" } }, "com.amazonaws.opsworks#CloudWatchLogsTimeZone": { @@ -1426,7 +1426,7 @@ } }, "traits": { - "smithy.api#documentation": "

The preferred time zone for logs streamed to CloudWatch Logs. Valid values are LOCAL and UTC, for Coordinated Universal Time.

" + "smithy.api#documentation": "

The preferred time zone for logs streamed to CloudWatch Logs. \n Valid values are LOCAL and UTC, for Coordinated Universal Time.

" } }, "com.amazonaws.opsworks#Command": { @@ -1561,7 +1561,7 @@ "Type": { "target": "com.amazonaws.opsworks#AppType", "traits": { - "smithy.api#documentation": "

The app type. Each supported type is associated with a particular layer. For example, PHP\n applications are associated with a PHP layer. AWS OpsWorks Stacks deploys an application to those instances\n that are members of the corresponding layer. If your app isn't one of the standard types, or\n you prefer to implement your own Deploy recipes, specify other.

", + "smithy.api#documentation": "

The app type. Each supported type is associated with a particular layer. For example, PHP\n applications are associated with a PHP layer. OpsWorks Stacks deploys an application to those instances\n that are members of the corresponding layer. If your app isn't one of the standard types, or\n you prefer to implement your own Deploy recipes, specify other.

", "smithy.api#required": {} } }, @@ -1685,7 +1685,7 @@ "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A string that contains user-defined, custom JSON. You can use this parameter to override some corresponding default stack configuration JSON values. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes and \n Overriding Attributes With Custom JSON.

" + "smithy.api#documentation": "

A string that contains user-defined, custom JSON. You can use this parameter to override some corresponding default \n stack configuration JSON values. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes and \n Overriding Attributes With Custom \n JSON.

" } } }, @@ -1748,7 +1748,7 @@ "InstanceType": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance type, such as t2.micro. For a list of supported instance types,\n open the stack in the console, choose Instances, and choose + Instance.\n The Size list contains the currently supported types. For more information, see Instance\n Families and Types. The parameter values that you use to specify the various types are\n in the API Name column of the Available Instance Types table.

", + "smithy.api#documentation": "

The instance type, such as t2.micro. For a list of supported instance types,\n open the stack in the console, choose Instances, and choose + Instance.\n The Size list contains the currently supported types. \n For more information, see Instance\n Families and Types. The parameter values that you use to specify the various types are\n in the API Name column of the Available Instance Types table.

", "smithy.api#required": {} } }, @@ -1761,13 +1761,13 @@ "Hostname": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance host name.

" + "smithy.api#documentation": "

The instance host name. The following are character limits for instance host names.

\n
    \n
  • \n

    Linux-based instances: 63 characters

    \n
  • \n
  • \n

    Windows-based instances: 15 characters

    \n
  • \n
" } }, "Os": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's operating system, which must be set to one of the following.

\n
    \n
  • \n

    A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, \n Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    \n
  • \n
  • \n

    A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    \n
  • \n
  • \n

    \n CentOS Linux 7\n

    \n
  • \n
  • \n

    \n Red Hat Enterprise Linux 7\n

    \n
  • \n
  • \n

    A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, \n\t\t\t Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    \n
  • \n
  • \n

    A custom AMI: Custom.

    \n
  • \n
\n

For more information about the supported operating systems,\n see AWS OpsWorks Stacks Operating Systems.

\n

The default option is the current Amazon Linux version. If you set this parameter to\n Custom, you must use the CreateInstance action's AmiId parameter to\n specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. For more information about supported operating\n systems, see Operating SystemsFor more information about how to use custom AMIs with AWS OpsWorks Stacks, see Using\n Custom AMIs.

" + "smithy.api#documentation": "

The instance's operating system, which must be set to one of the following.

\n
    \n
  • \n

    A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, \n Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    \n
  • \n
  • \n

    A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    \n
  • \n
  • \n

    \n CentOS Linux 7\n

    \n
  • \n
  • \n

    \n Red Hat Enterprise Linux 7\n

    \n
  • \n
  • \n

    A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, \n\t\t\t Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

    \n
  • \n
  • \n

    A custom AMI: Custom.

    \n
  • \n
\n

Not all operating systems are supported with all versions of Chef. For more information about the supported operating systems,\n see OpsWorks Stacks Operating Systems.

\n

The default option is the current Amazon Linux version. If you set this parameter to\n Custom, you must use the CreateInstance action's AmiId parameter to\n specify the custom AMI that you want to use. Block device mappings are not supported if the value is Custom. \n For more information about how to use custom AMIs with OpsWorks Stacks, see Using\n Custom AMIs.

" } }, "AmiId": { @@ -1797,7 +1797,7 @@ "SubnetId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's default subnet ID value and direct AWS OpsWorks Stacks to launch the instance in a different subnet.

" + "smithy.api#documentation": "

The ID of the instance's subnet. If the stack is running in a VPC, you can use this parameter to override the stack's \n default subnet ID value and direct OpsWorks Stacks to launch the instance in a different subnet.

" } }, "Architecture": { @@ -1833,13 +1833,13 @@ "AgentVersion": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The default AWS OpsWorks Stacks agent version. You have the following options:

\n
    \n
  • \n

    \n INHERIT - Use the stack's default agent version setting.

    \n
  • \n
  • \n

    \n version_number - Use the specified agent version.\n This value overrides the stack's default setting.\n To update the agent version, edit the instance configuration and specify a\n new version.\n AWS OpsWorks Stacks then automatically installs that version on the instance.

    \n
  • \n
\n

The default setting is INHERIT. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

" + "smithy.api#documentation": "

The default OpsWorks Stacks agent version. You have the following options:

\n
    \n
  • \n

    \n INHERIT - Use the stack's default agent version setting.

    \n
  • \n
  • \n

    \n version_number - Use the specified agent version.\n This value overrides the stack's default setting.\n To update the agent version, edit the instance configuration and specify a\n new version.\n OpsWorks Stacks installs that version on the instance.

    \n
  • \n
\n

The default setting is INHERIT. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. \n AgentVersion cannot be set to Chef 12.2.

" } }, "Tenancy": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, inherit tenancy settings from the VPC. The following are valid values for this parameter: dedicated, default, or host. Because there are costs associated with changes in tenancy options, we recommend that you research tenancy options before choosing them for your instances. For more information about dedicated hosts, see Dedicated Hosts Overview and Amazon EC2 Dedicated Hosts. For more information about dedicated instances, see Dedicated Instances and Amazon EC2 Dedicated Instances.

" + "smithy.api#documentation": "

The instance's tenancy option. The default option is no tenancy, or if the instance is running in a VPC, \n inherit tenancy settings from the VPC. The following are valid values for this parameter: \n dedicated, default, or host. Because there are costs associated with changes \n in tenancy options, we recommend that you research tenancy options before choosing them for your instances. \n For more information about dedicated hosts, see \n Dedicated Hosts Overview and \n Amazon EC2 Dedicated Hosts. \n For more information about dedicated instances, see \n Dedicated Instances and \n Amazon EC2 Dedicated Instances.

" } } }, @@ -1879,7 +1879,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a layer. For more information, see How to\n Create a Layer.

\n \n

You should use CreateLayer for noncustom layer types such as PHP App Server only if the stack\n does not have an existing layer of that type. A stack can have at most one instance of each\n noncustom layer; if you attempt to create a second instance, CreateLayer fails. A\n stack can have an arbitrary number of custom layers, so you can call CreateLayer as\n many times as you like for that layer type.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Creates a layer. For more information, see How to\n Create a Layer.

\n \n

You should use CreateLayer for noncustom layer types such as \n PHP App Server only if the stack\n does not have an existing layer of that type. A stack can have at most one instance of each\n noncustom layer; if you attempt to create a second instance, CreateLayer fails. A\n stack can have an arbitrary number of custom layers, so you can call CreateLayer as\n many times as you like for that layer type.

\n
\n

\n Required Permissions: To use this action, an IAM user must \n have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#CreateLayerRequest": { @@ -1895,21 +1895,21 @@ "Type": { "target": "com.amazonaws.opsworks#LayerType", "traits": { - "smithy.api#documentation": "

The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. Built-in layers are not available in Chef 12 stacks.

", + "smithy.api#documentation": "

The layer type. A stack cannot have more than one built-in layer of the same type. It can have any number of custom layers. \n Built-in layers are not available in Chef 12 stacks.

", "smithy.api#required": {} } }, "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The layer name, which is used by the console.

", + "smithy.api#documentation": "

The layer name, which is used by the console. Layer names can be a maximum of 32 characters.

", "smithy.api#required": {} } }, "Shortname": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters, which are limited to the alphanumeric characters, '-', '_', and '.'.

\n

The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference.

", + "smithy.api#documentation": "

For custom layers only, use this parameter to specify the layer's short name, which is used internally by \n OpsWorks Stacks and by Chef recipes. The short name is also used as the name for the directory where your \n app files are installed. It can have a maximum of 32 characters, which are limited to the alphanumeric \n characters, '-', '_', and '.'.

\n

Built-in layer short names are defined by OpsWorks Stacks. For more information, see the \n Layer Reference.

", "smithy.api#required": {} } }, @@ -1922,19 +1922,19 @@ "CloudWatchLogsConfiguration": { "target": "com.amazonaws.opsworks#CloudWatchLogsConfiguration", "traits": { - "smithy.api#documentation": "

Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream.

" + "smithy.api#documentation": "

Specifies CloudWatch Logs configuration options for the layer. For more information, \n see CloudWatchLogsLogStream.

" } }, "CustomInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ARN of an IAM profile to be used for the layer's EC2 instances. For more information\n about IAM ARNs, see Using Identifiers.

" + "smithy.api#documentation": "

The ARN of an IAM profile to be used for the layer's EC2 instances. For more information\n about IAM ARNs, see Using Identifiers.

" } }, "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A JSON-formatted string containing custom stack configuration and deployment attributes\n to be installed on the layer's instances. For more information, see\n \n Using Custom JSON. This feature is supported as of version 1.7.42 of the AWS CLI. \n

" + "smithy.api#documentation": "

A JSON-formatted string containing custom stack configuration and deployment attributes\n to be installed on the layer's instances. For more information, see\n \n Using Custom JSON. This feature is supported as of version 1.7.42 of the CLI. \n

" } }, "CustomSecurityGroupIds": { @@ -2040,21 +2040,21 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack name.

", + "smithy.api#documentation": "

The stack name. Stack names can be a maximum of 64 characters.

", "smithy.api#required": {} } }, "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's AWS region, such as ap-south-1. For more information about\n Amazon regions, see Regions and Endpoints.

\n \n

In the AWS CLI, this API maps to the --stack-region parameter. If the\n --stack-region parameter and the AWS CLI common parameter\n --region are set to the same value, the stack uses a\n regional endpoint. If the --stack-region\n parameter is not set, but the AWS CLI --region parameter is, this also\n results in a stack with a regional endpoint. However, if the\n --region parameter is set to us-east-1, and the\n --stack-region parameter is set to one of the following, then the\n stack uses a legacy or classic region: us-west-1,\n us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1,\n ap-southeast-2. In this case, the actual API endpoint of the stack is in\n us-east-1. Only the preceding regions are supported as classic\n regions in the us-east-1 API endpoint. Because it is a best practice to\n choose the regional endpoint that is closest to where you manage AWS, we recommend\n that you use regional endpoints for new stacks. The AWS CLI common\n --region parameter always specifies a regional API endpoint; it\n cannot be used to specify a classic AWS OpsWorks Stacks region.

\n
", + "smithy.api#documentation": "

The stack's Amazon Web Services Region, such as ap-south-1. For more information about\n Amazon Web Services Regions, see Regions and Endpoints.

\n \n

In the CLI, this API maps to the --stack-region parameter. If the\n --stack-region parameter and the CLI common parameter\n --region are set to the same value, the stack uses a\n regional endpoint. If the --stack-region\n parameter is not set, but the CLI --region parameter is, this also\n results in a stack with a regional endpoint. However, if the\n --region parameter is set to us-east-1, and the\n --stack-region parameter is set to one of the following, then the\n stack uses a legacy or classic region: us-west-1,\n us-west-2, sa-east-1, eu-central-1, eu-west-1, ap-northeast-1, ap-southeast-1,\n ap-southeast-2. In this case, the actual API endpoint of the stack is in\n us-east-1. Only the preceding regions are supported as classic\n regions in the us-east-1 API endpoint. Because it is a best practice to\n choose the regional endpoint that is closest to where you manage Amazon Web Services, we recommend\n that you use regional endpoints for new stacks. The CLI common\n --region parameter always specifies a regional API endpoint; it\n cannot be used to specify a classic OpsWorks Stacks region.

\n
", "smithy.api#required": {} } }, "VpcId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched into this VPC. You cannot change the ID later.

\n
    \n
  • \n

    If your account supports EC2-Classic, the default value is no VPC.

    \n
  • \n
  • \n

    If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

    \n
  • \n
\n

If the VPC ID corresponds to a default VPC and you have specified either the\n DefaultAvailabilityZone or the DefaultSubnetId parameter only,\n AWS OpsWorks Stacks infers the value of the \n other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets\n these parameters to the first valid Availability Zone for the specified region and the\n corresponding default VPC subnet ID, respectively.

\n

If you specify a nondefault VPC ID, note the following:

\n
    \n
  • \n

    It must belong to a VPC in your account that is in the specified region.

    \n
  • \n
  • \n

    You must specify a value for DefaultSubnetId.

    \n
  • \n
\n

For more information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack in a\n VPC. For more information about default VPC and EC2-Classic, see Supported\n Platforms.

" + "smithy.api#documentation": "

The ID of the VPC that the stack is to be launched into. The VPC must be in the stack's region. All instances are launched \n into this VPC. You cannot change the ID later.

\n
    \n
  • \n

    If your account supports EC2-Classic, the default value is no VPC.

    \n
  • \n
  • \n

    If your account does not support EC2-Classic, the default value is the default VPC for the specified region.

    \n
  • \n
\n

If the VPC ID corresponds to a default VPC and you have specified either the\n DefaultAvailabilityZone or the DefaultSubnetId parameter only,\n OpsWorks Stacks infers the value of the \n other parameter. If you specify neither parameter, OpsWorks Stacks sets\n these parameters to the first valid Availability Zone for the specified region and the\n corresponding default VPC subnet ID, respectively.

\n

If you specify a nondefault VPC ID, note the following:

\n
    \n
  • \n

    It must belong to a VPC in your account that is in the specified region.

    \n
  • \n
  • \n

    You must specify a value for DefaultSubnetId.

    \n
  • \n
\n

For more information about how to use OpsWorks Stacks with a VPC, see Running a Stack in a\n VPC. For more information about default VPC and EC2-Classic, see Supported\n Platforms.

" } }, "Attributes": { @@ -2066,21 +2066,21 @@ "ServiceRoleArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's AWS Identity and Access Management (IAM) role, which allows AWS OpsWorks Stacks to work with AWS\n resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an\n existing IAM role. For more information about IAM ARNs, see Using\n Identifiers.

", + "smithy.api#documentation": "

The stack's IAM role, which allows OpsWorks Stacks to work with Amazon Web Services\n resources on your behalf. You must set this parameter to the Amazon Resource Name (ARN) for an\n existing IAM role. For more information about IAM ARNs, see \n Using\n Identifiers.

", "smithy.api#required": {} } }, "DefaultInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

", "smithy.api#required": {} } }, "DefaultOs": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance. You can specify one of the following.

\n
    \n
  • \n

    A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, \n Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    \n
  • \n
  • \n

    A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    \n
  • \n
  • \n

    \n CentOS Linux 7\n

    \n
  • \n
  • \n

    \n Red Hat Enterprise Linux 7\n

    \n
  • \n
  • \n

    A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, \n Microsoft Windows Server 2012 R2 with SQL Server Express, \n Microsoft Windows Server 2012 R2 with SQL Server Standard, or \n Microsoft Windows Server 2012 R2 with SQL Server Web.

    \n
  • \n
  • \n

    A custom AMI: Custom. You specify the custom AMI you want to use when\n you create instances. For more\n information, see \n Using Custom AMIs.

    \n
  • \n
\n

The default option is the current Amazon Linux version.\n For more information about supported operating systems,\n see AWS OpsWorks Stacks Operating Systems.

" + "smithy.api#documentation": "

The stack's default operating system, which is installed on every instance unless you specify a different operating \n system when you create the instance. You can specify one of the following.

\n
    \n
  • \n

    A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, \n Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.

    \n
  • \n
  • \n

    A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.

    \n
  • \n
  • \n

    \n CentOS Linux 7\n

    \n
  • \n
  • \n

    \n Red Hat Enterprise Linux 7\n

    \n
  • \n
  • \n

    A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, \n Microsoft Windows Server 2012 R2 with SQL Server Express, \n Microsoft Windows Server 2012 R2 with SQL Server Standard, or \n Microsoft Windows Server 2012 R2 with SQL Server Web.

    \n
  • \n
  • \n

    A custom AMI: Custom. You specify the custom AMI you want to use when\n you create instances. For more\n information, see \n Using Custom AMIs.

    \n
  • \n
\n

The default option is the current Amazon Linux version.\n Not all operating systems are supported with all versions of Chef. For more information about supported operating systems,\n see OpsWorks Stacks Operating Systems.

" } }, "HostnameTheme": { @@ -2104,13 +2104,13 @@ "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.

" + "smithy.api#documentation": "

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration \n attribute values or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.
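Because the value is just a flat JSON object serialized into a string, a small Foundation-only Swift sketch shows one way to build it; the keys and values are made-up examples, and the member it feeds is assumed to be the customJson parameter of the generated request shape.

```swift
import Foundation

// Build the custom JSON string in the documented
// "{\"key1\": \"value1\", \"key2\": \"value2\",...}" format.
let overrides = ["key1": "value1", "key2": "value2"]
let data = try! JSONSerialization.data(withJSONObject: overrides)
let customJson = String(decoding: data, as: UTF8.self)
print(customJson)   // e.g. {"key1":"value1","key2":"value2"} (key order may vary)
```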

" } }, "ConfigurationManager": { "target": "com.amazonaws.opsworks#StackConfigurationManager", "traits": { - "smithy.api#documentation": "

The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" + "smithy.api#documentation": "

The configuration manager. When you create a stack we recommend that you use the configuration manager to specify the \n Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is \n currently 12.

" } }, "ChefConfiguration": { @@ -2128,7 +2128,7 @@ "UseOpsworksSecurityGroups": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

\n

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are\n associated with layers by default. With UseOpsworksSecurityGroups you can instead\n provide your own custom security groups. UseOpsworksSecurityGroups has the\n following settings:

\n
    \n
  • \n

    True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.

    \n
  • \n
  • \n

    False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on creation; custom security groups are required only for those layers that need custom settings.

    \n
  • \n
\n

For more information, see Create a New\n Stack.

" + "smithy.api#documentation": "

Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers.

\n

OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are\n associated with layers by default. With UseOpsworksSecurityGroups you can instead\n provide your own custom security groups. UseOpsworksSecurityGroups has the\n following settings:

\n
    \n
  • \n

    True - OpsWorks Stacks automatically associates the appropriate built-in security group with each \n layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot \n delete the built-in security group.

    \n
  • \n
  • \n

    False - OpsWorks Stacks does not associate built-in security groups with layers. You must create \n appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still \n manually associate a built-in security group with a layer on creation; custom security groups are required only for those \n layers that need custom settings.

    \n
  • \n
\n

For more information, see Create a New\n Stack.

" } }, "CustomCookbooksSource": { @@ -2140,7 +2140,7 @@ "DefaultSshKeyName": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, AWS\n OpsWorks installs the public key on the instance and you can use the private key with an SSH\n client to log in to the instance. For more information, see Using SSH to\n Communicate with an Instance and Managing SSH\n Access. You can override this setting by specifying a different key pair, or no key\n pair, when you \n create an instance.

" + "smithy.api#documentation": "

A default Amazon EC2 key pair name. The default value is none. If you specify a key pair name, \n OpsWorks installs the public key on the instance and you can use the private key with an SSH\n client to log in to the instance. For more information, see Using SSH to\n Communicate with an Instance and Managing SSH\n Access. You can override this setting by specifying a different key pair, or no key\n pair, when you \n create an instance.

" } }, "DefaultRootDeviceType": { @@ -2152,7 +2152,7 @@ "AgentVersion": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The default AWS OpsWorks Stacks agent version. You have the following options:

\n
    \n
  • \n

    Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks\n automatically installs new agent versions on the stack's instances as soon as\n they are available.

    \n
  • \n
  • \n

    Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances.

    \n
  • \n
\n

The default setting is the most recent release of the agent. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

\n \n

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

\n
" + "smithy.api#documentation": "

The default OpsWorks Stacks agent version. You have the following options:

\n
    \n
  • \n

    Auto-update - Set this parameter to LATEST. OpsWorks Stacks\n automatically installs new agent versions on the stack's instances as soon as\n they are available.

    \n
  • \n
  • \n

    Fixed version - Set this parameter to your preferred agent version. To update the agent version, \n you must edit the stack configuration and specify a new version. OpsWorks Stacks installs \n that version on the stack's instances.

    \n
  • \n
\n

The default setting is the most recent release of the agent. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2.

\n \n

You can also specify an agent version when you create or update an instance, \n which overrides the stack's default setting.

\n
" } } }, @@ -2205,7 +2205,7 @@ "SshUsername": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If\n the specified name includes other punctuation marks, AWS OpsWorks Stacks removes them. For example,\n my.name will be changed to myname. If you do not specify an SSH\n user name, AWS OpsWorks Stacks generates one from the IAM user name.

" + "smithy.api#documentation": "

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If\n the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example,\n my.name is changed to myname. If you do not specify an SSH\n user name, OpsWorks Stacks generates one from the IAM user name.

" } }, "SshPublicKey": { @@ -2301,7 +2301,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified app.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Deletes a specified app.

\n

\n Required Permissions: To use this action, an IAM user must have a \n Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DeleteAppRequest": { @@ -2336,7 +2336,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before you can delete it.

\n

For more information, see Deleting\n Instances.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Deletes a specified instance, which terminates the associated Amazon EC2 instance. You must stop an instance before \n you can delete it.

\n

For more information, see Deleting\n Instances.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage \n permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DeleteInstanceRequest": { @@ -2383,7 +2383,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified layer. You must first stop and then delete all associated instances or\n unassign registered instances. For more information, see How to\n Delete a Layer.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Deletes a specified layer. You must first stop and then delete all associated instances or\n unassign registered instances. For more information, see How to\n Delete a Layer.

\n

\n Required Permissions: To use this action, an IAM user must have a \n Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DeleteLayerRequest": { @@ -2453,7 +2453,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a user profile.

\n

\n Required Permissions: To use this action, an IAM user must have an attached policy\n that explicitly grants permissions. For more information about user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Deletes a user profile.

\n

\n Required Permissions: To use this action, an IAM user must have an attached \n policy\n that explicitly grants permissions. For more information about user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DeleteUserProfileRequest": { @@ -2537,7 +2537,7 @@ "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration attribute values for stack or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information on custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.

" + "smithy.api#documentation": "

A string that contains user-defined custom JSON. It can be used to override the corresponding default stack configuration \n attribute values for stack or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information on custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.

" } }, "InstanceIds": { @@ -2557,14 +2557,14 @@ "Name": { "target": "com.amazonaws.opsworks#DeploymentCommandName", "traits": { - "smithy.api#documentation": "

Specifies the operation. You can specify only one command.

\n

For stacks, the following commands are available:

\n
    \n
  • \n

    \n execute_recipes: Execute one or more recipes. To specify the recipes, set an\n Args parameter named recipes to the list of recipes to be\n executed. For example, to execute phpapp::appsetup, set Args to\n {\"recipes\":[\"phpapp::appsetup\"]}.

    \n
  • \n
  • \n

    \n install_dependencies: Install the stack's dependencies.

    \n
  • \n
  • \n

    \n update_custom_cookbooks: Update the stack's custom cookbooks.

    \n
  • \n
  • \n

    \n update_dependencies: Update the stack's dependencies.

    \n
  • \n
\n \n

The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing.

\n
\n

For apps, the following commands are available:

\n
    \n
  • \n

    \n deploy: Deploy an app. Ruby on Rails apps have an optional Args\n parameter named migrate. Set Args to {\"migrate\":[\"true\"]} to\n migrate the database. The default setting is {\"migrate\":[\"false\"]}.

    \n
  • \n
  • \n

    \n rollback Roll the app back to the previous version. When you update an app,\n AWS OpsWorks Stacks stores the previous version, up to a maximum of five versions. You can use this\n command to roll an app back as many as four versions.

    \n
  • \n
  • \n

    \n start: Start the app's web or application server.

    \n
  • \n
  • \n

    \n stop: Stop the app's web or application server.

    \n
  • \n
  • \n

    \n restart: Restart the app's web or application server.

    \n
  • \n
  • \n

    \n undeploy: Undeploy the app.

    \n
  • \n
", + "smithy.api#documentation": "

Specifies the operation. You can specify only one command.

\n

For stacks, the following commands are available:

\n
    \n
  • \n

    \n execute_recipes: Execute one or more recipes. To specify the recipes, set an\n Args parameter named recipes to the list of recipes to be\n executed. For example, to execute phpapp::appsetup, set Args to\n {\"recipes\":[\"phpapp::appsetup\"]}.

    \n
  • \n
  • \n

    \n install_dependencies: Install the stack's dependencies.

    \n
  • \n
  • \n

    \n update_custom_cookbooks: Update the stack's custom cookbooks.

    \n
  • \n
  • \n

    \n update_dependencies: Update the stack's dependencies.

    \n
  • \n
\n \n

The update_dependencies and install_dependencies commands are supported only for Linux instances. You can run the commands successfully on Windows instances, but they do nothing.

\n
\n

For apps, the following commands are available:

\n
    \n
  • \n

    \n deploy: Deploy an app. Ruby on Rails apps have an optional Args\n parameter named migrate. Set Args to {\"migrate\":[\"true\"]} to\n migrate the database. The default setting is {\"migrate\":[\"false\"]}.

    \n
  • \n
  • \n

\n rollback: Roll the app back to the previous version. When you update an app,\n OpsWorks Stacks stores the previous version, up to a maximum of five versions. You can use this\n command to roll an app back as many as four versions.


    \n
  • \n
  • \n

    \n start: Start the app's web or application server.

    \n
  • \n
  • \n

    \n stop: Stop the app's web or application server.

    \n
  • \n
  • \n

    \n restart: Restart the app's web or application server.

    \n
  • \n
  • \n

    \n undeploy: Undeploy the app.

    \n
  • \n
", "smithy.api#required": {} } }, "Args": { "target": "com.amazonaws.opsworks#DeploymentCommandArgs", "traits": { - "smithy.api#documentation": "

The arguments of those commands that take arguments. It should be set to a JSON object with the following format:

\n

\n {\"arg_name1\" : [\"value1\", \"value2\", ...], \"arg_name2\" : [\"value1\", \"value2\", ...],\n ...}\n

\n

The update_dependencies command takes two arguments:

\n
    \n
  • \n

    \n upgrade_os_to - Specifies the desired Amazon Linux version for instances\n whose OS you want to upgrade, such as Amazon Linux 2016.09. You must also set\n the allow_reboot argument to true.

    \n
  • \n
  • \n

    \n allow_reboot - Specifies whether to allow AWS OpsWorks Stacks to reboot the instances if\n necessary, after installing the updates. This argument can be set to either\n true or false. The default value is false.

    \n
  • \n
\n

For example, to upgrade an instance to Amazon Linux 2016.09, set Args to the\n following.

\n

\n { \"upgrade_os_to\":[\"Amazon Linux 2016.09\"], \"allow_reboot\":[\"true\"] } \n

" + "smithy.api#documentation": "

The arguments of those commands that take arguments. It should be set to a JSON object with the following format:

\n

\n {\"arg_name1\" : [\"value1\", \"value2\", ...], \"arg_name2\" : [\"value1\", \"value2\", ...],\n ...}\n

\n

The update_dependencies command takes two arguments:

\n
    \n
  • \n

    \n upgrade_os_to - Specifies the Amazon Linux version that you want instances\n to run, such as Amazon Linux 2. You must also set\n the allow_reboot argument to true.

    \n
  • \n
  • \n

    \n allow_reboot - Specifies whether to allow OpsWorks Stacks to reboot the instances if\n necessary, after installing the updates. This argument can be set to either\n true or false. The default value is false.

    \n
  • \n
\n

For example, to upgrade an instance to Amazon Linux 2018.03, set Args to the\n following.

\n

\n { \"upgrade_os_to\":[\"Amazon Linux 2018.03\"], \"allow_reboot\":[\"true\"] } \n

" } } }, @@ -2681,7 +2681,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters a specified Amazon ECS cluster from a stack.\n For more information, see\n \n Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see\n https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.

" + "smithy.api#documentation": "

Deregisters a specified Amazon ECS cluster from a stack.\n For more information, see\n \n Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see\n https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html.

" } }, "com.amazonaws.opsworks#DeregisterEcsClusterRequest": { @@ -2716,7 +2716,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters a specified Elastic IP address. The address can then be registered by another\n stack. For more information, see Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Deregisters a specified Elastic IP address. The address can be registered by another\n stack after it is deregistered. For more information, see Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DeregisterElasticIpRequest": { @@ -2751,7 +2751,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deregister a registered Amazon EC2 or on-premises instance. This action removes the \n instance from the stack and returns it to your control. This action cannot be used with \n instances that were created with AWS OpsWorks Stacks.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Deregisters an instance from OpsWorks Stacks. The instance can be a registered instance (Amazon EC2 or on-premises) or an instance created with OpsWorks. \n This action removes the instance from the stack and returns it to your control.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DeregisterInstanceRequest": { @@ -2830,7 +2830,7 @@ "VolumeId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The AWS OpsWorks Stacks volume ID, which is the GUID that AWS OpsWorks Stacks assigned to the instance when you registered the volume with the stack, not the Amazon EC2 volume ID.

", + "smithy.api#documentation": "

The OpsWorks Stacks volume ID, which is the GUID that OpsWorks Stacks assigned to the instance \n when you registered the volume with the stack, not the Amazon EC2 volume ID.

", "smithy.api#required": {} } } @@ -2856,7 +2856,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the available AWS OpsWorks Stacks agent versions. You must specify a stack ID or a\n configuration manager. DescribeAgentVersions returns a list of available\n agent versions for the specified stack or configuration manager.

" + "smithy.api#documentation": "

Describes the available OpsWorks Stacks agent versions. You must specify a stack ID or a\n configuration manager. DescribeAgentVersions returns a list of available\n agent versions for the specified stack or configuration manager.

" } }, "com.amazonaws.opsworks#DescribeAgentVersionsRequest": { @@ -2885,7 +2885,7 @@ "AgentVersions": { "target": "com.amazonaws.opsworks#AgentVersions", "traits": { - "smithy.api#documentation": "

The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, not the abbreviated number used by the console.

" + "smithy.api#documentation": "

The agent versions for the specified stack or configuration manager. Note that this value is the complete version number, \n not the abbreviated number used by the console.

" } } }, @@ -2911,7 +2911,7 @@ } ], "traits": { - "smithy.api#documentation": "

Requests a description of a specified set of apps.

\n \n

This call accepts only one resource-identifying parameter.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Show, Deploy, or\n Manage permissions level for the stack, or an attached policy that explicitly grants\n permissions. For more information about user permissions, see Managing User\n Permissions.

", + "smithy.api#documentation": "

Requests a description of a specified set of apps.

\n \n

This call accepts only one resource-identifying parameter.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Show, Deploy, \n or\n Manage permissions level for the stack, or an attached policy that explicitly grants\n permissions. For more information about user permissions, see Managing User\n Permissions.

", "smithy.waiters#waitable": { "AppExists": { "acceptors": [ @@ -3135,7 +3135,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID,\n you can use the MaxResults and NextToken parameters to paginate the\n response. However, AWS OpsWorks Stacks currently supports only one cluster per layer, so the result\n set has a maximum of one element.

\n

\n Required Permissions: To use this action, an IAM user must have a Show, Deploy, or\n Manage permissions level for the stack or an attached policy that explicitly grants\n permission. For more information about user permissions, see Managing User\n Permissions.

\n

This call accepts only one resource-identifying parameter.

", + "smithy.api#documentation": "

Describes Amazon ECS clusters that are registered with a stack. If you specify only a stack ID,\n you can use the MaxResults and NextToken parameters to paginate the\n response. However, OpsWorks Stacks currently supports only one cluster per layer, so the result\n set has a maximum of one element.

\n

\n Required Permissions: To use this action, an IAM user must have a Show, Deploy, or\n Manage permissions level for the stack or an attached policy that explicitly grants\n permission. For more information about user permissions, see Managing User\n Permissions.

\n

This call accepts only one resource-identifying parameter.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3727,7 +3727,7 @@ "LayerId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A layer ID. If you use this parameter, DescribeInstances returns descriptions of\n the instances associated with the specified layer.

" + "smithy.api#documentation": "

A layer ID. If you use this parameter, DescribeInstances returns descriptions of\n the instances associated with the specified layer.

" } }, "InstanceIds": { @@ -3897,7 +3897,7 @@ "target": "com.amazonaws.opsworks#DescribeOperatingSystemsResponse" }, "traits": { - "smithy.api#documentation": "

Describes the operating systems that are supported by AWS OpsWorks Stacks.

" + "smithy.api#documentation": "

Describes the operating systems that are supported by OpsWorks Stacks.

" } }, "com.amazonaws.opsworks#DescribeOperatingSystemsResponse": { @@ -3941,7 +3941,7 @@ "IamUserArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The user's IAM ARN. This can also be a federated user's ARN. For more information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The user's IAM ARN. This can also be a federated user's ARN. For more information about IAM \n ARNs, see Using\n Identifiers.

" } }, "StackId": { @@ -3961,7 +3961,7 @@ "Permissions": { "target": "com.amazonaws.opsworks#Permissions", "traits": { - "smithy.api#documentation": "

An array of Permission objects that describe the stack permissions.

\n
    \n
  • \n

    If the request object contains only a stack ID, the array contains a\n Permission object with permissions for each of the stack IAM ARNs.

    \n
  • \n
  • \n

    If the request object contains only an IAM ARN, the array contains a\n Permission object with permissions for each of the user's stack IDs.

    \n
  • \n
  • \n

    If the request contains a stack ID and an IAM ARN, the array contains a single\n Permission object with permissions for the specified stack and IAM ARN.

    \n
  • \n
" + "smithy.api#documentation": "

An array of Permission objects that describe the stack permissions.

\n
    \n
  • \n

    If the request object contains only a stack ID, the array contains a\n Permission object with permissions for each of the stack IAM ARNs.

    \n
  • \n
  • \n

    If the request object contains only an IAM ARN, the array contains a\n Permission object with permissions for each of the user's stack IDs.

    \n
  • \n
  • \n

    If the request contains a stack ID and an IAM ARN, the array contains a single\n Permission object with permissions for the specified stack and IAM ARN.

    \n
  • \n
" } } }, @@ -4057,7 +4057,7 @@ "StackId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the stack with which the instances are registered. The operation returns descriptions of all registered Amazon RDS instances.

", + "smithy.api#documentation": "

The ID of the stack with which the instances are registered. The operation returns descriptions of all registered \n Amazon RDS instances.

", "smithy.api#required": {} } }, @@ -4104,7 +4104,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes AWS OpsWorks Stacks service errors.

\n

\n Required Permissions: To use this action, an IAM user must have a Show, Deploy, or\n Manage permissions level for the stack, or an attached policy that explicitly grants\n permissions. For more information about user permissions, see Managing User\n Permissions.

\n

This call accepts only one resource-identifying parameter.

" + "smithy.api#documentation": "

Describes OpsWorks Stacks service errors.

\n

\n Required Permissions: To use this action, an IAM user must have a Show, Deploy, or\n Manage permissions level for the stack, or an attached policy that explicitly grants\n permissions. For more information about user permissions, see Managing User\n Permissions.

\n

This call accepts only one resource-identifying parameter.

" } }, "com.amazonaws.opsworks#DescribeServiceErrorsRequest": { @@ -4189,7 +4189,7 @@ "AgentInstallerUrl": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The AWS OpsWorks Stacks agent installer's URL.

" + "smithy.api#documentation": "

The OpsWorks Stacks agent installer's URL.

" } }, "Parameters": { @@ -4280,7 +4280,7 @@ "StackIds": { "target": "com.amazonaws.opsworks#Strings", "traits": { - "smithy.api#documentation": "

An array of stack IDs that specify the stacks to be described. If you omit this parameter,\n DescribeStacks returns a description of every stack.

" + "smithy.api#documentation": "

An array of stack IDs that specify the stacks to be described. If you omit this parameter, and have permissions to get information \n about all stacks, DescribeStacks returns a description of every stack. If the IAM policy that is attached to an IAM \n user limits the DescribeStacks action to specific stack ARNs, this parameter is required, and the user must specify a stack ARN that is allowed by the policy. \n Otherwise, DescribeStacks returns an AccessDenied error.
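For illustration, here is a minimal sketch of calling DescribeStacks through the generated Soto client; it assumes the describeStacks method and the DescribeStacksRequest(stackIds:) initializer exist as generated, and the stack IDs are placeholders.

```swift
import SotoOpsWorks

// Sketch (names assumed from the generated Soto code): describe two
// specific stacks by ID instead of every stack the caller can see.
func stackNames(using opsWorks: OpsWorks) async throws -> [String] {
    let response = try await opsWorks.describeStacks(
        .init(stackIds: ["stack-id-1", "stack-id-2"])   // placeholder IDs
    )
    return (response.stacks ?? []).compactMap { $0.name }
}
```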

" } } }, @@ -4483,7 +4483,7 @@ } ], "traits": { - "smithy.api#documentation": "

Detaches a specified Elastic Load Balancing instance from its layer.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Detaches a specified Elastic Load Balancing instance from its layer.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage \n permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#DetachElasticLoadBalancerRequest": { @@ -4570,7 +4570,7 @@ "VolumeType": { "target": "com.amazonaws.opsworks#VolumeType", "traits": { - "smithy.api#documentation": "

The volume type. gp2 for General Purpose (SSD) volumes, io1 for\n Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD,and standard for Magnetic volumes.

\n

If you specify the io1 volume type, you must also specify a value for the Iops attribute. \n The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. AWS uses the default volume size (in GiB) \n specified in the AMI attributes to set IOPS to 50 x (volume size).

" + "smithy.api#documentation": "

The volume type. gp2 for General Purpose (SSD) volumes, io1 for\n Provisioned IOPS (SSD) volumes, st1 for Throughput Optimized hard disk drives (HDD), sc1 for Cold HDD, and standard for Magnetic volumes.

\n

If you specify the io1 volume type, you must also specify a value for the Iops attribute. \n The maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1. Amazon Web Services uses the default volume size (in GiB) \n specified in the AMI attributes to set IOPS to 50 x (volume size).

" } }, "DeleteOnTermination": { @@ -4634,7 +4634,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The name.

" + "smithy.api#documentation": "

The name, which can be a maximum of 32 characters.

" } }, "Domain": { @@ -4646,7 +4646,7 @@ "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The AWS region. For more information, see Regions and Endpoints.

" + "smithy.api#documentation": "

The Amazon Web Services Region. For more information, see Regions and Endpoints.

" } }, "InstanceId": { @@ -4672,13 +4672,13 @@ "ElasticLoadBalancerName": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The Elastic Load Balancing instance's name.

" + "smithy.api#documentation": "

The Elastic Load Balancing instance name.

" } }, "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's AWS region.

" + "smithy.api#documentation": "

The instance's Amazon Web Services Region.

" } }, "DnsName": { @@ -4690,13 +4690,13 @@ "StackId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the stack that the instance is associated with.

" + "smithy.api#documentation": "

The ID of the stack with which the instance is associated.

" } }, "LayerId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the layer that the instance is attached to.

" + "smithy.api#documentation": "

The ID of the layer to which the instance is attached.

" } }, "VpcId": { @@ -4720,7 +4720,7 @@ "Ec2InstanceIds": { "target": "com.amazonaws.opsworks#Strings", "traits": { - "smithy.api#documentation": "

A list of the EC2 instances that the Elastic Load Balancing instance is managing traffic for.

" + "smithy.api#documentation": "

A list of the EC2 instances for which the Elastic Load Balancing instance is managing traffic.

" } } }, @@ -4740,21 +4740,21 @@ "Key": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

(Required) The environment variable's name, which can consist of up to 64 characters and must be specified. The name can contain upper- and lowercase letters, numbers, and underscores (_), but it must start with a letter or underscore.

", + "smithy.api#documentation": "

(Required) The environment variable's name, which can consist of up to 64 characters and must be specified. \n The name can contain upper- and lowercase letters, numbers, and underscores (_), but it must start with a letter or underscore.

", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

(Optional) The environment variable's value, which can be left empty. If you specify a value, it can contain up to 256 characters, which must all be printable.

", + "smithy.api#documentation": "

(Optional) The environment variable's value, which can be left empty. If you specify a value, \n it can contain up to 256 characters, which must all be printable.

", "smithy.api#required": {} } }, "Secure": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

(Optional) Whether the variable's value will be returned by the DescribeApps action.\n To conceal an environment variable's value, set Secure to true.\n DescribeApps then returns *****FILTERED***** instead of the actual\n value. The default value for Secure is false.

" + "smithy.api#documentation": "

(Optional) Whether the variable's value is returned by the DescribeApps action.\n To hide an environment variable's value, set Secure to true.\n DescribeApps returns *****FILTERED***** instead of the actual\n value. The default value for Secure is false.
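A short sketch ties the Key, Value, and Secure members together; the OpsWorks.EnvironmentVariable shape and its initializer labels are assumptions based on the generated Soto code.

```swift
import SotoOpsWorks

// Sketch (names assumed from the generated Soto code): one plain variable
// and one concealed variable; DescribeApps reports the second value as
// *****FILTERED***** because secure is true.
let environment: [OpsWorks.EnvironmentVariable] = [
    .init(key: "RAILS_ENV", secure: false, value: "production"),
    .init(key: "DB_PASSWORD", secure: true, value: "example-password"),
]
```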

" } } }, @@ -4850,14 +4850,14 @@ "InstanceId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's AWS OpsWorks Stacks ID.

", + "smithy.api#documentation": "

The instance's OpsWorks Stacks ID.

", "smithy.api#required": {} } }, "ValidForInMinutes": { "target": "com.amazonaws.opsworks#ValidForInMinutes", "traits": { - "smithy.api#documentation": "

The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, the user will no longer be able to use the credentials to log in. If the user is logged in at the time, he or she automatically will be logged out.

" + "smithy.api#documentation": "

The length of time (in minutes) that the grant is valid. When the grant expires at the end of this period, \n the user will no longer be able to use the credentials to log in. If the user is logged in at the time, they are \n logged out.

" } } }, @@ -4961,13 +4961,13 @@ "ElasticIp": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance Elastic IP address .

" + "smithy.api#documentation": "

The instance Elastic IP address.

" } }, "Hostname": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance host name.

" + "smithy.api#documentation": "

The instance host name. The following are character limits for instance host names.

\n
    \n
  • \n

    Linux-based instances: 63 characters

    \n
  • \n
  • \n

    Windows-based instances: 15 characters

    \n
  • \n
" } }, "InfrastructureClass": { @@ -4979,7 +4979,7 @@ "InstallUpdatesOnBoot": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Whether to install operating system and package updates when the instance boots. The default\n value is true. If this value is set to false, you must then update\n your instances manually by using CreateDeployment to run the\n update_dependencies stack command or\n by manually running yum (Amazon\n Linux) or apt-get (Ubuntu) on the instances.

\n \n

We strongly recommend using the default value of true, to ensure that your\n instances have the latest security updates.

\n
" + "smithy.api#documentation": "

Whether to install operating system and package updates when the instance boots. The default\n value is true. If this value is set to false, you must update\n instances manually by using CreateDeployment to run the\n update_dependencies stack command or\n by manually running yum (Amazon\n Linux) or apt-get (Ubuntu) on the instances.

\n \n

We strongly recommend using the default value of true to ensure that your\n instances have the latest security updates.

\n
" } }, "InstanceId": { @@ -4991,7 +4991,7 @@ "InstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ARN of the instance's IAM profile. For more information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The ARN of the instance's IAM profile. For more information about IAM ARNs, \n see Using\n Identifiers.

" } }, "InstanceType": { @@ -5057,7 +5057,7 @@ "ReportedAgentVersion": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's reported AWS OpsWorks Stacks agent version.

" + "smithy.api#documentation": "

The instance's reported OpsWorks Stacks agent version.

" } }, "ReportedOs": { @@ -5324,7 +5324,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The layer name.

" + "smithy.api#documentation": "

The layer name. Layer names can be a maximum of 32 characters.

" } }, "Shortname": { @@ -5336,7 +5336,7 @@ "Attributes": { "target": "com.amazonaws.opsworks#LayerAttributes", "traits": { - "smithy.api#documentation": "

The layer attributes.

\n

For the HaproxyStatsPassword, MysqlRootPassword, and\n GangliaPassword attributes, AWS OpsWorks Stacks returns *****FILTERED*****\n instead of the actual value

\n

For an ECS Cluster layer, AWS OpsWorks Stacks the EcsClusterArn attribute is set to the cluster's ARN.

" + "smithy.api#documentation": "

The layer attributes.

\n

For the HaproxyStatsPassword, MysqlRootPassword, and\n GangliaPassword attributes, OpsWorks Stacks returns *****FILTERED*****\n instead of the actual value.

\n

For an ECS Cluster layer, OpsWorks Stacks sets the EcsClusterArn attribute to the cluster's ARN.

" } }, "CloudWatchLogsConfiguration": { @@ -5348,7 +5348,7 @@ "CustomInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ARN of the default IAM profile to be used for the layer's EC2 instances. For more\n information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The ARN of the default IAM profile to be used for the layer's EC2 instances. For more\n information about IAM ARNs, see Using\n Identifiers.

" } }, "CustomJson": { @@ -5402,7 +5402,7 @@ "DefaultRecipes": { "target": "com.amazonaws.opsworks#Recipes", "traits": { - "smithy.api#documentation": "

AWS OpsWorks Stacks supports five lifecycle events: setup, configuration, \n deploy, undeploy, and shutdown.\n For each layer, AWS OpsWorks Stacks runs a set of standard recipes for each event. You can also provide \n custom recipes for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes after the standard \n recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of \n the five events.

\n

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe \n name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the \n dbsetup.rb recipe in the repository's phpapp2 folder.

" + "smithy.api#documentation": "

OpsWorks Stacks supports five lifecycle events: setup, configuration, \n deploy, undeploy, and shutdown.\n For each layer, OpsWorks Stacks runs a set of standard recipes for each event. You can also provide \n custom recipes for any or all layers and events. OpsWorks Stacks runs custom event recipes after the standard \n recipes. LayerCustomRecipes specifies the custom recipes for a particular layer to be run in response to each of \n the five events.

\n

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe \n name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the \n dbsetup.rb recipe in the repository's phpapp2 folder.

" } }, "CustomRecipes": { @@ -5735,13 +5735,13 @@ "target": "com.amazonaws.opsworks#MaxResults", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Do not use. A validation exception occurs if you add a MaxResults parameter to a ListTagsRequest call.\n

" + "smithy.api#documentation": "

Do not use. A validation exception occurs if you add a MaxResults parameter to a ListTagsRequest \n call.\n

" } }, "NextToken": { "target": "com.amazonaws.opsworks#NextToken", "traits": { - "smithy.api#documentation": "

Do not use. A validation exception occurs if you add a NextToken parameter to a ListTagsRequest call.\n

" + "smithy.api#documentation": "

Do not use. A validation exception occurs if you add a NextToken parameter to a ListTagsRequest \n call.\n

" } } }, @@ -5788,13 +5788,13 @@ "UpScaling": { "target": "com.amazonaws.opsworks#AutoScalingThresholds", "traits": { - "smithy.api#documentation": "

An AutoScalingThresholds object that describes the upscaling configuration,\n which defines how and when AWS OpsWorks Stacks increases the number of instances.

" + "smithy.api#documentation": "

An AutoScalingThresholds object that describes the upscaling configuration,\n which defines how and when OpsWorks Stacks increases the number of instances.

" } }, "DownScaling": { "target": "com.amazonaws.opsworks#AutoScalingThresholds", "traits": { - "smithy.api#documentation": "

An AutoScalingThresholds object that describes the downscaling configuration,\n which defines how and when AWS OpsWorks Stacks reduces the number of instances.

" + "smithy.api#documentation": "

An AutoScalingThresholds object that describes the downscaling configuration,\n which defines how and when OpsWorks Stacks reduces the number of instances.

" } } }, @@ -5832,13 +5832,13 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The name of the operating system, such as Amazon Linux 2018.03.

" + "smithy.api#documentation": "

The name of the operating system, such as Amazon Linux 2.

" } }, "Id": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of a supported operating system, such as Amazon Linux 2018.03.

" + "smithy.api#documentation": "

The ID of a supported operating system, such as Amazon Linux 2.

" } }, "Type": { @@ -5850,7 +5850,7 @@ "ConfigurationManagers": { "target": "com.amazonaws.opsworks#OperatingSystemConfigurationManagers", "traits": { - "smithy.api#documentation": "

Supported configuration manager name and versions for an AWS OpsWorks Stacks operating system.

" + "smithy.api#documentation": "

Supported configuration manager name and versions for an OpsWorks Stacks operating system.

" } }, "ReportedName": { @@ -5873,7 +5873,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes supported operating systems in AWS OpsWorks Stacks.

" + "smithy.api#documentation": "

Describes supported operating systems in OpsWorks Stacks.

" } }, "com.amazonaws.opsworks#OperatingSystemConfigurationManager": { @@ -5893,7 +5893,7 @@ } }, "traits": { - "smithy.api#documentation": "

A block that contains information about the configuration manager (Chef) and the versions of the configuration manager that are supported for an operating system.

" + "smithy.api#documentation": "

A block that contains information about the configuration manager (Chef) and the versions of the \n configuration manager that are supported for an operating system.

" } }, "com.amazonaws.opsworks#OperatingSystemConfigurationManagers": { @@ -6147,7 +6147,7 @@ "name": "opsworks" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "AWS OpsWorks\n

Welcome to the AWS OpsWorks Stacks API Reference. This guide provides descriptions, syntax, and\n usage examples for AWS OpsWorks Stacks actions and data types, including common parameters and error\n codes.

\n

AWS OpsWorks Stacks is an application management service that provides an integrated experience for\n overseeing the complete application lifecycle. For information about this product, go to the\n AWS OpsWorks details page.

\n\n

\n SDKs and CLI\n

\n

The most common way to use the AWS OpsWorks Stacks API is by using the AWS Command Line Interface (CLI) or by using one of the AWS SDKs to implement applications in your preferred language. For more information, see:

\n \n\n

\n Endpoints\n

\n

AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. Stacks \n can only be accessed or managed within the endpoint in which they are created.

\n
    \n
  • \n

    opsworks.us-east-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.us-east-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.us-west-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.us-west-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS console)

    \n
  • \n
  • \n

    opsworks.eu-west-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.eu-west-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.eu-west-3.amazonaws.com

    \n
  • \n
  • \n

    opsworks.eu-central-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-northeast-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-northeast-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-south-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-southeast-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-southeast-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.sa-east-1.amazonaws.com

    \n
  • \n
\n

\n Chef Versions\n

\n

When you call CreateStack, CloneStack, or UpdateStack we recommend you\n use the ConfigurationManager parameter to specify the Chef version.\n The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information,\n see Chef Versions.

\n \n

You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks to Chef 12 as soon as possible.

\n
", + "smithy.api#documentation": "OpsWorks\n

Welcome to the OpsWorks Stacks API Reference. This guide provides descriptions, syntax, \n and\n usage examples for OpsWorks Stacks actions and data types, including common parameters and error\n codes.

\n

OpsWorks Stacks is an application management service that provides an integrated experience for\n managing the complete application lifecycle. For information about OpsWorks, see the\n OpsWorks information page.

\n

\n SDKs and CLI\n

\n

Use the OpsWorks Stacks API with the Command Line Interface (CLI) or with one of the \n Amazon Web Services SDKs to implement applications in your preferred language. For more information, see:

\n \n

\n Endpoints\n

\n

OpsWorks Stacks supports the following endpoints, all HTTPS. You must connect to one of the following endpoints. \n Stacks can only be accessed or managed within the endpoint in which they are created.

\n
    \n
  • \n

    opsworks.us-east-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.us-east-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.us-west-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.us-west-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ca-central-1.amazonaws.com (API only; not available in the Amazon Web Services Management Console)

    \n
  • \n
  • \n

    opsworks.eu-west-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.eu-west-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.eu-west-3.amazonaws.com

    \n
  • \n
  • \n

    opsworks.eu-central-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-northeast-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-northeast-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-south-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-southeast-1.amazonaws.com

    \n
  • \n
  • \n

    opsworks.ap-southeast-2.amazonaws.com

    \n
  • \n
  • \n

    opsworks.sa-east-1.amazonaws.com

    \n
  • \n
\n

\n Chef Versions\n

\n

When you call CreateStack, CloneStack, or UpdateStack we recommend you\n use the ConfigurationManager parameter to specify the Chef version.\n The recommended and default value for Linux stacks is currently 12. Windows stacks use Chef 12.2. For more information,\n see Chef Versions.

\n \n

You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend migrating your existing Linux stacks \n to Chef 12 as soon as possible.
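As a hedged sketch of selecting the Chef version, assuming the generated OpsWorks.StackConfigurationManager shape exposes name and version members:

```swift
import SotoOpsWorks

// Sketch (names assumed from the generated Soto code): ask for Chef 12
// on a Linux stack via the ConfigurationManager parameter.
let chef12 = OpsWorks.StackConfigurationManager(name: "Chef", version: "12")
// Pass `chef12` as `configurationManager` in CreateStack, CloneStack, or UpdateStack.
```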

\n
", "smithy.api#title": "AWS OpsWorks", "smithy.api#xmlNamespace": { "uri": "http://opsworks.amazonaws.com/doc/2013-02-18/" @@ -6194,7 +6194,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6237,7 +6236,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -6250,7 +6250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6264,7 +6263,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6287,7 +6285,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6322,7 +6319,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6333,14 +6329,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -6354,14 +6352,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -6370,11 +6366,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6385,14 +6381,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -6406,7 +6404,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -6426,7 +6423,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6437,14 +6433,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -6455,9 +6453,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -6985,7 +6985,7 @@ "IamUserArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for an AWS Identity and Access Management (IAM) role. For more\n information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for an Identity and Access Management (IAM) role. For more\n information about IAM ARNs, see Using\n Identifiers.

" } }, "AllowSsh": { @@ -7121,7 +7121,7 @@ "DbInstanceIdentifier": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The DB instance identifier.

" + "smithy.api#documentation": "

The database instance identifier.

" } }, "DbUser": { @@ -7133,13 +7133,13 @@ "DbPassword": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" + "smithy.api#documentation": "

OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" } }, "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's AWS region.

" + "smithy.api#documentation": "

The instance's Amazon Web Services Region.

" } }, "Address": { @@ -7163,7 +7163,7 @@ "MissingOnRds": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Set to true if AWS OpsWorks Stacks is unable to discover the Amazon RDS instance. AWS OpsWorks Stacks attempts\n to discover the instance only once. If this value is set to true, you must\n deregister the instance, and then register it again.

" + "smithy.api#documentation": "

Set to true if OpsWorks Stacks is unable to discover the Amazon RDS instance. \n OpsWorks Stacks attempts\n to discover the instance only once. If this value is set to true, you must\n deregister the instance, and then register it again.

" } } }, @@ -7247,7 +7247,7 @@ } }, "traits": { - "smithy.api#documentation": "

AWS OpsWorks Stacks supports five\n lifecycle events:\n setup, configuration, deploy, undeploy, and shutdown. For\n each layer, AWS OpsWorks Stacks runs a set of standard recipes for each event. In addition, you can provide\n custom recipes for any or all layers and events. AWS OpsWorks Stacks runs custom event recipes after the\n standard recipes. LayerCustomRecipes specifies the custom recipes for a\n particular layer to be run in response to each of the five events.

\n\n

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in the repository's phpapp2 folder.

" + "smithy.api#documentation": "

OpsWorks Stacks supports five\n lifecycle events:\n setup, configuration, deploy, undeploy, and shutdown. For\n each layer, OpsWorks Stacks runs a set of standard recipes for each event. In addition, you can provide\n custom recipes for any or all layers and events. OpsWorks Stacks runs custom event recipes after the\n standard recipes. LayerCustomRecipes specifies the custom recipes for a\n particular layer to be run in response to each of the five events.

\n

To specify a recipe, use the cookbook's directory name in the repository followed by two colons and the recipe name, \n which is the recipe's file name without the .rb extension. For example: phpapp2::dbsetup specifies the dbsetup.rb recipe in \n the repository's phpapp2 folder.

" } }, "com.amazonaws.opsworks#RegisterEcsCluster": { @@ -7381,7 +7381,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers instances that were created outside of AWS OpsWorks Stacks with a specified stack.

\n \n

We do not recommend using this action to register instances. The complete registration\n operation includes two tasks: installing the AWS OpsWorks Stacks agent on the instance, and registering\n the instance with the stack. RegisterInstance handles only the second step. You\n should instead use the AWS CLI register command, which performs the entire\n registration operation. For more information,\n see \n Registering an Instance with an AWS OpsWorks Stacks Stack.

\n
\n

Registered instances have the same requirements as instances that are created by using the CreateInstance API. \n For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance \n type. For more information about requirements for instances that you want to register, see \n Preparing the Instance.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Registers instances that were created outside of OpsWorks Stacks with a specified stack.

\n \n

We do not recommend using this action to register instances. The complete registration\n operation includes two tasks: installing the OpsWorks Stacks agent on the instance, and registering\n the instance with the stack. RegisterInstance handles only the second step. You\n should instead use the CLI register command, which performs the entire\n registration operation. For more information,\n see \n Registering an Instance with an OpsWorks Stacks Stack.

\n
\n

Registered instances have the same requirements as instances that are created by using the CreateInstance \n API. \n For example, registered instances must be running a supported Linux-based operating system, and they must have a supported instance \n type. For more information about requirements for instances that you want to register, see \n Preparing the Instance.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#RegisterInstanceRequest": { @@ -7397,7 +7397,7 @@ "Hostname": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's hostname.

" + "smithy.api#documentation": "

The instance's host name. The following are character limits for instance host names.

  • Linux-based instances: 63 characters
  • Windows-based instances: 15 characters
" } }, "PublicIp": { @@ -7441,7 +7441,7 @@ "InstanceId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The registered instance's AWS OpsWorks Stacks ID.

" + "smithy.api#documentation": "

The registered instance's OpsWorks Stacks ID.

" } } }, @@ -7696,7 +7696,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes an AWS OpsWorks Stacks service error.

" + "smithy.api#documentation": "

Describes an OpsWorks Stacks service error.

" } }, "com.amazonaws.opsworks#ServiceErrors": { @@ -7722,7 +7722,7 @@ } ], "traits": { - "smithy.api#documentation": "

Specify the load-based auto scaling configuration for a specified layer. For more\n information, see Managing\n Load with Time-based and Load-based Instances.

\n \n

To use load-based auto scaling, you must create a set of load-based auto scaling instances. Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough instances to handle the maximum anticipated load.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Specify the load-based auto scaling configuration for a specified layer. For more\n information, see Managing\n Load with Time-based and Load-based Instances.

\n \n

To use load-based auto scaling, you must create a set of load-based auto scaling instances. \n Load-based auto scaling operates only on the instances from that set, so you must ensure that you have created enough \n instances to handle the maximum anticipated load.

\n
\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#SetLoadBasedAutoScalingRequest": { @@ -7744,13 +7744,13 @@ "UpScaling": { "target": "com.amazonaws.opsworks#AutoScalingThresholds", "traits": { - "smithy.api#documentation": "

An AutoScalingThresholds object with the upscaling threshold configuration. If\n the load exceeds these thresholds for a specified amount of time, AWS OpsWorks Stacks starts a specified\n number of instances.

" + "smithy.api#documentation": "

An AutoScalingThresholds object with the upscaling threshold configuration. If\n the load exceeds these thresholds for a specified amount of time, OpsWorks Stacks starts a specified\n number of instances.

" } }, "DownScaling": { "target": "com.amazonaws.opsworks#AutoScalingThresholds", "traits": { - "smithy.api#documentation": "

An AutoScalingThresholds object with the downscaling threshold configuration. If\n the load falls below these thresholds for a specified amount of time, AWS OpsWorks Stacks stops a specified\n number of instances.

" + "smithy.api#documentation": "

An AutoScalingThresholds object with the downscaling threshold configuration. If\n the load falls below these thresholds for a specified amount of time, OpsWorks Stacks stops a specified\n number of instances.
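A hedged sketch of how the upscaling and downscaling thresholds described here might be set from Soto; the AutoScalingThresholds member names follow the generated model, while the layer ID and numeric thresholds are illustrative assumptions.

import SotoOpsWorks

// Hedged sketch: enable load-based scaling on a layer, adding two instances when CPU
// stays above 80% and removing two when it falls below 30%. Values are assumptions.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let opsWorks = OpsWorks(client: client, region: .useast1)

let request = OpsWorks.SetLoadBasedAutoScalingRequest(
    downScaling: OpsWorks.AutoScalingThresholds(cpuThreshold: 30, instanceCount: 2, thresholdsWaitTime: 10),
    enable: true,
    layerId: "11111111-2222-3333-4444-555555555555",   // placeholder layer ID
    upScaling: OpsWorks.AutoScalingThresholds(cpuThreshold: 80, instanceCount: 2, thresholdsWaitTime: 5)
)
try await opsWorks.setLoadBasedAutoScaling(request)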

" } } }, @@ -7775,7 +7775,7 @@ } ], "traits": { - "smithy.api#documentation": "

Specifies a user's permissions. For more information, see Security and\n Permissions.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Specifies a user's permissions. For more information, see \n Security and\n Permissions.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#SetPermissionRequest": { @@ -7865,7 +7865,7 @@ "ExecutionTimeout": { "target": "com.amazonaws.opsworks#Integer", "traits": { - "smithy.api#documentation": "

The time, in seconds, that AWS OpsWorks Stacks will wait after triggering a Shutdown event before shutting down an instance.

" + "smithy.api#documentation": "

The time, in seconds, that OpsWorks Stacks waits after triggering a Shutdown event before \n shutting down an instance.

" } }, "DelayUntilElbConnectionsDrained": { @@ -7891,7 +7891,7 @@ "Url": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The source URL. The following is an example of an Amazon S3 source URL: https://s3.amazonaws.com/opsworks-demo-bucket/opsworks_cookbook_demo.tar.gz.

" + "smithy.api#documentation": "

The source URL. The following is an example of an Amazon S3 source \n URL: https://s3.amazonaws.com/opsworks-demo-bucket/opsworks_cookbook_demo.tar.gz.

" } }, "Username": { @@ -7903,19 +7903,19 @@ "Password": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

When included in a request, the parameter depends on the repository type.

  • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.
  • For HTTP bundles and Subversion repositories, set Password to the password.

For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.

\n

In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" + "smithy.api#documentation": "

When included in a request, the parameter depends on the repository type.

  • For Amazon S3 bundles, set Password to the appropriate IAM secret access key.
  • For HTTP bundles and Subversion repositories, set Password to the password.

For more information on how to safely handle IAM credentials, see https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html.

\n

In responses, OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" } }, "SshKey": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

In requests, the repository's SSH key.

\n

In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" + "smithy.api#documentation": "

In requests, the repository's SSH key.

\n

In responses, OpsWorks Stacks returns *****FILTERED***** instead of the actual value.

" } }, "Revision": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The application's version. AWS OpsWorks Stacks enables you to easily deploy new versions of an application. One of the simplest approaches is to have branches or revisions in your repository that represent different versions that can potentially be deployed.

" + "smithy.api#documentation": "

The application's version. OpsWorks Stacks enables you to easily deploy new versions of an application. \n One of the simplest approaches is to have branches or revisions in your repository that represent different \n versions that can potentially be deployed.

" } } }, @@ -7992,7 +7992,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack name.

" + "smithy.api#documentation": "

The stack name. Stack names can be a maximum of 64 characters.

" } }, "Arn": { @@ -8004,7 +8004,7 @@ "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack AWS region, such as \"ap-northeast-2\". For more information about AWS regions, see Regions and Endpoints.

" + "smithy.api#documentation": "

The stack Amazon Web Services Region, such as ap-northeast-2. For more information about \n Amazon Web Services Regions, see Regions and Endpoints.

" } }, "VpcId": { @@ -8022,13 +8022,13 @@ "ServiceRoleArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack AWS Identity and Access Management (IAM) role.

" + "smithy.api#documentation": "

The stack Identity and Access Management (IAM) role.

" } }, "DefaultInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

" } }, "DefaultOs": { @@ -8058,7 +8058,7 @@ "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information on custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.

" + "smithy.api#documentation": "

A JSON object that contains user-defined attributes to be added to the stack configuration and deployment attributes. \n You can use custom JSON to override the corresponding default stack configuration attribute values or to pass data to recipes. \n The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information on custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.

" } }, "ConfigurationManager": { @@ -8082,7 +8082,7 @@ "UseOpsworksSecurityGroups": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Whether the stack automatically associates the AWS OpsWorks Stacks built-in security groups with the stack's layers.

" + "smithy.api#documentation": "

Whether the stack automatically associates the OpsWorks Stacks built-in security groups with the stack's layers.

" } }, "CustomCookbooksSource": { @@ -8094,7 +8094,7 @@ "DefaultSshKeyName": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A default Amazon EC2 key pair for the stack's instances. You can override this value when you create or update an instance.

" + "smithy.api#documentation": "

A default Amazon EC2 key pair for the stack's instances. You can override this value when you create or \n update an instance.

" } }, "CreatedAt": { @@ -8106,7 +8106,7 @@ "DefaultRootDeviceType": { "target": "com.amazonaws.opsworks#RootDeviceType", "traits": { - "smithy.api#documentation": "

The default root device type. This value is used by default for all instances in the stack,\n but you can override it when you create an instance. For more information, see Storage for the Root Device.

" + "smithy.api#documentation": "

The default root device type. This value is used by default for all instances in the stack,\n but you can override it when you create an instance. For more information, see Storage for the Root Device.\n

" } }, "AgentVersion": { @@ -8146,13 +8146,13 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The name. This parameter must be set to \"Chef\".

" + "smithy.api#documentation": "

The name. This parameter must be set to Chef.

" } }, "Version": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. The default value for Linux stacks is 11.4.

" + "smithy.api#documentation": "

The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux stacks, and to 12.2 for Windows stacks. \n The default value for Linux stacks is 12.

" } } }, @@ -8297,7 +8297,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops a specified instance. When you stop a standard instance, the data disappears and must\n be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without\n losing data. For more information, see Starting,\n Stopping, and Rebooting Instances.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Stops a specified instance. When you stop a standard instance, the data disappears and must\n be reinstalled when you restart the instance. You can stop an Amazon EBS-backed instance without\n losing data. For more information, see Starting,\n Stopping, and Rebooting Instances.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#StopInstanceRequest": { @@ -8313,7 +8313,7 @@ "Force": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, \n adding the Force parameter to the StopInstances API call disassociates the AWS OpsWorks Stacks instance from EC2, and forces deletion of only the OpsWorks Stacks instance. \n You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the AWS OpsWorks Stacks instance with a new one.

" + "smithy.api#documentation": "

Specifies whether to force an instance to stop. If the instance's root device type is ebs, or EBS-backed, \n adding the Force parameter to the StopInstances API call disassociates the OpsWorks Stacks \n instance from EC2, and forces deletion of only the OpsWorks Stacks instance. \n You must also delete the formerly-associated instance in EC2 after troubleshooting and replacing the OpsWorks Stacks \n instance with a new one.

" } } }, @@ -8394,7 +8394,7 @@ } ], "traits": { - "smithy.api#documentation": "

Apply cost-allocation tags to a specified stack or layer in AWS OpsWorks Stacks. For more information about how tagging works, see Tags in the AWS OpsWorks User Guide.

" + "smithy.api#documentation": "

Apply cost-allocation tags to a specified stack or layer in OpsWorks Stacks. For more information about how \n tagging works, see Tags in the OpsWorks User Guide.

" } }, "com.amazonaws.opsworks#TagResourceRequest": { @@ -8410,7 +8410,7 @@ "Tags": { "target": "com.amazonaws.opsworks#Tags", "traits": { - "smithy.api#documentation": "

A map that contains tag keys and tag values that are attached to a stack or layer.

  • The key cannot be empty.
  • The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /
  • The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /
  • Leading and trailing white spaces are trimmed from both the key and value.
  • A maximum of 40 tags is allowed for any resource.
", + "smithy.api#documentation": "

A map that contains tag keys and tag values that are attached to a stack or layer.

  • The key cannot be empty.
  • The key can be a maximum of 127 characters, and can contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /
  • The value can be a maximum 255 characters, and contain only Unicode letters, numbers, or separators, or the following special characters: + - = . _ : /
  • Leading and trailing white spaces are trimmed from both the key and value.
  • A maximum of 40 tags is allowed for any resource.
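A hedged sketch of applying cost-allocation tags within the limits listed above, using the generated TagResource operation; the stack ARN and tag values are placeholder assumptions.

import SotoOpsWorks

// Hedged sketch: tag a stack, staying inside the documented limits
// (key <= 127 characters, value <= 255 characters, at most 40 tags per resource).
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let opsWorks = OpsWorks(client: client, region: .useast1)

let request = OpsWorks.TagResourceRequest(
    resourceArn: "arn:aws:opsworks:us-east-1:123456789012:stack/11111111-2222-3333-4444-555555555555/",
    tags: ["CostCenter": "platform-team", "Environment": "staging"]
)
try await opsWorks.tagResource(request)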
", "smithy.api#required": {} } } @@ -8449,13 +8449,13 @@ "ValidForInMinutes": { "target": "com.amazonaws.opsworks#Integer", "traits": { - "smithy.api#documentation": "

The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, the user will no longer be able to use the credentials to log in. If they are logged in at the time, they will be automatically logged out.

" + "smithy.api#documentation": "

The length of time (in minutes) that the grant is valid. When the grant expires, at the end of this period, \n the user will no longer be able to use the credentials to log in. If they are logged in at the time, they are \n automatically logged out.

" } }, "InstanceId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's AWS OpsWorks Stacks ID.

" + "smithy.api#documentation": "

The instance's OpsWorks Stacks ID.

" } } }, @@ -8506,7 +8506,7 @@ } ], "traits": { - "smithy.api#documentation": "

Unassigns a registered instance from all layers that are using the instance. \n The instance remains in the stack as an unassigned instance, and can be assigned to \n another layer as needed. You cannot use this action with instances that were created \n with AWS OpsWorks Stacks.

\n

\n Required Permissions: To use this action, an IAM user must \n have a Manage permissions level for the stack or an attached policy that explicitly \n grants permissions. For more information about user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Unassigns a registered instance from all layers that are using the instance. \n The instance remains in the stack as an unassigned instance, and can be assigned to \n another layer as needed. You cannot use this action with instances that were created \n with OpsWorks Stacks.

\n

\n Required Permissions: To use this action, an IAM user must \n have a Manage permissions level for the stack or an attached policy that explicitly \n grants permissions. For more information about user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#UnassignInstanceRequest": { @@ -8688,7 +8688,7 @@ "Environment": { "target": "com.amazonaws.opsworks#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

An array of EnvironmentVariable objects that specify environment variables to be\n associated with the app. After you deploy the app, these variables are defined on the\n associated app server instances.For more information, see Environment Variables.

\n

There is no specific limit on the number of environment variables. However, the size of the associated data structure - which includes the variables' names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most if not all use cases. Exceeding it will cause an exception with the message, \"Environment: is too large (maximum is 20 KB).\"

\n \n

If you have specified one or more environment variables, you cannot modify the stack's Chef version.

\n
" + "smithy.api#documentation": "

An array of EnvironmentVariable objects that specify environment variables to be\n associated with the app. After you deploy the app, these variables are defined on the\n associated app server instances.For more information, see Environment Variables.

\n

There is no specific limit on the number of environment variables. However, the size of the associated data structure - \n which includes the variables' names, values, and protected flag values - cannot exceed 20 KB. This limit should accommodate most \n if not all use cases. Exceeding it will cause an exception with the message, \"Environment: is too large (maximum is 20 KB).\"

\n \n

If you have specified one or more environment variables, you cannot modify the stack's Chef version.

\n
" } } }, @@ -8729,7 +8729,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The new name.

" + "smithy.api#documentation": "

The new name, which can be a maximum of 32 characters.

" } } }, @@ -8788,19 +8788,19 @@ "Hostname": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance host name.

" + "smithy.api#documentation": "

The instance host name. The following are character limits for instance host names.

  • Linux-based instances: 63 characters
  • Windows-based instances: 15 characters
" } }, "Os": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
  • CentOS Linux 7
  • Red Hat Enterprise Linux 7
  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

For more information about supported operating systems, see AWS OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about supported operating systems, see Operating Systems. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.
" + "smithy.api#documentation": "

The instance's operating system, which must be set to one of the following. You cannot update an instance that is using \n a custom AMI.

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
  • CentOS Linux 7
  • Red Hat Enterprise Linux 7
  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.

Not all operating systems are supported with all versions of Chef. For more information about supported operating systems, see OpsWorks Stacks Operating Systems.

The default option is the current Amazon Linux version. If you set this parameter to Custom, you must use the AmiId parameter to specify the custom AMI that you want to use. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

You can specify a different Linux operating system for the updated stack, but you cannot change from Linux to Windows or Windows to Linux.
" } }, "AmiId": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ID of the AMI that was used to create the instance. The value of this parameter must be the same AMI ID that the instance is already using. \n You cannot apply a new AMI to an instance by running UpdateInstance. UpdateInstance does not work on instances that are using custom AMIs.\n

" + "smithy.api#documentation": "

The ID of the AMI that was used to create the instance. The value of this parameter must be the same AMI ID that the \n instance is already using. \n You cannot apply a new AMI to an instance by running UpdateInstance. UpdateInstance does not work on instances that are using \n custom AMIs.\n

" } }, "SshKeyName": { @@ -8830,7 +8830,7 @@ "AgentVersion": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The default AWS OpsWorks Stacks agent version. You have the following options:

  • INHERIT - Use the stack's default agent version setting.
  • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the instance.

The default setting is INHERIT. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions.

\n

AgentVersion cannot be set to Chef 12.2.

" + "smithy.api#documentation": "

The default OpsWorks Stacks agent version. You have the following options:

  • INHERIT - Use the stack's default agent version setting.
  • version_number - Use the specified agent version. This value overrides the stack's default setting. To update the agent version, you must edit the instance configuration and specify a new version. OpsWorks Stacks installs that version on the instance.

The default setting is INHERIT. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions.

\n

AgentVersion cannot be set to Chef 12.2.

" } } }, @@ -8871,13 +8871,13 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The layer name, which is used by the console.

" + "smithy.api#documentation": "

The layer name, which is used by the console. Layer names can be a maximum of 32 characters.

" } }, "Shortname": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

For custom layers only, use this parameter to specify the layer's short name, which is used internally by AWS OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files are installed. It can have a maximum of 200 characters and must be in the following format: /\\A[a-z0-9\\-\\_\\.]+\\Z/.

\n

The built-in layers' short names are defined by AWS OpsWorks Stacks. For more information, see the Layer Reference\n

" + "smithy.api#documentation": "

For custom layers only, use this parameter to specify the layer's short name, which is used internally by \n OpsWorks Stacks and by Chef. The short name is also used as the name for the directory where your app files \n are installed. It can have a maximum of 32 characters and must be in the following format: /\\A[a-z0-9\\-\\_\\.]+\\Z/.

\n

Built-in layer short names are defined by OpsWorks Stacks. For more information, see the \n Layer reference in the OpsWorks User Guide.\n

" } }, "Attributes": { @@ -8889,13 +8889,13 @@ "CloudWatchLogsConfiguration": { "target": "com.amazonaws.opsworks#CloudWatchLogsConfiguration", "traits": { - "smithy.api#documentation": "

Specifies CloudWatch Logs configuration options for the layer. For more information, see CloudWatchLogsLogStream.

" + "smithy.api#documentation": "

Specifies CloudWatch Logs configuration options for the layer. For more information, \n see CloudWatchLogsLogStream.

" } }, "CustomInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ARN of an IAM profile to be used for all of the layer's EC2 instances. For more\n information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The ARN of an IAM profile to be used for all of the layer's EC2 instances. For more\n information about IAM ARNs, see Using\n Identifiers.

" } }, "CustomJson": { @@ -9080,7 +9080,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's new name.

" + "smithy.api#documentation": "

The stack's new name. Stack names can be a maximum of 64 characters.

" } }, "Attributes": { @@ -9098,19 +9098,19 @@ "DefaultInstanceProfileArn": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

" + "smithy.api#documentation": "

The ARN of an IAM profile that is the default profile for all of the stack's EC2 instances.\n For more information about IAM ARNs, see Using\n Identifiers.

" } }, "DefaultOs": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
  • A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
  • CentOS Linux 7
  • Red Hat Enterprise Linux 7
  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.
  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system.\n For more information about supported operating systems,\n see AWS OpsWorks Stacks Operating Systems.

" + "smithy.api#documentation": "

The stack's operating system, which must be set to one of the following:

  • A supported Linux operating system: An Amazon Linux version, such as Amazon Linux 2, Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03.
  • A supported Ubuntu operating system, such as Ubuntu 18.04 LTS, Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS.
  • CentOS Linux 7
  • Red Hat Enterprise Linux 7
  • A supported Windows operating system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web.
  • A custom AMI: Custom. You specify the custom AMI you want to use when you create instances. For more information about how to use custom AMIs with OpsWorks, see Using Custom AMIs.

The default option is the stack's current operating system.\n Not all operating systems are supported with all versions of Chef. For more information about supported operating systems,\n see OpsWorks Stacks Operating Systems.

" } }, "HostnameTheme": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The stack's new host name theme, with spaces replaced by underscores.\n The theme is used to generate host names for the stack's instances.\n By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to the\n layer's short name. The other themes are:

  • Baked_Goods
  • Clouds
  • Europe_Cities
  • Fruits
  • Greek_Deities_and_Titans
  • Legendary_creatures_from_Japan
  • Planets_and_Moons
  • Roman_Deities
  • Scottish_Islands
  • US_Cities
  • Wild_Cats

To obtain a generated host name, call GetHostNameSuggestion, which returns a\n host name based on the current theme.

" + "smithy.api#documentation": "

The stack's new host name theme, with spaces replaced by underscores.\n The theme is used to generate host names for the stack's instances.\n By default, HostnameTheme is set to Layer_Dependent, which creates host names by appending integers to \n the\n layer's short name. The other themes are:

  • Baked_Goods
  • Clouds
  • Europe_Cities
  • Fruits
  • Greek_Deities_and_Titans
  • Legendary_creatures_from_Japan
  • Planets_and_Moons
  • Roman_Deities
  • Scottish_Islands
  • US_Cities
  • Wild_Cats

To obtain a generated host name, call GetHostNameSuggestion, which returns a\n host name based on the current theme.

" } }, "DefaultAvailabilityZone": { @@ -9128,13 +9128,13 @@ "CustomJson": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration JSON values or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.

" + "smithy.api#documentation": "

A string that contains user-defined, custom JSON. It can be used to override the corresponding default stack configuration \n JSON values or to pass data to recipes. The string should be in the following format:

\n

\n \"{\\\"key1\\\": \\\"value1\\\", \\\"key2\\\": \\\"value2\\\",...}\"\n

\n

For more information about custom JSON, see Use Custom JSON to\n Modify the Stack Configuration Attributes.
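As a hedged illustration of the custom JSON format described here, the sketch below serialises a small dictionary into that escaped string form and passes it to UpdateStack; the stack ID and attribute keys are assumptions, not values from this patch.

import Foundation
import SotoOpsWorks

// Hedged sketch: build the "{\"key1\": \"value1\", ...}" string from a Swift dictionary
// and apply it to an existing stack. Stack ID and keys are placeholder assumptions.
let attributes: [String: String] = ["java_version": "17", "log_level": "info"]
let customJsonData = try JSONSerialization.data(withJSONObject: attributes)
let customJson = String(decoding: customJsonData, as: UTF8.self)

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let opsWorks = OpsWorks(client: client, region: .useast1)

let request = OpsWorks.UpdateStackRequest(
    customJson: customJson,
    stackId: "11111111-2222-3333-4444-555555555555"
)
try await opsWorks.updateStack(request)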

" } }, "ConfigurationManager": { "target": "com.amazonaws.opsworks#StackConfigurationManager", "traits": { - "smithy.api#documentation": "

The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" + "smithy.api#documentation": "

The configuration manager. When you update a stack, we recommend that you use the configuration manager to specify the \n Chef version: 12, 11.10, or 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for Linux stacks is currently 12.

" } }, "ChefConfiguration": { @@ -9152,13 +9152,13 @@ "CustomCookbooksSource": { "target": "com.amazonaws.opsworks#Source", "traits": { - "smithy.api#documentation": "

Contains the information required to retrieve an app or cookbook from a repository. For more information, \n see Adding Apps or Cookbooks and Recipes.

" + "smithy.api#documentation": "

Contains the information required to retrieve an app or cookbook from a repository. For more information, \n see Adding Apps or \n Cookbooks and Recipes.

" } }, "DefaultSshKeyName": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

A default Amazon EC2 key-pair name. The default value is\n none. If you specify a key-pair name,\n AWS OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH\n client to log in to the instance. For more information, see Using SSH to\n Communicate with an Instance and Managing SSH\n Access. You can override this setting by specifying a different key pair, or no key\n pair, when you \n create an instance.

" + "smithy.api#documentation": "

A default Amazon EC2 key-pair name. The default value is\n none. If you specify a key-pair name,\n OpsWorks Stacks installs the public key on the instance and you can use the private key with an SSH\n client to log in to the instance. For more information, see Using SSH to\n Communicate with an Instance and Managing SSH\n Access. You can override this setting by specifying a different key pair, or no key\n pair, when you \n create an instance.

" } }, "DefaultRootDeviceType": { @@ -9170,13 +9170,13 @@ "UseOpsworksSecurityGroups": { "target": "com.amazonaws.opsworks#Boolean", "traits": { - "smithy.api#documentation": "

Whether to associate the AWS OpsWorks Stacks built-in security groups with the stack's layers.

\n

AWS OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are\n associated with layers by default. UseOpsworksSecurityGroups allows you to\n provide your own custom security groups\n instead of using the built-in groups. UseOpsworksSecurityGroups has\n the following settings:

  • True - AWS OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
  • False - AWS OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on. Custom security groups are required only for those layers that need custom settings.

For more information, see Create a New\n Stack.

" + "smithy.api#documentation": "

Whether to associate the OpsWorks Stacks built-in security groups with the stack's layers.

\n

OpsWorks Stacks provides a standard set of built-in security groups, one for each layer, which are\n associated with layers by default. UseOpsworksSecurityGroups allows you to\n provide your own custom security groups\n instead of using the built-in groups. UseOpsworksSecurityGroups has\n the following settings:

  • True - OpsWorks Stacks automatically associates the appropriate built-in security group with each layer (default setting). You can associate additional security groups with a layer after you create it, but you cannot delete the built-in security group.
  • False - OpsWorks Stacks does not associate built-in security groups with layers. You must create appropriate EC2 security groups and associate a security group with each layer that you create. However, you can still manually associate a built-in security group with a layer on. Custom security groups are required only for those layers that need custom settings.

For more information, see Create a New\n Stack.

" } }, "AgentVersion": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The default AWS OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.
  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. AWS OpsWorks Stacks then automatically installs that version on the stack's instances.

The default setting is LATEST. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. \n AgentVersion cannot be set to Chef 12.2.

\n \n

You can also specify an agent version when you create or update an instance, which overrides the stack's default setting.

\n
" + "smithy.api#documentation": "

The default OpsWorks Stacks agent version. You have the following options:

  • Auto-update - Set this parameter to LATEST. OpsWorks Stacks automatically installs new agent versions on the stack's instances as soon as they are available.
  • Fixed version - Set this parameter to your preferred agent version. To update the agent version, you must edit the stack configuration and specify a new version. OpsWorks Stacks installs that version on the stack's instances.

The default setting is LATEST. To specify an agent version,\n you must use the complete version number, not the abbreviated number shown on the console.\n For a list of available agent version numbers, call DescribeAgentVersions. \n AgentVersion cannot be set to Chef 12.2.

\n \n

You can also specify an agent version when you create or update an instance, which overrides the stack's default \n setting.

\n
" } } }, @@ -9217,7 +9217,7 @@ "SshUsername": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If\n the specified name includes other punctuation marks, AWS OpsWorks Stacks removes them. For example,\n my.name will be changed to myname. If you do not specify an SSH\n user name, AWS OpsWorks Stacks generates one from the IAM user name.

" + "smithy.api#documentation": "

The user's SSH user name. The allowable characters are [a-z], [A-Z], [0-9], '-', and '_'. If\n the specified name includes other punctuation marks, OpsWorks Stacks removes them. For example,\n my.name will be changed to myname. If you do not specify an SSH\n user name, OpsWorks Stacks generates one from the IAM user name.

" } }, "SshPublicKey": { @@ -9254,7 +9254,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an Amazon EBS volume's name or mount point. For more information, see Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" + "smithy.api#documentation": "

Updates an Amazon EBS volume's name or mount point. For more information, see \n Resource Management.

\n

\n Required Permissions: To use this action, an IAM user must have a Manage permissions\n level for the stack, or an attached policy that explicitly grants permissions. For more\n information on user permissions, see Managing User\n Permissions.

" } }, "com.amazonaws.opsworks#UpdateVolumeRequest": { @@ -9270,7 +9270,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The new name.

" + "smithy.api#documentation": "

The new name. Volume names can be a maximum of 128 characters.

" } }, "MountPoint": { @@ -9387,7 +9387,7 @@ "Name": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The volume name.

" + "smithy.api#documentation": "

The volume name. Volume names are a maximum of 128 characters.

" } }, "RaidArrayId": { @@ -9429,7 +9429,7 @@ "Region": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The AWS region. For more information about AWS regions, see Regions and Endpoints.

" + "smithy.api#documentation": "

The Amazon Web Services Region. For more information about Amazon Web Services Regions, see \n Regions and Endpoints.

" } }, "AvailabilityZone": { @@ -9441,7 +9441,7 @@ "VolumeType": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The volume type. For more information, see \n Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
" + "smithy.api#documentation": "

The volume type. For more information, see \n Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.
  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.
" } }, "Iops": { @@ -9494,7 +9494,7 @@ "VolumeType": { "target": "com.amazonaws.opsworks#String", "traits": { - "smithy.api#documentation": "

The volume type. For more information, see \n Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 500 GiB and a maximum size of 16384 GiB.
" + "smithy.api#documentation": "

The volume type. For more information, see \n Amazon EBS Volume Types.

  • standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a maximum size of 1024 GiB.
  • io1 - Provisioned IOPS (SSD). PIOPS volumes must have a minimum size of 4 GiB and a maximum size of 16384 GiB.
  • gp2 - General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a maximum size of 16384 GiB.
  • st1 - Throughput Optimized hard disk drive (HDD). Throughput optimized HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.
  • sc1 - Cold HDD. Cold HDD volumes must have a minimum size of 125 GiB and a maximum size of 16384 GiB.
" } }, "Iops": { @@ -9596,7 +9596,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a time-based instance's auto scaling schedule. The schedule consists of a set of key-value pairs.

  • The key is the time period (a UTC hour) and must be an integer from 0 - 23.
  • The value indicates whether the instance should be online or offline for the specified period, and must be set to \"on\" or \"off\"

The default setting for all time periods is off, so you use the following parameters primarily to specify the online periods. You don't have to explicitly specify offline periods unless you want to change an online period to an offline period.

\n

The following example specifies that the instance should be online for four hours, from UTC 1200 - 1600. It will be off for the remainder of the day.

\n

\n { \"12\":\"on\", \"13\":\"on\", \"14\":\"on\", \"15\":\"on\" } \n

" + "smithy.api#documentation": "

Describes a time-based instance's auto scaling schedule. The schedule consists of a set of key-value pairs.

  • The key is the time period (a UTC hour) and must be an integer from 0 - 23.
  • The value indicates whether the instance should be online or offline for the specified period, and must be set to \"on\" or \"off\"

The default setting for all time periods is off, so you use the following parameters primarily to specify the online periods. You don't have to explicitly specify offline periods unless you want to change an online period to an offline period.

\n

The following example specifies that the instance should be online for four hours, \n from UTC 1200 - 1600. It will be off for the remainder of the day.

\n

\n { \"12\":\"on\", \"13\":\"on\", \"14\":\"on\", \"15\":\"on\" } \n
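A hedged sketch of sending the schedule shown above through SetTimeBasedAutoScaling; the WeeklyAutoScalingSchedule member names follow the generated model (each day maps UTC hours to "on" or "off"), and the instance ID is a placeholder assumption.

import SotoOpsWorks

// Hedged sketch: keep an instance online 1200-1600 UTC on Mondays only.
// Instance ID is a placeholder assumption.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let opsWorks = OpsWorks(client: client, region: .useast1)

let schedule = OpsWorks.WeeklyAutoScalingSchedule(
    monday: ["12": "on", "13": "on", "14": "on", "15": "on"]
)
let request = OpsWorks.SetTimeBasedAutoScalingRequest(
    autoScalingSchedule: schedule,
    instanceId: "11111111-2222-3333-4444-555555555555"
)
try await opsWorks.setTimeBasedAutoScaling(request)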

" } } } diff --git a/models/osis.json b/models/osis.json index d1db83f403..865663460e 100644 --- a/models/osis.json +++ b/models/osis.json @@ -772,6 +772,12 @@ } } }, + "com.amazonaws.osis#BlueprintFormat": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(YAML|JSON)$" + } + }, "com.amazonaws.osis#Boolean": { "type": "boolean" }, @@ -787,7 +793,7 @@ } }, "traits": { - "smithy.api#documentation": "

Options that specify the configuration of a persistent buffer.\n To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions.

" + "smithy.api#documentation": "

Options that specify the configuration of a persistent buffer.\n To configure how OpenSearch Ingestion encrypts this data, set the EncryptionAtRestOptions. For more information, see Persistent buffering.

" } }, "com.amazonaws.osis#ChangeProgressStage": { @@ -925,13 +931,19 @@ } } }, + "com.amazonaws.osis#CidrBlock": { + "type": "string", + "traits": { + "smithy.api#pattern": "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/(3[0-2]|[12]?[0-9])$" + } + }, "com.amazonaws.osis#CloudWatchLogDestination": { "type": "structure", "members": { "LogGroup": { "target": "com.amazonaws.osis#LogGroup", "traits": { - "smithy.api#documentation": "

The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing\n log group or create a new one. For example,\n /aws/OpenSearchService/IngestionService/my-pipeline.

", + "smithy.api#documentation": "

The name of the CloudWatch Logs group to send pipeline logs to. You can specify an existing\n log group or create a new one. For example,\n /aws/vendedlogs/OpenSearchService/pipelines.

", "smithy.api#required": {} } } @@ -965,6 +977,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1085,6 +1100,9 @@ { "target": "com.amazonaws.osis#ConflictException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1127,19 +1145,32 @@ "smithy.api#output": {} } }, + "com.amazonaws.osis#DisabledOperationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.osis#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

This exception is thrown when an operation has been disabled.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, "com.amazonaws.osis#EncryptionAtRestOptions": { "type": "structure", "members": { "KmsKeyArn": { "target": "com.amazonaws.osis#KmsKeyArn", "traits": { - "smithy.api#documentation": "

The ARN of the KMS key used to encrypt data-at-rest in OpenSearch Ingestion.\n By default, data is encrypted using an AWS owned key.

", + "smithy.api#documentation": "

The ARN of the KMS key used to encrypt buffer data.\n By default, data is encrypted using an Amazon Web Services owned key.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Options to control how OpenSearch encrypts all data-at-rest.

" + "smithy.api#documentation": "

Options to control how OpenSearch encrypts buffer data.

" } }, "com.amazonaws.osis#ErrorMessage": { @@ -1157,6 +1188,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1188,6 +1222,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1217,6 +1254,13 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "Format": { + "target": "com.amazonaws.osis#BlueprintFormat", + "traits": { + "smithy.api#documentation": "

The format of the blueprint to retrieve.
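For illustration only, the new BlueprintFormat pattern ^(YAML|JSON)$ and this format query parameter map naturally onto a small Swift enum (the enum below is an assumption made for the sketch, not a generated SDK type):

    // Sketch only: mirrors the BlueprintFormat constraint ^(YAML|JSON)$.
    enum BlueprintFormat: String {
        case yaml = "YAML"
        case json = "JSON"
    }

    // Validate a raw query value before sending it as the "format" parameter.
    func parseBlueprintFormat(_ value: String) -> BlueprintFormat? {
        BlueprintFormat(rawValue: value)
    }

    print(parseBlueprintFormat("YAML") != nil)  // true
    print(parseBlueprintFormat("yaml") != nil)  // false: the pattern is case-sensitive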

", + "smithy.api#httpQuery": "format" + } } }, "traits": { @@ -1231,6 +1275,12 @@ "traits": { "smithy.api#documentation": "

The requested blueprint in YAML format.

" } + }, + "Format": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The format of the blueprint.

" + } } }, "traits": { @@ -1249,6 +1299,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1304,7 +1357,7 @@ "PipelineName": { "target": "com.amazonaws.osis#PipelineName", "traits": { - "smithy.api#documentation": "

The name of the pipeline to get information about.

", + "smithy.api#documentation": "

The name of the pipeline.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1400,6 +1453,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1452,6 +1508,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1530,6 +1589,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -1711,10 +1773,22 @@ "EncryptionAtRestOptions": { "target": "com.amazonaws.osis#EncryptionAtRestOptions" }, + "VpcEndpointService": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The VPC endpoint service name for the pipeline.

" + } + }, "ServiceVpcEndpoints": { "target": "com.amazonaws.osis#ServiceVpcEndpointsList", "traits": { - "smithy.api#documentation": "

A list of VPC endpoints that OpenSearch Ingestion has created to other AWS services.

" + "smithy.api#documentation": "

A list of VPC endpoints that OpenSearch Ingestion has created to other Amazon Web Services services.

" + } + }, + "Destinations": { + "target": "com.amazonaws.osis#PipelineDestinationList", + "traits": { + "smithy.api#documentation": "

Destinations to which the pipeline writes data.

" } }, "Tags": { @@ -1752,6 +1826,30 @@ "traits": { "smithy.api#documentation": "

The YAML configuration of the blueprint.

" } + }, + "DisplayName": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The display name of the blueprint.

" + } + }, + "DisplayDescription": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

A description of the blueprint.

" + } + }, + "Service": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The name of the service that the blueprint is associated with.

" + } + }, + "UseCase": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The use case that the blueprint relates to.

" + } } }, "traits": { @@ -1766,6 +1864,30 @@ "traits": { "smithy.api#documentation": "

The name of the blueprint.

" } + }, + "DisplayName": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The display name of the blueprint.

" + } + }, + "DisplayDescription": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

A description of the blueprint.

" + } + }, + "Service": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The name of the service that the blueprint is associated with.

" + } + }, + "UseCase": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The use case that the blueprint relates to.

" + } } }, "traits": { @@ -1787,6 +1909,32 @@ } } }, + "com.amazonaws.osis#PipelineDestination": { + "type": "structure", + "members": { + "ServiceName": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The name of the service receiving data from the pipeline.

" + } + }, + "Endpoint": { + "target": "com.amazonaws.osis#String", + "traits": { + "smithy.api#documentation": "

The endpoint receiving data from the pipeline.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object representing the destination of a pipeline.

" + } + }, + "com.amazonaws.osis#PipelineDestinationList": { + "type": "list", + "member": { + "target": "com.amazonaws.osis#PipelineDestination" + } + }, "com.amazonaws.osis#PipelineName": { "type": "string", "traits": { @@ -1924,6 +2072,12 @@ "smithy.api#documentation": "

The date and time when the pipeline was last updated.

" } }, + "Destinations": { + "target": "com.amazonaws.osis#PipelineDestinationList", + "traits": { + "smithy.api#documentation": "

A list of destinations to which the pipeline writes data.

" + } + }, "Tags": { "target": "com.amazonaws.osis#TagList", "traits": { @@ -2009,7 +2163,7 @@ "VpcEndpointId": { "target": "com.amazonaws.osis#String", "traits": { - "smithy.api#documentation": "

The ID of the VPC endpoint that was created.

" + "smithy.api#documentation": "

The unique identifier of the VPC endpoint that was created.

" } } }, @@ -2038,6 +2192,9 @@ { "target": "com.amazonaws.osis#ConflictException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -2099,6 +2256,9 @@ { "target": "com.amazonaws.osis#ConflictException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -2226,6 +2386,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -2303,6 +2466,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -2367,6 +2533,9 @@ { "target": "com.amazonaws.osis#ConflictException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -2464,6 +2633,9 @@ { "target": "com.amazonaws.osis#AccessDeniedException" }, + { + "target": "com.amazonaws.osis#DisabledOperationException" + }, { "target": "com.amazonaws.osis#InternalException" }, @@ -2548,6 +2720,27 @@ "target": "com.amazonaws.osis#ValidationMessage" } }, + "com.amazonaws.osis#VpcAttachmentOptions": { + "type": "structure", + "members": { + "AttachToVpc": { + "target": "com.amazonaws.osis#Boolean", + "traits": { + "smithy.api#documentation": "

Whether a VPC is attached to the pipeline.

", + "smithy.api#required": {} + } + }, + "CidrBlock": { + "target": "com.amazonaws.osis#CidrBlock", + "traits": { + "smithy.api#documentation": "

The CIDR block to be reserved for OpenSearch Ingestion to create elastic network interfaces (ENIs).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Options for attaching a VPC to a pipeline.
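As a sketch of what the CidrBlock constraint accepts (an illustrative client-side check, not the service's own validation), an IPv4 CIDR such as 10.0.0.0/24 can be verified like this:

    // Illustrative only: checks the IPv4 CIDR shape described by the CidrBlock pattern
    // (four octets in 0-255 followed by a prefix length in 0-32).
    func isValidCidrBlock(_ value: String) -> Bool {
        let parts = value.split(separator: "/")
        guard parts.count == 2,
              let prefix = Int(parts[1]), (0...32).contains(prefix) else { return false }
        let octets = parts[0].split(separator: ".")
        guard octets.count == 4 else { return false }
        return octets.allSatisfy { octet in
            guard let n = Int(octet) else { return false }
            return (0...255).contains(n)
        }
    }

    print(isValidCidrBlock("10.0.0.0/24"))    // true
    print(isValidCidrBlock("10.0.0.256/24"))  // false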

" + } + }, "com.amazonaws.osis#VpcEndpoint": { "type": "structure", "members": { @@ -2574,6 +2767,23 @@ "smithy.api#documentation": "

An OpenSearch Ingestion-managed VPC endpoint that will access one or more\n pipelines.

" } }, + "com.amazonaws.osis#VpcEndpointManagement": { + "type": "enum", + "members": { + "CUSTOMER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMER" + } + }, + "SERVICE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SERVICE" + } + } + } + }, "com.amazonaws.osis#VpcEndpointServiceName": { "type": "enum", "members": { @@ -2606,6 +2816,18 @@ "traits": { "smithy.api#documentation": "

A list of security groups associated with the VPC endpoint.

" } + }, + "VpcAttachmentOptions": { + "target": "com.amazonaws.osis#VpcAttachmentOptions", + "traits": { + "smithy.api#documentation": "

Options for attaching a VPC to a pipeline.

" + } + }, + "VpcEndpointManagement": { + "target": "com.amazonaws.osis#VpcEndpointManagement", + "traits": { + "smithy.api#documentation": "

Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline.

" + } } }, "traits": { diff --git a/models/pca-connector-scep.json b/models/pca-connector-scep.json new file mode 100644 index 0000000000..5e404ec6df --- /dev/null +++ b/models/pca-connector-scep.json @@ -0,0 +1,2334 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.pcaconnectorscep#AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

You can receive this error if you attempt to perform an operation and you don't have\n the required permissions. This can be caused by insufficient permissions in policies\n attached to your Amazon Web Services Identity and Access Management (IAM) principal. It can also happen\n because of restrictions in place from an Amazon Web Services Organizations service control policy (SCP)\n that affects your Amazon Web Services account.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.pcaconnectorscep#AzureApplicationId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 15, + "max": 100 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}-[a-zA-Z0-9]{2,15}$" + } + }, + "com.amazonaws.pcaconnectorscep#AzureDomain": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9._-]+$" + } + }, + "com.amazonaws.pcaconnectorscep#BadRequestException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request is malformed or contains an error such as an invalid parameter value or a missing required parameter.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.pcaconnectorscep#CertificateAuthorityArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 5, + "max": 200 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:acm-pca:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:certificate-authority\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.pcaconnectorscep#Challenge": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the challenge.

" + } + }, + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the challenge was created.

" + } + }, + "UpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the challenge was updated.

" + } + }, + "Password": { + "target": "com.amazonaws.pcaconnectorscep#SensitiveString", + "traits": { + "smithy.api#documentation": "

The SCEP challenge password, in UUID format.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

For general-purpose connectors. An object containing information about the specified connector's SCEP challenge passwords.

" + } + }, + "com.amazonaws.pcaconnectorscep#ChallengeArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 5, + "max": 200 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\\/challenge\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.pcaconnectorscep#ChallengeMetadata": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the challenge.

" + } + }, + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the connector was created.

" + } + }, + "UpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the connector was updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the connector's challenge.

" + } + }, + "com.amazonaws.pcaconnectorscep#ChallengeMetadataList": { + "type": "list", + "member": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeMetadataSummary" + } + }, + "com.amazonaws.pcaconnectorscep#ChallengeMetadataSummary": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the challenge.

" + } + }, + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the challenge was created.

" + } + }, + "UpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the challenge was updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the specified challenge, returned by the GetChallengeMetadata action.

" + } + }, + "com.amazonaws.pcaconnectorscep#ChallengeResource": { + "type": "resource", + "identifiers": { + "ChallengeArn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn" + } + }, + "create": { + "target": "com.amazonaws.pcaconnectorscep#CreateChallenge" + }, + "read": { + "target": "com.amazonaws.pcaconnectorscep#GetChallengeMetadata" + }, + "delete": { + "target": "com.amazonaws.pcaconnectorscep#DeleteChallenge" + }, + "list": { + "target": "com.amazonaws.pcaconnectorscep#ListChallengeMetadata" + }, + "operations": [ + { + "target": "com.amazonaws.pcaconnectorscep#GetChallengePassword" + } + ], + "traits": { + "aws.cloudformation#cfnResource": { + "name": "Challenge" + } + } + }, + "com.amazonaws.pcaconnectorscep#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[!-~]+$" + } + }, + "com.amazonaws.pcaconnectorscep#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "ResourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services resource.

", + "smithy.api#required": {} + } + }, + "ResourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource type, which can be either Connector or Challenge.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

This request can't be completed because the requested\n resource was being concurrently modified by another request.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.pcaconnectorscep#Connector": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "CertificateAuthorityArn": { + "target": "com.amazonaws.pcaconnectorscep#CertificateAuthorityArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the certificate authority associated with the connector.

" + } + }, + "Type": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorType", + "traits": { + "smithy.api#documentation": "

The connector type.

" + } + }, + "MobileDeviceManagement": { + "target": "com.amazonaws.pcaconnectorscep#MobileDeviceManagement", + "traits": { + "smithy.api#documentation": "

Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure MobileDeviceManagement, then the connector is for general-purpose use and this object is empty.

" + } + }, + "OpenIdConfiguration": { + "target": "com.amazonaws.pcaconnectorscep#OpenIdConfiguration", + "traits": { + "smithy.api#documentation": "

Contains OpenID Connect (OIDC) parameters for use with Connector for SCEP for Microsoft Intune. For more information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

" + } + }, + "Status": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorStatus", + "traits": { + "smithy.api#documentation": "

The connector's status.

" + } + }, + "StatusReason": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorStatusReason", + "traits": { + "smithy.api#documentation": "

Information about why connector creation failed, if status is FAILED.

" + } + }, + "Endpoint": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The connector's HTTPS public SCEP URL.

" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the connector was created.

" + } + }, + "UpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the connector was updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Connector for SCEP is a service that links Amazon Web Services Private Certificate Authority to your SCEP-enabled devices. The connector brokers the exchange of certificates from Amazon Web Services Private CA to your SCEP-enabled devices and mobile device management systems. The connector is a complex type that contains the connector's configuration settings.

" + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 5, + "max": 200 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:pca-connector-scep:[a-z]+(-[a-z]+)+-[1-9]\\d*:\\d{12}:connector\\/[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorList": { + "type": "list", + "member": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorSummary" + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorResource": { + "type": "resource", + "identifiers": { + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn" + } + }, + "create": { + "target": "com.amazonaws.pcaconnectorscep#CreateConnector" + }, + "read": { + "target": "com.amazonaws.pcaconnectorscep#GetConnector" + }, + "delete": { + "target": "com.amazonaws.pcaconnectorscep#DeleteConnector" + }, + "list": { + "target": "com.amazonaws.pcaconnectorscep#ListConnectors" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "Connector" + } + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATING" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorStatusReason": { + "type": "enum", + "members": { + "INTERNAL_FAILURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_FAILURE" + } + }, + "PRIVATECA_ACCESS_DENIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRIVATECA_ACCESS_DENIED" + } + }, + "PRIVATECA_INVALID_STATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRIVATECA_INVALID_STATE" + } + }, + "PRIVATECA_RESOURCE_NOT_FOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRIVATECA_RESOURCE_NOT_FOUND" + } + } + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorSummary": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "CertificateAuthorityArn": { + "target": "com.amazonaws.pcaconnectorscep#CertificateAuthorityArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector's associated certificate authority.

" + } + }, + "Type": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorType", + "traits": { + "smithy.api#documentation": "

The connector type.

" + } + }, + "MobileDeviceManagement": { + "target": "com.amazonaws.pcaconnectorscep#MobileDeviceManagement", + "traits": { + "smithy.api#documentation": "

Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure MobileDeviceManagement, then the connector is for general-purpose use and this object is empty.

" + } + }, + "OpenIdConfiguration": { + "target": "com.amazonaws.pcaconnectorscep#OpenIdConfiguration", + "traits": { + "smithy.api#documentation": "

Contains OpenID Connect (OIDC) parameters for use with Microsoft Intune.

" + } + }, + "Status": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorStatus", + "traits": { + "smithy.api#documentation": "

The connector's status. Status can be creating, active, deleting, or failed.

" + } + }, + "StatusReason": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorStatusReason", + "traits": { + "smithy.api#documentation": "

Information about why connector creation failed, if status is FAILED.

" + } + }, + "Endpoint": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The connector's HTTPS public SCEP URL.

" + } + }, + "CreatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the challenge was created.

" + } + }, + "UpdatedAt": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the challenge was updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Lists the Amazon Web Services Private CA SCEP connectors belonging to your Amazon Web Services account.

" + } + }, + "com.amazonaws.pcaconnectorscep#ConnectorType": { + "type": "enum", + "members": { + "GENERAL_PURPOSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GENERAL_PURPOSE" + } + }, + "INTUNE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTUNE" + } + } + } + }, + "com.amazonaws.pcaconnectorscep#CreateChallenge": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#CreateChallengeRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#CreateChallengeResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#BadRequestException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ConflictException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

For general-purpose connectors. Creates a challenge password for the specified connector. The SCEP protocol uses a challenge password to authenticate a request before issuing a certificate from a certificate authority (CA). Your SCEP clients include the challenge password as part of their certificate request to Connector for SCEP. To retrieve the connector Amazon Resource Names (ARNs) for the connectors in your account, call ListConnectors.

\n

To create additional challenge passwords for the connector, call CreateChallenge again. We recommend frequently rotating your challenge passwords.

", + "smithy.api#http": { + "code": 202, + "method": "POST", + "uri": "/challenges" + } + } + }, + "com.amazonaws.pcaconnectorscep#CreateChallengeRequest": { + "type": "structure", + "members": { + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector that you want to create a challenge for.

", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.pcaconnectorscep#ClientToken", + "traits": { + "smithy.api#documentation": "

Custom string that can be used to distinguish between calls to the CreateChallenge action. Client tokens for CreateChallenge time out after five minutes. Therefore, if you call CreateChallenge multiple times with the same client token within five minutes, Connector for SCEP recognizes that you are requesting only one challenge and will only respond with one. If you change the client token for each call, Connector for SCEP recognizes that you are requesting multiple challenge passwords.
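As a sketch of the idempotency behaviour described above (the struct below is a hypothetical stand-in, not the generated Soto request type), the important point is that one token is created per logical call and reused unchanged on retries:

    import Foundation

    // Hypothetical stand-in: the client token is created once per logical CreateChallenge
    // call and reused on retries within the five-minute window, so Connector for SCEP
    // issues only one challenge password for the repeated calls.
    struct CreateChallengeCall {
        let connectorArn: String
        let clientToken: String = UUID().uuidString  // satisfies the 1-64 character ^[!-~]+$ constraint
    }

    let call = CreateChallengeCall(connectorArn: "<connector-arn>")
    // Retry with the same call.clientToken; use a new token only for a genuinely new challenge request.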

", + "smithy.api#idempotencyToken": {} + } + }, + "Tags": { + "target": "com.amazonaws.pcaconnectorscep#Tags", + "traits": { + "aws.cloudformation#cfnMutability": "write", + "smithy.api#documentation": "

The key-value pairs to associate with the resource.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#CreateChallengeResponse": { + "type": "structure", + "members": { + "Challenge": { + "target": "com.amazonaws.pcaconnectorscep#Challenge", + "traits": { + "smithy.api#documentation": "

Returns the challenge details for the specified connector.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#CreateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#CreateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#CreateConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ConflictException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a SCEP connector. A SCEP connector links Amazon Web Services Private Certificate Authority to your SCEP-compatible devices and mobile device management (MDM) systems. Before you create a connector, you must complete a set of prerequisites, including creation of a private certificate authority (CA) to use with this connector. For more information, see Connector for SCEP prerequisites.

", + "smithy.api#http": { + "code": 202, + "method": "POST", + "uri": "/connectors" + } + } + }, + "com.amazonaws.pcaconnectorscep#CreateConnectorRequest": { + "type": "structure", + "members": { + "CertificateAuthorityArn": { + "target": "com.amazonaws.pcaconnectorscep#CertificateAuthorityArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Private Certificate Authority certificate authority to use with this connector. Due to security vulnerabilities present in the SCEP protocol, we recommend using a private CA that's dedicated for use with the connector.

\n

To retrieve the private CAs associated with your account, you can call ListCertificateAuthorities using the Amazon Web Services Private CA API.

", + "smithy.api#required": {} + } + }, + "MobileDeviceManagement": { + "target": "com.amazonaws.pcaconnectorscep#MobileDeviceManagement", + "traits": { + "smithy.api#documentation": "

If you don't supply a value, by default Connector for SCEP creates a connector for general-purpose use. A general-purpose connector is designed to work with clients or endpoints that support the SCEP protocol, except Connector for SCEP for Microsoft Intune. With connectors for general-purpose use, you manage SCEP challenge passwords using Connector for SCEP. For information about considerations and limitations with using Connector for SCEP, see Considerations and Limitations.

\n

If you provide an IntuneConfiguration, Connector for SCEP creates a connector for use with Microsoft Intune, and you manage the challenge passwords using Microsoft Intune. For more information, see Using Connector for SCEP for Microsoft Intune.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.pcaconnectorscep#ClientToken", + "traits": { + "smithy.api#documentation": "

Custom string that can be used to distinguish between calls to the CreateConnector action. Client tokens for CreateConnector time out after five minutes. Therefore, if you call CreateConnector multiple times with the same client token within five minutes, Connector for SCEP recognizes that you are requesting only one connector and will only respond with one. If you change the client token for each call, Connector for SCEP recognizes that you are requesting multiple connectors.

", + "smithy.api#idempotencyToken": {} + } + }, + "Tags": { + "target": "com.amazonaws.pcaconnectorscep#Tags", + "traits": { + "aws.cloudformation#cfnMutability": "write", + "smithy.api#documentation": "

The key-value pairs to associate with the resource.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#CreateConnectorResponse": { + "type": "structure", + "members": { + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

Returns the Amazon Resource Name (ARN) of the connector.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#DeleteChallenge": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#DeleteChallengeRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ConflictException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified Challenge.

", + "smithy.api#http": { + "code": 202, + "method": "DELETE", + "uri": "/challenges/{ChallengeArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.pcaconnectorscep#DeleteChallengeRequest": { + "type": "structure", + "members": { + "ChallengeArn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the challenge password to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#DeleteConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#DeleteConnectorRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ConflictException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified Connector. This operation also deletes any challenges associated with the connector.

", + "smithy.api#http": { + "code": 202, + "method": "DELETE", + "uri": "/connectors/{ConnectorArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.pcaconnectorscep#DeleteConnectorRequest": { + "type": "structure", + "members": { + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetChallengeMetadata": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#GetChallengeMetadataRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#GetChallengeMetadataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the metadata for the specified Challenge.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/challengeMetadata/{ChallengeArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetChallengeMetadataRequest": { + "type": "structure", + "members": { + "ChallengeArn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the challenge.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetChallengeMetadataResponse": { + "type": "structure", + "members": { + "ChallengeMetadata": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeMetadata", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The metadata for the challenge.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetChallengePassword": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#GetChallengePasswordRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#GetChallengePasswordResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the challenge password for the specified Challenge.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/challengePasswords/{ChallengeArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetChallengePasswordRequest": { + "type": "structure", + "members": { + "ChallengeArn": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the challenge.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetChallengePasswordResponse": { + "type": "structure", + "members": { + "Password": { + "target": "com.amazonaws.pcaconnectorscep#SensitiveString", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The SCEP challenge password.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#GetConnectorRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#GetConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves details about the specified Connector. Calling this action returns important details about the connector, such as the public SCEP URL where your clients can request certificates.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/connectors/{ConnectorArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetConnectorRequest": { + "type": "structure", + "members": { + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#GetConnectorResponse": { + "type": "structure", + "members": { + "Connector": { + "target": "com.amazonaws.pcaconnectorscep#Connector", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The properties of the connector.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#InternalServerException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request processing has failed because of an unknown error, exception, or failure on\n an internal server.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.pcaconnectorscep#IntuneConfiguration": { + "type": "structure", + "members": { + "AzureApplicationId": { + "target": "com.amazonaws.pcaconnectorscep#AzureApplicationId", + "traits": { + "smithy.api#documentation": "

The directory (tenant) ID from your Microsoft Entra ID app registration.

", + "smithy.api#required": {} + } + }, + "Domain": { + "target": "com.amazonaws.pcaconnectorscep#AzureDomain", + "traits": { + "smithy.api#documentation": "

The primary domain from your Microsoft Entra ID app registration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration details for use with Microsoft Intune. For information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

\n

When you use Connector for SCEP for Microsoft Intune, certain functionalities are enabled by accessing Microsoft Intune through the Microsoft API. Your use of the Connector for SCEP and accompanying Amazon Web Services services doesn't remove your need to have a valid license for your use of the Microsoft Intune service. You should also review the Microsoft Intune® App Protection Policies.

" + } + }, + "com.amazonaws.pcaconnectorscep#ListChallengeMetadata": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#ListChallengeMetadataRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#ListChallengeMetadataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the challenge metadata for the specified ARN.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/challengeMetadata" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Challenges" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListChallengeMetadataRequest": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.pcaconnectorscep#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of objects that you want Connector for SCEP to return for this request. If more\n objects are available, in the response, Connector for SCEP provides a\n NextToken value that you can use in a subsequent call to get the next batch of objects.

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.pcaconnectorscep#NextToken", + "traits": { + "smithy.api#documentation": "

When you request a list of objects with a MaxResults setting, if the number of objects that are still available\n for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken\n value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

", + "smithy.api#httpQuery": "NextToken" + } + }, + "ConnectorArn": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

", + "smithy.api#httpQuery": "ConnectorArn", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListChallengeMetadataResponse": { + "type": "structure", + "members": { + "Challenges": { + "target": "com.amazonaws.pcaconnectorscep#ChallengeMetadataList", + "traits": { + "smithy.api#documentation": "

The challenge metadata for the challenges belonging to your Amazon Web Services account.

" + } + }, + "NextToken": { + "target": "com.amazonaws.pcaconnectorscep#NextToken", + "traits": { + "smithy.api#documentation": "

When you request a list of objects with a MaxResults setting, if the number of objects that are still available\n for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken\n value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.
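The MaxResults/NextToken description above is the standard AWS pagination contract; here is a hedged sketch of the loop (fetchPage and ListPage are hypothetical stand-ins, not the generated Soto ListChallengeMetadata API):

    // Hypothetical stand-ins for the paginated call and its result.
    struct ListPage {
        let challenges: [String]
        let nextToken: String?
    }

    func fetchPage(connectorArn: String, maxResults: Int, nextToken: String?) -> ListPage {
        // In real code this would call ListChallengeMetadata with these parameters.
        return ListPage(challenges: [], nextToken: nil)
    }

    // Keep requesting pages, passing the returned NextToken back in, until it is nil.
    var nextToken: String? = nil
    var challenges: [String] = []
    repeat {
        let page = fetchPage(connectorArn: "<connector-arn>", maxResults: 100, nextToken: nextToken)
        challenges.append(contentsOf: page.challenges)
        nextToken = page.nextToken
    } while nextToken != nil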

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListConnectors": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#ListConnectorsRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#ListConnectorsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the connectors belonging to your Amazon Web Services account.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/connectors" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Connectors" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListConnectorsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.pcaconnectorscep#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of objects that you want Connector for SCEP to return for this request. If more\n objects are available, in the response, Connector for SCEP provides a\n NextToken value that you can use in a subsequent call to get the next batch of objects.

", + "smithy.api#httpQuery": "MaxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.pcaconnectorscep#NextToken", + "traits": { + "smithy.api#documentation": "

When you request a list of objects with a MaxResults setting, if the number of objects that are still available\n for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken\n value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

", + "smithy.api#httpQuery": "NextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListConnectorsResponse": { + "type": "structure", + "members": { + "Connectors": { + "target": "com.amazonaws.pcaconnectorscep#ConnectorList", + "traits": { + "smithy.api#documentation": "

The connectors belonging to your Amazon Web Services account.

" + } + }, + "NextToken": { + "target": "com.amazonaws.pcaconnectorscep#NextToken", + "traits": { + "smithy.api#documentation": "

When you request a list of objects with a MaxResults setting, if the number of objects that are still available\n for retrieval exceeds the maximum you requested, Connector for SCEP returns a NextToken\n value in the response. To retrieve the next batch of objects, use the token returned from the prior request in your next request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.pcaconnectorscep#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the tags associated with the specified resource. Tags are key-value pairs that\n you can use to categorize and manage your resources, for purposes like billing. For\n example, you might set the tag key to \"customer\" and the value to the customer name or ID.\n You can specify one or more tags to add to each Amazon Web Services resource, up to 50 tags for a\n resource.

", + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.pcaconnectorscep#Tags", + "traits": { + "smithy.api#documentation": "

The key-value pairs to associate with the resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.pcaconnectorscep#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.pcaconnectorscep#MobileDeviceManagement": { + "type": "union", + "members": { + "Intune": { + "target": "com.amazonaws.pcaconnectorscep#IntuneConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration settings for use with Microsoft Intune. For information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

If you don't supply a value, by default Connector for SCEP creates a connector for general-purpose use. A general-purpose connector is designed to work with clients or endpoints that support the SCEP protocol, except Connector for SCEP for Microsoft Intune. For information about considerations and limitations with using Connector for SCEP, see Considerations and Limitations.

\n

If you provide an IntuneConfiguration, Connector for SCEP creates a connector for use with Microsoft Intune, and you manage the challenge passwords using Microsoft Intune. For more information, see Using Connector for SCEP for Microsoft Intune.

" + } + }, + "com.amazonaws.pcaconnectorscep#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#pattern": "^(?:[A-Za-z0-9_-]{4})*(?:[A-Za-z0-9_-]{2}==|[A-Za-z0-9_-]{3}=)?$" + } + }, + "com.amazonaws.pcaconnectorscep#OpenIdConfiguration": { + "type": "structure", + "members": { + "Issuer": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The issuer value to copy into your Microsoft Entra app registration's OIDC.

" + } + }, + "Subject": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The subject value to copy into your Microsoft Entra app registration's OIDC.

" + } + }, + "Audience": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The audience value to copy into your Microsoft Entra app registration's OIDC.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains OpenID Connect (OIDC) parameters for use with Microsoft Intune. For more information about using Connector for SCEP for Microsoft Intune, see Using Connector for SCEP for Microsoft Intune.

" + } + }, + "com.amazonaws.pcaconnectorscep#PcaConnectorScep": { + "type": "service", + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.pcaconnectorscep#ListTagsForResource" + }, + { + "target": "com.amazonaws.pcaconnectorscep#TagResource" + }, + { + "target": "com.amazonaws.pcaconnectorscep#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.pcaconnectorscep#ChallengeResource" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ConnectorResource" + } + ], + "traits": { + "aws.api#service": { + "arnNamespace": "pca-connector-scep", + "cloudFormationName": "PCAConnectorSCEP", + "sdkId": "Pca Connector Scep", + "serviceName": "pca-connector-scep", + "cloudTrailEventSource": "pca-connector-scep.amazonaws.com" + }, + "aws.auth#sigv4": { + "name": "pca-connector-scep" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "*", + "content-type", + "x-amz-content-sha256", + "x-amz-user-agent", + "x-amzn-platform-id", + "x-amzn-trace-id" + ], + "additionalExposedHeaders": [ + "x-amzn-errortype", + "x-amzn-requestid", + "x-amzn-trace-id" + ], + "maxAge": 86400 + }, + "smithy.api#documentation": "\n

Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change.

\n
\n

Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more\n information, see Connector for SCEP in the Amazon Web Services Private CA User Guide.

", + "smithy.api#title": "Private CA Connector for SCEP", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-scep-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://pca-connector-scep-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-scep.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://pca-connector-scep.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, 
+ "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://pca-connector-scep.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.pcaconnectorscep#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "ResourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Web Services resource.

", + "smithy.api#required": {} + } + }, + "ResourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource type, which can be either Connector or Challenge.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The operation tried to access a nonexistent resource. The resource might be incorrectly specified, or it might have a status other than ACTIVE.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.pcaconnectorscep#SensitiveString": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.pcaconnectorscep#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "ResourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The resource type, which can be either Connector or Challenge.

", + "smithy.api#required": {} + } + }, + "ServiceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Identifies the originating service.

", + "smithy.api#required": {} + } + }, + "QuotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The quota identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request would cause a service quota to be exceeded.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.pcaconnectorscep#TagKeyList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.pcaconnectorscep#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#TagResourceRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Adds one or more tags to your resource.

", + "smithy.api#http": { + "code": 204, + "method": "POST", + "uri": "/tags/{ResourceArn}" + } + } + }, + "com.amazonaws.pcaconnectorscep#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "aws.api#data": "tagging", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.pcaconnectorscep#Tags", + "traits": { + "smithy.api#documentation": "

The key-value pairs to associate with the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#Tags": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.pcaconnectorscep#ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The limit on the number of requests per second was exceeded.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.pcaconnectorscep#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.pcaconnectorscep#UntagResourceRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.pcaconnectorscep#AccessDeniedException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#InternalServerException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ThrottlingException" + }, + { + "target": "com.amazonaws.pcaconnectorscep#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes one or more tags from your resource.

", + "smithy.api#http": { + "code": 204, + "method": "DELETE", + "uri": "/tags/{ResourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.pcaconnectorscep#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "smithy.api#String", + "traits": { + "aws.api#data": "tagging", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.pcaconnectorscep#TagKeyList", + "traits": { + "smithy.api#documentation": "

Specifies a list of tag keys that you want to remove from the specified resources.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pcaconnectorscep#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "Reason": { + "target": "com.amazonaws.pcaconnectorscep#ValidationExceptionReason", + "traits": { + "smithy.api#documentation": "

The reason for the validation error, if available. The service doesn't return a reason for every validation exception.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An input validation error occurred. For example, invalid characters in a name tag, or an invalid pagination token.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.pcaconnectorscep#ValidationExceptionReason": { + "type": "enum", + "members": { + "CA_CERT_VALIDITY_TOO_SHORT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CA_CERT_VALIDITY_TOO_SHORT" + } + }, + "INVALID_CA_USAGE_MODE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_CA_USAGE_MODE" + } + }, + "INVALID_CONNECTOR_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_CONNECTOR_TYPE" + } + }, + "INVALID_STATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_STATE" + } + }, + "NO_CLIENT_TOKEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_CLIENT_TOKEN" + } + }, + "UNKNOWN_OPERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNKNOWN_OPERATION" + } + }, + "OTHER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OTHER" + } + } + } + } + } +} \ No newline at end of file diff --git a/models/pi.json b/models/pi.json index ea55a498dd..1cd963a046 100644 --- a/models/pi.json +++ b/models/pi.json @@ -43,7 +43,7 @@ "com.amazonaws.pi#AdditionalMetricsList": { "type": "list", "member": { - "target": "com.amazonaws.pi#RequestString" + "target": "com.amazonaws.pi#SanitizedString" }, "traits": { "smithy.api#length": { @@ -211,6 +211,18 @@ } } }, + "com.amazonaws.pi#AuthorizedActionsList": { + "type": "list", + "member": { + "target": "com.amazonaws.pi#FineGrainedAction" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 3 + } + } + }, "com.amazonaws.pi#Boolean": { "type": "boolean" }, @@ -644,14 +656,14 @@ "type": "structure", "members": { "Group": { - "target": "com.amazonaws.pi#RequestString", + "target": "com.amazonaws.pi#SanitizedString", "traits": { "smithy.api#documentation": "

The name of the dimension group. Valid values are as follows:

  • db - The name of the database to which the client is connected. The following values are permitted:
      • Aurora PostgreSQL
      • Amazon RDS PostgreSQL
      • Aurora MySQL
      • Amazon RDS MySQL
      • Amazon RDS MariaDB
      • Amazon DocumentDB
  • db.application - The name of the application that is connected to the database. The following values are permitted:
      • Aurora PostgreSQL
      • Amazon RDS PostgreSQL
      • Amazon DocumentDB
  • db.host - The host name of the connected client (all engines).
  • db.query - The query that is currently running (only Amazon DocumentDB).
  • db.query_tokenized - The digest query (only Amazon DocumentDB).
  • db.session_type - The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL).
  • db.sql - The text of the SQL statement that is currently running (all engines except Amazon DocumentDB).
  • db.sql_tokenized - The SQL digest (all engines except Amazon DocumentDB).
  • db.user - The user logged in to the database (all engines except Amazon DocumentDB).
  • db.wait_event - The event for which the database backend is waiting (all engines except Amazon DocumentDB).
  • db.wait_event_type - The type of event for which the database backend is waiting (all engines except Amazon DocumentDB).
  • db.wait_state - The event for which the database backend is waiting (only Amazon DocumentDB).
", "smithy.api#required": {} } }, "Dimensions": { - "target": "com.amazonaws.pi#RequestStringList", + "target": "com.amazonaws.pi#SanitizedStringList", "traits": { "smithy.api#documentation": "

A list of specific dimensions from a dimension group. If this parameter is not present, then it signifies that all of the dimensions in the group were requested, or are present in the response.

Valid values for elements in the Dimensions array are:

  • db.application.name - The name of the application that is connected to the database. Valid values are as follows:
      • Aurora PostgreSQL
      • Amazon RDS PostgreSQL
      • Amazon DocumentDB
  • db.host.id - The host ID of the connected client (all engines).
  • db.host.name - The host name of the connected client (all engines).
  • db.name - The name of the database to which the client is connected. Valid values are as follows:
      • Aurora PostgreSQL
      • Amazon RDS PostgreSQL
      • Aurora MySQL
      • Amazon RDS MySQL
      • Amazon RDS MariaDB
      • Amazon DocumentDB
  • db.query.id - The query ID generated by Performance Insights (only Amazon DocumentDB).
  • db.query.db_id - The query ID generated by the database (only Amazon DocumentDB).
  • db.query.statement - The text of the query that is being run (only Amazon DocumentDB).
  • db.query.tokenized_id
  • db.query.tokenized.id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).
  • db.query.tokenized.db_id - The query digest ID generated by Performance Insights (only Amazon DocumentDB).
  • db.query.tokenized.statement - The text of the query digest (only Amazon DocumentDB).
  • db.session_type.name - The type of the current session (only Amazon DocumentDB).
  • db.sql.id - The hash of the full, non-tokenized SQL statement generated by Performance Insights (all engines except Amazon DocumentDB).
  • db.sql.db_id - Either the SQL ID generated by the database engine, or a value generated by Performance Insights that begins with pi- (all engines except Amazon DocumentDB).
  • db.sql.statement - The full text of the SQL statement that is running, as in SELECT * FROM employees (all engines except Amazon DocumentDB).
  • db.sql.tokenized_id
  • db.sql_tokenized.id - The hash of the SQL digest generated by Performance Insights (all engines except Amazon DocumentDB). In the console, db.sql_tokenized.id is called the Support ID because Amazon Web Services Support can look at this data to help you troubleshoot database issues.
  • db.sql_tokenized.db_id - Either the native database ID used to refer to the SQL statement, or a synthetic ID such as pi-2372568224 that Performance Insights generates if the native database ID isn't available (all engines except Amazon DocumentDB).
  • db.sql_tokenized.statement - The text of the SQL digest, as in SELECT * FROM employees WHERE employee_id = ? (all engines except Amazon DocumentDB).
  • db.user.id - The ID of the user logged in to the database (all engines except Amazon DocumentDB).
  • db.user.name - The name of the user logged in to the database (all engines except Amazon DocumentDB).
  • db.wait_event.name - The event for which the backend is waiting (all engines except Amazon DocumentDB).
  • db.wait_event.type - The type of event for which the backend is waiting (all engines except Amazon DocumentDB).
  • db.wait_event_type.name - The name of the event type for which the backend is waiting (all engines except Amazon DocumentDB).
  • db.wait_state.name - The event for which the backend is waiting (only Amazon DocumentDB).
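To make the relationship between a dimension group and the dimensions requested from it concrete, here is a small illustrative Swift sketch; the type and property names are stand-ins for this walkthrough, not the shapes Soto generates, and the 1 to 10 bound comes from the SanitizedStringList length trait in this diff.

```swift
// Illustrative only: a hand-written stand-in for the DimensionGroup shape,
// requesting the SQL-digest dimensions described above.
struct DimensionGroup {
    let group: String        // e.g. "db.sql_tokenized"
    let dimensions: [String] // when present, 1 to 10 entries

    init(group: String, dimensions: [String]) {
        precondition((1...10).contains(dimensions.count),
                     "SanitizedStringList allows between 1 and 10 dimensions")
        self.group = group
        self.dimensions = dimensions
    }
}

// Ask for the SQL digest hash and its text; omitting the dimensions in the
// real API means "all dimensions in the group".
let sqlDigestGroup = DimensionGroup(
    group: "db.sql_tokenized",
    dimensions: ["db.sql_tokenized.id", "db.sql_tokenized.statement"]
)
print(sqlDigestGroup.dimensions.count) // 2
```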
" } @@ -775,7 +787,7 @@ "com.amazonaws.pi#DimensionsMetricList": { "type": "list", "member": { - "target": "com.amazonaws.pi#RequestString" + "target": "com.amazonaws.pi#SanitizedString" }, "traits": { "smithy.api#length": { @@ -854,6 +866,29 @@ } } }, + "com.amazonaws.pi#FineGrainedAction": { + "type": "enum", + "members": { + "DESCRIBE_DIMENSION_KEYS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DescribeDimensionKeys" + } + }, + "GET_DIMENSION_KEY_DETAILS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GetDimensionKeyDetails" + } + }, + "GET_RESOURCE_METRICS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GetResourceMetrics" + } + } + } + }, "com.amazonaws.pi#GetDimensionKeyDetails": { "type": "operation", "input": { @@ -1402,6 +1437,12 @@ "traits": { "smithy.api#documentation": "

An optional pagination token provided by a previous request. If this parameter is specified, \n the response includes only records beyond the token, up to the value specified by MaxRecords.\n

" } + }, + "AuthorizedActions": { + "target": "com.amazonaws.pi#AuthorizedActionsList", + "traits": { + "smithy.api#documentation": "

The actions to discover the dimensions you are authorized to access. If you specify multiple actions, then the response will\n contain the dimensions common for all the actions.

\n

When you don't specify this request parameter or provide an empty list, the response contains all the \n available dimensions for the target database engine whether or not you are authorized to access them.
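A minimal Swift rendering of the FineGrainedAction values this parameter accepts, for illustration only; the raw values are copied from the enum earlier in this diff, but the type itself is not the Soto-generated one.

```swift
// The three actions defined by com.amazonaws.pi#FineGrainedAction; the raw
// values match the smithy.api#enumValue strings in the model.
enum FineGrainedAction: String {
    case describeDimensionKeys = "DescribeDimensionKeys"
    case getDimensionKeyDetails = "GetDimensionKeyDetails"
    case getResourceMetrics = "GetResourceMetrics"
}

// Per the documentation above: listing more than one action returns only the
// dimensions common to all of them, and an empty (or omitted) list returns
// every dimension whether or not you are authorized to access it.
let authorizedActions: [FineGrainedAction] = [.describeDimensionKeys, .getResourceMetrics]
print(authorizedActions.map(\.rawValue)) // ["DescribeDimensionKeys", "GetResourceMetrics"]
```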

" + } } }, "traits": { @@ -1740,7 +1781,7 @@ "type": "structure", "members": { "Metric": { - "target": "com.amazonaws.pi#RequestString", + "target": "com.amazonaws.pi#SanitizedString", "traits": { "smithy.api#documentation": "

The name of a Performance Insights metric to be measured.

\n

Valid values for Metric are db.load.avg and db.sampledload.avg.

If the number of active sessions is less than an internal Performance Insights threshold, db.load.avg and db.sampledload.avg are the same value. If the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with db.load.avg showing the scaled values, db.sampledload.avg showing the raw values, and db.sampledload.avg less than db.load.avg. For most use cases, you can query db.load.avg only.
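Several of the request members in this part of the diff are retargeted from RequestString to the new SanitizedString shape, whose regular-expression pattern (defined a little further down in the model) restricts the characters a value may contain. A hedged Swift check against that pattern follows; only the pattern string is taken from the model, the helper itself is illustrative.

```swift
import Foundation

// Pattern copied from com.amazonaws.pi#SanitizedString: letters, digits and a
// small set of punctuation, up to 256 characters overall.
let sanitizedStringPattern = "^[a-zA-Z0-9-_\\.:/*)( ]+$"

func isSanitizedString(_ value: String) -> Bool {
    guard value.count <= 256 else { return false }
    return value.range(of: sanitizedStringPattern, options: .regularExpression) != nil
}

print(isSanitizedString("db.sql_tokenized.statement")) // true
print(isSanitizedString("db.load.avg"))                // true
print(isSanitizedString("drop table; --"))             // false: ';' is not allowed
```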

", "smithy.api#required": {} @@ -1766,7 +1807,7 @@ "com.amazonaws.pi#MetricQueryFilterMap": { "type": "map", "key": { - "target": "com.amazonaws.pi#RequestString" + "target": "com.amazonaws.pi#SanitizedString" }, "value": { "target": "com.amazonaws.pi#RequestString" @@ -1787,7 +1828,7 @@ "com.amazonaws.pi#MetricTypeList": { "type": "list", "member": { - "target": "com.amazonaws.pi#RequestString" + "target": "com.amazonaws.pi#SanitizedString" } }, "com.amazonaws.pi#MetricValuesList": { @@ -2867,22 +2908,10 @@ "smithy.api#pattern": "\\S" } }, - "com.amazonaws.pi#RequestStringList": { - "type": "list", - "member": { - "target": "com.amazonaws.pi#RequestString" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 10 - } - } - }, "com.amazonaws.pi#RequestedDimensionList": { "type": "list", "member": { - "target": "com.amazonaws.pi#RequestString" + "target": "com.amazonaws.pi#SanitizedString" }, "traits": { "smithy.api#length": { @@ -2965,6 +2994,29 @@ "target": "com.amazonaws.pi#ResponseResourceMetric" } }, + "com.amazonaws.pi#SanitizedString": { + "type": "string", + "traits": { + "smithy.api#documentation": "A generic string type that forbids characters that could expose our service (or services downstream)\n to security risks around injections.", + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-_\\.:/*)( ]+$" + } + }, + "com.amazonaws.pi#SanitizedStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.pi#SanitizedString" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.pi#ServiceType": { "type": "enum", "members": { diff --git a/models/pinpoint.json b/models/pinpoint.json index bd176ea385..5dfd2d90a1 100644 --- a/models/pinpoint.json +++ b/models/pinpoint.json @@ -1939,6 +1939,12 @@ "smithy.api#documentation": "

The verified email address to send the email from. The default address is the FromAddress specified for the email channel for the application.

" } }, + "Headers": { + "target": "com.amazonaws.pinpoint#ListOfMessageHeader", + "traits": { + "smithy.api#documentation": "

The list of MessageHeaders for the email. You can have up to 15 MessageHeaders for each email.

" + } + }, "HtmlBody": { "target": "com.amazonaws.pinpoint#__string", "traits": { @@ -6133,6 +6139,12 @@ "smithy.api#documentation": "

The subject line, or title, to use in email messages that are based on the message template.

" } }, + "Headers": { + "target": "com.amazonaws.pinpoint#ListOfMessageHeader", + "traits": { + "smithy.api#documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" + } + }, "tags": { "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { @@ -6206,6 +6218,12 @@ "smithy.api#documentation": "

The subject line, or title, that's used in email messages that are based on the message template.

" } }, + "Headers": { + "target": "com.amazonaws.pinpoint#ListOfMessageHeader", + "traits": { + "smithy.api#documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" + } + }, "tags": { "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { @@ -13509,6 +13527,12 @@ "target": "com.amazonaws.pinpoint#JourneyRunResponse" } }, + "com.amazonaws.pinpoint#ListOfMessageHeader": { + "type": "list", + "member": { + "target": "com.amazonaws.pinpoint#MessageHeader" + } + }, "com.amazonaws.pinpoint#ListOfMultiConditionalBranch": { "type": "list", "member": { @@ -14202,6 +14226,26 @@ "smithy.api#documentation": "

Specifies the message configuration settings for a campaign.

" } }, + "com.amazonaws.pinpoint#MessageHeader": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The name of the message header. The header name can contain up to 126 characters.

" + } + }, + "Value": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The value of the message header. The header value can contain up to 870 characters, including the length of any rendered attributes. For example if you add the {CreationDate} attribute, it renders as YYYY-MM-DDTHH:MM:SS.SSSZ and is 24 characters in length.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the name and value pair of an email header to add to your email. You can have up to 15 MessageHeaders. A header can contain information such as the sender, receiver, route, or timestamp.
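A hedged Swift sketch of the MessageHeader shape described above and of its documented limits (up to 15 headers per email, a 126-character name, an 870-character value); the type and the sample header names are illustrative, not the Soto-generated shape.

```swift
// Illustrative stand-in for com.amazonaws.pinpoint#MessageHeader, enforcing
// the limits stated in the documentation above.
struct MessageHeader {
    let name: String   // up to 126 characters
    let value: String  // up to 870 characters, including rendered attributes

    init?(name: String, value: String) {
        guard name.count <= 126, value.count <= 870 else { return nil }
        self.name = name
        self.value = value
    }
}

func isValidHeaderList(_ headers: [MessageHeader]) -> Bool {
    // You can have up to 15 MessageHeaders for each email.
    headers.count <= 15
}

let headers = [
    MessageHeader(name: "List-Unsubscribe", value: "<mailto:unsubscribe@example.com>"),
    MessageHeader(name: "X-Campaign", value: "spring-launch"),
].compactMap { $0 }
print(isValidHeaderList(headers)) // true
```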

" + } + }, "com.amazonaws.pinpoint#MessageRequest": { "type": "structure", "members": { @@ -18128,6 +18172,12 @@ "traits": { "smithy.api#documentation": "

The body of the email message, in plain text format. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices.

" } + }, + "Headers": { + "target": "com.amazonaws.pinpoint#ListOfMessageHeader", + "traits": { + "smithy.api#documentation": "

The list of MessageHeaders for the email. You can have up to 15 Headers.

" + } } }, "traits": { diff --git a/models/pipes.json b/models/pipes.json index 3300c3d6e5..22208d6573 100644 --- a/models/pipes.json +++ b/models/pipes.json @@ -52,25 +52,25 @@ "Subnets": { "target": "com.amazonaws.pipes#Subnets", "traits": { - "smithy.api#documentation": "

Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

", + "smithy.api#documentation": "

Specifies the subnets associated with the task. These subnets must all be in the same\n VPC. You can specify as many as 16 subnets.

", "smithy.api#required": {} } }, "SecurityGroups": { "target": "com.amazonaws.pipes#SecurityGroups", "traits": { - "smithy.api#documentation": "

Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many \n as five security groups. If you do not specify a security group, the default security group for the VPC is used.

" + "smithy.api#documentation": "

Specifies the security groups associated with the task. These security groups must all\n be in the same VPC. You can specify as many as five security groups. If you do not specify\n a security group, the default security group for the VPC is used.

" } }, "AssignPublicIp": { "target": "com.amazonaws.pipes#AssignPublicIp", "traits": { - "smithy.api#documentation": "

Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when \n LaunchType in EcsParameters is set to FARGATE.

" + "smithy.api#documentation": "

Specifies whether the task's elastic network interface receives a public IP address. You\n can specify ENABLED only when LaunchType in\n EcsParameters is set to FARGATE.

" } } }, "traits": { - "smithy.api#documentation": "

This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used. \n This structure is relevant only for ECS tasks that use the awsvpc network mode.

" + "smithy.api#documentation": "

This structure specifies the VPC subnets and security groups for the task, and whether a\n public IP address is to be used. This structure is relevant only for ECS tasks that use the\n awsvpc network mode.
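A hedged Swift sketch of the constraints called out above (up to 16 subnets, up to five security groups, and a public IP setting that only applies to the FARGATE launch type); the type and the sample identifiers are illustrative, not the Soto-generated shape.

```swift
// Illustrative stand-in for com.amazonaws.pipes#AwsVpcConfiguration.
struct AwsVpcConfiguration {
    let subnets: [String]        // required; all in the same VPC; up to 16
    let securityGroups: [String] // up to 5; empty means the VPC's default group
    let assignPublicIp: String?  // "ENABLED" is only valid with the FARGATE launch type

    var isValid: Bool {
        !subnets.isEmpty && subnets.count <= 16 && securityGroups.count <= 5
    }
}

// The subnet and security-group identifiers are placeholders for the example.
let vpcConfig = AwsVpcConfiguration(
    subnets: ["subnet-0123456789abcdef0"],
    securityGroups: ["sg-0123456789abcdef0"],
    assignPublicIp: "ENABLED"
)
print(vpcConfig.isValid) // true
```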

" } }, "com.amazonaws.pipes#BatchArrayProperties": { @@ -102,25 +102,25 @@ "Command": { "target": "com.amazonaws.pipes#StringList", "traits": { - "smithy.api#documentation": "

The command to send to the container that overrides the default command from the Docker image or the task definition.

" + "smithy.api#documentation": "

The command to send to the container that overrides the default command from the Docker\n image or the task definition.

" } }, "Environment": { "target": "com.amazonaws.pipes#BatchEnvironmentVariableList", "traits": { - "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing \n environment variables from the Docker image or the task definition.

\n \n

Environment variables cannot start with \"Batch\". This naming convention is reserved for variables that Batch sets.

\n
" + "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition.

\n \n

Environment variables cannot start with \"Batch\". This\n naming convention is reserved for variables that Batch sets.

\n
" } }, "InstanceType": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The instance type to use for a multi-node parallel job.

\n \n

This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.

\n
" + "smithy.api#documentation": "

The instance type to use for a multi-node parallel job.

\n \n

This parameter isn't applicable to single-node container jobs or jobs that run on\n Fargate resources, and shouldn't be provided.

\n
" } }, "ResourceRequirements": { "target": "com.amazonaws.pipes#BatchResourceRequirementsList", "traits": { - "smithy.api#documentation": "

The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, \n and VCPU.

" + "smithy.api#documentation": "

The type and amount of resources to assign to a container. This overrides the settings\n in the job definition. The supported resources include GPU,\n MEMORY, and VCPU.

" } } }, @@ -146,18 +146,18 @@ "Name": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of the key-value pair. For environment variables, this is the name of the environment variable.

" + "smithy.api#documentation": "

The name of the key-value pair. For environment variables, this is the name of the\n environment variable.

" } }, "Value": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The value of the key-value pair. For environment variables, this is the value of the environment variable.

" + "smithy.api#documentation": "

The value of the key-value pair. For environment variables, this is the value of the\n environment variable.

" } } }, "traits": { - "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing \n environment variables from the Docker image or the task definition.

\n \n

Environment variables cannot start with \"Batch\". This naming convention is reserved for variables that Batch sets.

\n
" + "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition.

\n \n

Environment variables cannot start with \"Batch\". This\n naming convention is reserved for variables that Batch sets.

\n
" } }, "com.amazonaws.pipes#BatchEnvironmentVariableList": { @@ -172,7 +172,7 @@ "JobId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The job ID of the Batch job that's associated with this dependency.

" + "smithy.api#documentation": "

The job ID of the Batch job that's associated with this\n dependency.

" } }, "Type": { @@ -216,20 +216,20 @@ "Type": { "target": "com.amazonaws.pipes#BatchResourceRequirementType", "traits": { - "smithy.api#documentation": "

The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.

", + "smithy.api#documentation": "

The type of resource to assign to a container. The supported resources include\n GPU, MEMORY, and VCPU.

", "smithy.api#required": {} } }, "Value": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type="GPU"
    The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.
    GPUs aren't available for jobs that are running on Fargate resources.

type="MEMORY"
    The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.
    If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory management in the Batch User Guide.
    For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.
      • value = 512: VCPU = 0.25
      • value = 1024: VCPU = 0.25 or 0.5
      • value = 2048: VCPU = 0.25, 0.5, or 1
      • value = 3072: VCPU = 0.5, or 1
      • value = 4096: VCPU = 0.5, 1, or 2
      • value = 5120, 6144, or 7168: VCPU = 1 or 2
      • value = 8192: VCPU = 1, 2, 4, or 8
      • value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360: VCPU = 2 or 4
      • value = 16384: VCPU = 2, 4, or 8
      • value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720: VCPU = 4
      • value = 20480, 24576, or 28672: VCPU = 4 or 8
      • value = 36864, 45056, 53248, or 61440: VCPU = 8
      • value = 32768, 40960, 49152, or 57344: VCPU = 8 or 16
      • value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880: VCPU = 16

type="VCPU"
    The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.
    The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference.
    For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16.
      • value = 0.25: MEMORY = 512, 1024, or 2048
      • value = 0.5: MEMORY = 1024, 2048, 3072, or 4096
      • value = 1: MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192
      • value = 2: MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384
      • value = 4: MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720
      • value = 8: MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440
      • value = 16: MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880
", + "smithy.api#documentation": "

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type="GPU"
    The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.
    GPUs aren't available for jobs that are running on Fargate resources.

type="MEMORY"
    The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.
    If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory management in the Batch User Guide.
    For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.
      • value = 512: VCPU = 0.25
      • value = 1024: VCPU = 0.25 or 0.5
      • value = 2048: VCPU = 0.25, 0.5, or 1
      • value = 3072: VCPU = 0.5, or 1
      • value = 4096: VCPU = 0.5, 1, or 2
      • value = 5120, 6144, or 7168: VCPU = 1 or 2
      • value = 8192: VCPU = 1, 2, 4, or 8
      • value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360: VCPU = 2 or 4
      • value = 16384: VCPU = 2, 4, or 8
      • value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720: VCPU = 4
      • value = 20480, 24576, or 28672: VCPU = 4 or 8
      • value = 36864, 45056, 53248, or 61440: VCPU = 8
      • value = 32768, 40960, 49152, or 57344: VCPU = 8 or 16
      • value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880: VCPU = 16

type="VCPU"
    The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.
    The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference.
    For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16.
      • value = 0.25: MEMORY = 512, 1024, or 2048
      • value = 0.5: MEMORY = 1024, 2048, 3072, or 4096
      • value = 1: MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192
      • value = 2: MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384
      • value = 4: MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720
      • value = 8: MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440
      • value = 16: MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.

" + "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The supported resources\n include GPU, MEMORY, and VCPU.
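The MEMORY/VCPU pairings listed above for Fargate are easy to get wrong when building resource requirements by hand. The sketch below encodes a few of those pairings as a lookup table so a combination can be checked before a job is submitted; it is illustrative and intentionally partial, covering only some of the memory values from the documentation above.

```swift
// Partial lookup of Fargate MEMORY (MiB) values to the VCPU values they allow,
// taken from the documentation above. Extend as needed; memory values not
// listed here are simply not checked.
let fargateMemoryToVcpu: [Int: [Double]] = [
    512:   [0.25],
    1024:  [0.25, 0.5],
    2048:  [0.25, 0.5, 1],
    4096:  [0.5, 1, 2],
    8192:  [1, 2, 4, 8],
    16384: [2, 4, 8],
]

func isValidFargatePairing(memoryMiB: Int, vcpu: Double) -> Bool {
    guard let allowed = fargateMemoryToVcpu[memoryMiB] else { return false }
    return allowed.contains(vcpu)
}

print(isValidFargatePairing(memoryMiB: 2048, vcpu: 0.5)) // true
print(isValidFargatePairing(memoryMiB: 2048, vcpu: 2))   // false per the table above
```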

" } }, "com.amazonaws.pipes#BatchResourceRequirementType": { @@ -272,12 +272,12 @@ "Attempts": { "target": "com.amazonaws.pipes#BatchRetryAttempts", "traits": { - "smithy.api#documentation": "

The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on \n failure the same number of attempts as the value.

" + "smithy.api#documentation": "

The number of times to move a job to the RUNNABLE status. If the value of\n attempts is greater than one, the job is retried on failure the same number\n of attempts as the value.

" } } }, "traits": { - "smithy.api#documentation": "

The retry strategy that's associated with a job. For more information, see \n Automated job retries in the Batch User Guide.

" + "smithy.api#documentation": "

The retry strategy that's associated with a job. For more information, see Automated job\n retries in the Batch User Guide.

" } }, "com.amazonaws.pipes#Boolean": { @@ -322,14 +322,14 @@ "target": "com.amazonaws.pipes#CapacityProviderStrategyItemWeight", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The weight value designates the relative percentage of the total number of tasks launched\n that should use the specified capacity provider. The weight value is taken into consideration\n after the base value, if defined, is satisfied.

" + "smithy.api#documentation": "

The weight value designates the relative percentage of the total number of tasks\n launched that should use the specified capacity provider. The weight value is taken into\n consideration after the base value, if defined, is satisfied.

" } }, "base": { "target": "com.amazonaws.pipes#CapacityProviderStrategyItemBase", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The base value designates how many tasks, at a minimum, to run on the specified capacity\n provider. Only one capacity provider in a capacity provider strategy can have a base defined.\n If no value is specified, the default value of 0 is used.

" + "smithy.api#documentation": "

The base value designates how many tasks, at a minimum, to run on the specified capacity\n provider. Only one capacity provider in a capacity provider strategy can have a base\n defined. If no value is specified, the default value of 0 is used.
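The weight and base semantics described above are easiest to see with a small worked example. The sketch below only models the strategy data and walks through one scenario in comments; it is not an implementation of the ECS placement logic.

```swift
// Illustrative stand-in for a capacity provider strategy item.
struct CapacityProviderStrategyItem {
    let capacityProvider: String
    let base: Int   // minimum tasks on this provider; only one item may set it
    let weight: Int // relative share of the remaining tasks
}

// Worked example of the documented semantics: with this strategy the first
// 2 tasks run on "on-demand" (its base), and any further tasks are split
// 1:3 between "on-demand" and "spot" according to their weights.
let strategy = [
    CapacityProviderStrategyItem(capacityProvider: "on-demand", base: 2, weight: 1),
    CapacityProviderStrategyItem(capacityProvider: "spot", base: 0, weight: 3),
]
print(strategy.map(\.capacityProvider)) // ["on-demand", "spot"]
```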

" } } }, @@ -461,7 +461,7 @@ "aws:RequestTag/${TagKey}", "aws:TagKeys" ], - "smithy.api#documentation": "

Create a pipe. Amazon EventBridge Pipes connect event sources to targets and reduces the need for specialized knowledge and integration code.

", + "smithy.api#documentation": "

Create a pipe. Amazon EventBridge Pipes connect event sources to targets and reduces\n the need for specialized knowledge and integration code.

", "smithy.api#http": { "method": "POST", "uri": "/v1/pipes/{Name}", @@ -634,7 +634,7 @@ } }, "traits": { - "smithy.api#documentation": "

A DeadLetterConfig object that contains information about a dead-letter queue configuration.

" + "smithy.api#documentation": "

A DeadLetterConfig object that contains information about a dead-letter\n queue configuration.

" } }, "com.amazonaws.pipes#DeletePipe": { @@ -905,6 +905,76 @@ "smithy.api#output": {} } }, + "com.amazonaws.pipes#DimensionMapping": { + "type": "structure", + "members": { + "DimensionValue": { + "target": "com.amazonaws.pipes#DimensionValue", + "traits": { + "smithy.api#documentation": "

Dynamic path to the dimension value in the source event.

", + "smithy.api#required": {} + } + }, + "DimensionValueType": { + "target": "com.amazonaws.pipes#DimensionValueType", + "traits": { + "smithy.api#documentation": "

The data type of the dimension for the time-series data.

", + "smithy.api#required": {} + } + }, + "DimensionName": { + "target": "com.amazonaws.pipes#DimensionName", + "traits": { + "smithy.api#documentation": "

The metadata attributes of the time series. For example, the name and Availability Zone\n of an Amazon EC2 instance or the name of the manufacturer of a wind turbine are\n dimensions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Maps source data to a dimension in the target Timestream for LiveAnalytics\n table.

\n

For more information, see Amazon Timestream for LiveAnalytics concepts\n
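A minimal Swift sketch of the DimensionMapping structure described above; the type is illustrative rather than the Soto-generated shape, and the JSON-path style dimensionValue and the event field it points at are assumptions for the example.

```swift
// Illustrative stand-in for com.amazonaws.pipes#DimensionMapping.
struct DimensionMapping {
    let dimensionValue: String     // dynamic path to the value in the source event (1-2048 chars)
    let dimensionValueType: String // only "VARCHAR" is defined by the model
    let dimensionName: String      // dimension name in the Timestream table (1-256 chars)
}

// Assumed event shape: pull the turbine id out of the event detail and write
// it as the "turbine_id" dimension of the time series.
let mapping = DimensionMapping(
    dimensionValue: "$.detail.turbineId",
    dimensionValueType: "VARCHAR",
    dimensionName: "turbine_id"
)
print(mapping.dimensionName) // turbine_id
```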

" + } + }, + "com.amazonaws.pipes#DimensionMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#DimensionMapping" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.pipes#DimensionName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.pipes#DimensionValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.pipes#DimensionValueType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "VARCHAR", + "name": "VARCHAR" + } + ] + } + }, "com.amazonaws.pipes#DynamoDBStreamStartPosition": { "type": "string", "traits": { @@ -926,54 +996,54 @@ "Command": { "target": "com.amazonaws.pipes#StringList", "traits": { - "smithy.api#documentation": "

The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.

" + "smithy.api#documentation": "

The command to send to the container that overrides the default command from the Docker\n image or the task definition. You must also specify a container name.

" } }, "Cpu": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name.

" + "smithy.api#documentation": "

The number of cpu units reserved for the container, instead of the default\n value from the task definition. You must also specify a container name.

" } }, "Environment": { "target": "com.amazonaws.pipes#EcsEnvironmentVariableList", "traits": { - "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can \n override the existing environment variables from the Docker image or the task definition. You must also specify a container name.

" + "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition. You must also specify a\n container name.

" } }, "EnvironmentFiles": { "target": "com.amazonaws.pipes#EcsEnvironmentFileList", "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container, instead of the value from the container definition.

" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container, instead of\n the value from the container definition.

" } }, "Memory": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. \n If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.

" + "smithy.api#documentation": "

The hard limit (in MiB) of memory to present to the container, instead of the default\n value from the task definition. If your container attempts to exceed the memory specified\n here, the container is killed. You must also specify a container name.

" } }, "MemoryReservation": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. \n You must also specify a container name.

" + "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default\n value from the task definition. You must also specify a container name.

" } }, "Name": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The name of the container that receives the override. This parameter is required if any override is specified.

" + "smithy.api#documentation": "

The name of the container that receives the override. This parameter is required if any\n override is specified.

" } }, "ResourceRequirements": { "target": "com.amazonaws.pipes#EcsResourceRequirementsList", "traits": { - "smithy.api#documentation": "

The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.

" + "smithy.api#documentation": "

The type and amount of a resource to assign to a container, instead of the default value\n from the task definition. The only supported resource is a GPU.

" } } }, "traits": { - "smithy.api#documentation": "

The overrides that are sent to a container. An empty container override can be passed in. An example of an empty \n container override is {\"containerOverrides\": [ ] }. If a non-empty container override is specified, the name parameter must be included.

" + "smithy.api#documentation": "

The overrides that are sent to a container. An empty container override can be passed\n in. An example of an empty container override is {\"containerOverrides\": [ ] }.\n If a non-empty container override is specified, the name parameter must be\n included.
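A small hedged Swift sketch of the rule stated above, that any non-empty container override must also carry the container name; the type is illustrative and simplified (for instance, environment is modeled as a dictionary here), not the Soto-generated shape.

```swift
// Illustrative, simplified stand-in for com.amazonaws.pipes#EcsContainerOverride.
struct EcsContainerOverride {
    var name: String? = nil      // required as soon as any other override is set
    var command: [String]? = nil
    var cpu: Int? = nil
    var memory: Int? = nil
    var environment: [String: String]? = nil

    // Mirrors the documented rule: a completely empty override is fine, but a
    // non-empty override must include the container name.
    var isValid: Bool {
        let hasOverride = command != nil || cpu != nil || memory != nil || environment != nil
        return !hasOverride || name != nil
    }
}

let containerOverride = EcsContainerOverride(name: "worker", command: ["run", "--once"])
print(containerOverride.isValid) // true
```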

" } }, "com.amazonaws.pipes#EcsContainerOverrideList": { @@ -995,13 +1065,13 @@ "value": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon S3 object containing the\n environment variable file.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. You can\n specify up to ten environment files. The file must have a .env file\n extension. Each line in an environment file should contain an environment variable in\n VARIABLE=VALUE format. Lines beginning with # are treated\n as comments and are ignored. For more information about the environment variable file\n syntax, see Declare default\n environment variables in file.

\n

If there are environment variables specified using the environment\n parameter in a container definition, they take precedence over the variables contained\n within an environment file. If multiple environment files are specified that contain the\n same variable, they're processed from the top down. We recommend that you use unique\n variable names. For more information, see Specifying environment\n variables in the Amazon Elastic Container Service Developer Guide.

\n

This parameter is only supported for tasks hosted on Fargate using the\n following platform versions:

  • Linux platform version 1.4.0 or later.
  • Windows platform version 1.0.0 or later.
" + "smithy.api#documentation": "

A list of files containing the environment variables to pass to a container. You can\n specify up to ten environment files. The file must have a .env file extension.\n Each line in an environment file should contain an environment variable in\n VARIABLE=VALUE format. Lines beginning with # are treated as\n comments and are ignored. For more information about the environment variable file syntax,\n see Declare default environment\n variables in file.

\n

If there are environment variables specified using the environment\n parameter in a container definition, they take precedence over the variables contained\n within an environment file. If multiple environment files are specified that contain the\n same variable, they're processed from the top down. We recommend that you use unique\n variable names. For more information, see Specifying environment\n variables in the Amazon Elastic Container Service Developer\n Guide.

\n

This parameter is only supported for tasks hosted on Fargate using the\n following platform versions:

  • Linux platform version 1.4.0 or later.
  • Windows platform version 1.0.0 or later.
" } }, "com.amazonaws.pipes#EcsEnvironmentFileList": { @@ -1027,18 +1097,18 @@ "name": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The name of the key-value pair. For environment variables, this is the name of the environment variable.

" + "smithy.api#documentation": "

The name of the key-value pair. For environment variables, this is the name of the\n environment variable.

" } }, "value": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The value of the key-value pair. For environment variables, this is the value of the environment variable.

" + "smithy.api#documentation": "

The value of the key-value pair. For environment variables, this is the value of the\n environment variable.

" } } }, "traits": { - "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can \n override the existing environment variables from the Docker image or the task definition. You must also specify a container name.

" + "smithy.api#documentation": "

The environment variables to send to the container. You can add new environment\n variables, which are added to the container at launch, or you can override the existing\n environment variables from the Docker image or the task definition. You must also specify a\n container name.

" } }, "com.amazonaws.pipes#EcsEnvironmentVariableList": { @@ -1053,13 +1123,13 @@ "sizeInGiB": { "target": "com.amazonaws.pipes#EphemeralStorageSize", "traits": { - "smithy.api#documentation": "

The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n supported value is 21 GiB and the maximum supported value is\n 200 GiB.

", + "smithy.api#documentation": "

The total amount, in GiB, of ephemeral storage to set for the task. The minimum\n supported value is 21 GiB and the maximum supported value is 200\n GiB.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task. This parameter is used to\n expand the total amount of ephemeral storage available, beyond the default amount, for\n tasks hosted on Fargate. For more information, see Fargate task\n storage in the Amazon ECS User Guide for Fargate.

\n \n

This parameter is only supported for tasks hosted on Fargate using\n Linux platform version 1.4.0 or later. This parameter is not supported\n for Windows containers on Fargate.

\n
" + "smithy.api#documentation": "

The amount of ephemeral storage to allocate for the task. This parameter is used to\n expand the total amount of ephemeral storage available, beyond the default amount, for\n tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide\n for Fargate.

\n \n

This parameter is only supported for tasks hosted on Fargate using\n Linux platform version 1.4.0 or later. This parameter is not supported for\n Windows containers on Fargate.

\n
" } }, "com.amazonaws.pipes#EcsInferenceAcceleratorOverride": { @@ -1068,7 +1138,7 @@ "deviceName": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition.

" + "smithy.api#documentation": "

The Elastic Inference accelerator device name to override for the task. This parameter\n must match a deviceName specified in the task definition.

" } }, "deviceType": { @@ -1079,7 +1149,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details on an Elastic Inference accelerator task override. This parameter is used to\n override the Elastic Inference accelerator specified in the task definition. For more\n information, see Working with Amazon\n Elastic Inference on Amazon ECS in the\n Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Details on an Elastic Inference accelerator task override. This parameter is used to\n override the Elastic Inference accelerator specified in the task definition. For more\n information, see Working with Amazon Elastic\n Inference on Amazon ECS in the Amazon Elastic Container Service\n Developer Guide.

" } }, "com.amazonaws.pipes#EcsInferenceAcceleratorOverrideList": { @@ -1094,20 +1164,20 @@ "type": { "target": "com.amazonaws.pipes#EcsResourceRequirementType", "traits": { - "smithy.api#documentation": "

The type of resource to assign to a container. The supported values are\n GPU or InferenceAccelerator.

", + "smithy.api#documentation": "

The type of resource to assign to a container. The supported values are GPU\n or InferenceAccelerator.

", "smithy.api#required": {} } }, "value": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The value for the specified resource type.

\n

If the GPU type is used, the value is the number of physical\n GPUs the Amazon ECS container agent reserves for the container. The number\n of GPUs that's reserved for all containers in a task can't exceed the number of\n available GPUs on the container instance that the task is launched on.

\n

If the InferenceAccelerator type is used, the value matches\n the deviceName for an InferenceAccelerator specified in a\n task definition.

", + "smithy.api#documentation": "

The value for the specified resource type.

\n

If the GPU type is used, the value is the number of physical\n GPUs the Amazon ECS container agent reserves for the container. The\n number of GPUs that's reserved for all containers in a task can't exceed the number of\n available GPUs on the container instance that the task is launched on.

\n

If the InferenceAccelerator type is used, the value matches\n the deviceName for an InferenceAccelerator specified in a task\n definition.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The supported resource\n types are GPUs and Elastic Inference accelerators. For more information, see Working with\n GPUs on Amazon ECS or Working with\n Amazon Elastic Inference on Amazon ECS in the\n Amazon Elastic Container Service Developer Guide\n

" + "smithy.api#documentation": "

The type and amount of a resource to assign to a container. The supported resource types\n are GPUs and Elastic Inference accelerators. For more information, see Working with\n GPUs on Amazon ECS or Working with Amazon Elastic\n Inference on Amazon ECS in the Amazon Elastic Container Service\n Developer Guide\n

" } }, "com.amazonaws.pipes#EcsResourceRequirementType": { @@ -1149,13 +1219,13 @@ "EphemeralStorage": { "target": "com.amazonaws.pipes#EcsEphemeralStorage", "traits": { - "smithy.api#documentation": "

The ephemeral storage setting override for the task.

\n \n

This parameter is only supported for tasks hosted on Fargate that\n use the following platform versions:

  • Linux platform version 1.4.0 or later.
  • Windows platform version 1.0.0 or later.
" + "smithy.api#documentation": "

The ephemeral storage setting override for the task.

\n \n

This parameter is only supported for tasks hosted on Fargate that use\n the following platform versions:

  • Linux platform version 1.4.0 or later.
  • Windows platform version 1.0.0 or later.
" } }, "ExecutionRoleArn": { "target": "com.amazonaws.pipes#ArnOrJsonPath", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For more\n information, see Amazon ECS task\n execution IAM role in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For\n more information, see Amazon ECS\n task execution IAM role in the Amazon Elastic Container Service Developer\n Guide.

" } }, "InferenceAcceleratorOverrides": { @@ -1173,7 +1243,7 @@ "TaskRoleArn": { "target": "com.amazonaws.pipes#ArnOrJsonPath", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers\n in this task are granted the permissions that are specified in this role. For more\n information, see IAM Role for Tasks\n in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume.\n All containers in this task are granted the permissions that are specified in this role.\n For more information, see IAM Role for Tasks in\n the Amazon Elastic Container Service Developer Guide.

" } } }, @@ -1201,6 +1271,29 @@ } } }, + "com.amazonaws.pipes#EpochTimeUnit": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "MILLISECONDS", + "name": "MILLISECONDS" + }, + { + "value": "SECONDS", + "name": "SECONDS" + }, + { + "value": "MICROSECONDS", + "name": "MICROSECONDS" + }, + { + "value": "NANOSECONDS", + "name": "NANOSECONDS" + } + ] + } + }, "com.amazonaws.pipes#ErrorMessage": { "type": "string" }, @@ -1269,7 +1362,7 @@ } }, "traits": { - "smithy.api#documentation": "

Filter events using an event pattern. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Filter events using an event pattern. For more information, see Events and Event\n Patterns in the Amazon EventBridge User Guide.

" } }, "com.amazonaws.pipes#FilterCriteria": { @@ -1314,12 +1407,12 @@ "DeliveryStreamArn": { "target": "com.amazonaws.pipes#FirehoseArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.

" } } }, "traits": { - "smithy.api#documentation": "

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "smithy.api#documentation": "

The Amazon Data Firehose logging configuration settings for the pipe.

" } }, "com.amazonaws.pipes#FirehoseLogDestinationParameters": { @@ -1328,13 +1421,13 @@ "DeliveryStreamArn": { "target": "com.amazonaws.pipes#FirehoseArn", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.

", + "smithy.api#documentation": "

Specifies the Amazon Resource Name (ARN) of the Firehose delivery stream to\n which EventBridge delivers the pipe log records.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "smithy.api#documentation": "

The Amazon Data Firehose logging configuration settings for the pipe.

" } }, "com.amazonaws.pipes#HeaderKey": { @@ -1409,7 +1502,7 @@ "retryAfterSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The number of seconds to wait before retrying the action that caused the exception.

", + "smithy.api#documentation": "

The number of seconds to wait before retrying the action that caused the\n exception.

", "smithy.api#httpHeader": "Retry-After" } } @@ -1578,7 +1671,7 @@ "NamePrefix": { "target": "com.amazonaws.pipes#PipeName", "traits": { - "smithy.api#documentation": "

A value that will return a subset of the pipes associated with this account. For example, \"NamePrefix\": \"ABC\" will return \n all endpoints with \"ABC\" in the name.

", + "smithy.api#documentation": "

A value that will return a subset of the pipes associated with this account. For\n example, \"NamePrefix\": \"ABC\" will return all endpoints with \"ABC\" in the\n name.

", "smithy.api#httpQuery": "NamePrefix" } }, @@ -1834,6 +1927,51 @@ } } }, + "com.amazonaws.pipes#MeasureName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.pipes#MeasureValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "com.amazonaws.pipes#MeasureValueType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DOUBLE", + "name": "DOUBLE" + }, + { + "value": "BIGINT", + "name": "BIGINT" + }, + { + "value": "VARCHAR", + "name": "VARCHAR" + }, + { + "value": "BOOLEAN", + "name": "BOOLEAN" + }, + { + "value": "TIMESTAMP", + "name": "TIMESTAMP" + } + ] + } + }, "com.amazonaws.pipes#MessageDeduplicationId": { "type": "string", "traits": { @@ -1854,18 +1992,111 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.pipes#MultiMeasureAttributeMapping": { + "type": "structure", + "members": { + "MeasureValue": { + "target": "com.amazonaws.pipes#MeasureValue", + "traits": { + "smithy.api#documentation": "

Dynamic path to the measurement attribute in the source event.

", + "smithy.api#required": {} + } + }, + "MeasureValueType": { + "target": "com.amazonaws.pipes#MeasureValueType", + "traits": { + "smithy.api#documentation": "

Data type of the measurement attribute in the source event.

", + "smithy.api#required": {} + } + }, + "MultiMeasureAttributeName": { + "target": "com.amazonaws.pipes#MultiMeasureAttributeName", + "traits": { + "smithy.api#documentation": "

Target measure name to be used.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A mapping of a source event data field to a measure in a Timestream for\n LiveAnalytics record.

" + } + }, + "com.amazonaws.pipes#MultiMeasureAttributeMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#MultiMeasureAttributeMapping" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.pipes#MultiMeasureAttributeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.pipes#MultiMeasureMapping": { + "type": "structure", + "members": { + "MultiMeasureName": { + "target": "com.amazonaws.pipes#MultiMeasureName", + "traits": { + "smithy.api#documentation": "

The name of the multiple measurements per record (multi-measure).

", + "smithy.api#required": {} + } + }, + "MultiMeasureAttributeMappings": { + "target": "com.amazonaws.pipes#MultiMeasureAttributeMappings", + "traits": { + "smithy.api#documentation": "

Mappings that represent multiple source event fields mapped to measures in the same\n Timestream for LiveAnalytics record.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Maps multiple measures from the source event to the same Timestream for\n LiveAnalytics record.

\n

For more information, see Amazon Timestream for LiveAnalytics concepts\n

" + } + }, + "com.amazonaws.pipes#MultiMeasureMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#MultiMeasureMapping" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.pipes#MultiMeasureName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.pipes#NetworkConfiguration": { "type": "structure", "members": { "awsvpcConfiguration": { "target": "com.amazonaws.pipes#AwsVpcConfiguration", "traits": { - "smithy.api#documentation": "

Use this structure to specify the VPC subnets and security groups for the task, and\n whether a public IP address is to be used. This structure is relevant only for ECS tasks that\n use the awsvpc network mode.

" + "smithy.api#documentation": "

Use this structure to specify the VPC subnets and security groups for the task, and\n whether a public IP address is to be used. This structure is relevant only for ECS tasks\n that use the awsvpc network mode.

" } } }, "traits": { - "smithy.api#documentation": "

This structure specifies the network configuration for an Amazon ECS task.

" + "smithy.api#documentation": "

This structure specifies the network configuration for an Amazon ECS\n task.

" } }, "com.amazonaws.pipes#NextToken": { @@ -1991,7 +2222,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object that represents a pipe. Amazon EventBridgePipes connect event sources to targets and reduces the need for specialized knowledge and integration code.

" + "smithy.api#documentation": "

An object that represents a pipe. Amazon EventBridge Pipes connects event sources to targets and reduces the need for specialized knowledge and integration code.

" } }, "com.amazonaws.pipes#PipeArn": { @@ -2021,24 +2252,24 @@ "PathParameterValues": { "target": "com.amazonaws.pipes#PathParameterList", "traits": { - "smithy.api#documentation": "

The path parameter values to be used to populate API Gateway REST API or EventBridge\n ApiDestination path wildcards (\"*\").

" + "smithy.api#documentation": "

The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\").

" } }, "HeaderParameters": { "target": "com.amazonaws.pipes#HeaderParametersMap", "traits": { - "smithy.api#documentation": "

The headers that need to be sent as part of request invoking the API Gateway REST API or\n EventBridge ApiDestination.

" + "smithy.api#documentation": "

The headers that need to be sent as part of request invoking the API Gateway REST\n API or EventBridge ApiDestination.

" } }, "QueryStringParameters": { "target": "com.amazonaws.pipes#QueryStringParametersMap", "traits": { - "smithy.api#documentation": "

The query string keys/values that need to be sent as part of request invoking the API Gateway \n REST API or EventBridge ApiDestination.

" + "smithy.api#documentation": "

The query string keys/values that need to be sent as part of request invoking the\n API Gateway REST API or EventBridge ApiDestination.

" } } }, "traits": { - "smithy.api#documentation": "

These are custom parameter to be used when the target is an API Gateway REST APIs or\n EventBridge ApiDestinations. In the latter case, these are merged with any\n InvocationParameters specified on the Connection, with any values from the Connection taking\n precedence.

" + "smithy.api#documentation": "

These are custom parameters to be used when the target is an API Gateway REST API or EventBridge ApiDestination. In the latter case, these are merged with any InvocationParameters specified on the Connection, with any values from the Connection taking precedence.

" } }, "com.amazonaws.pipes#PipeEnrichmentParameters": { @@ -2047,13 +2278,13 @@ "InputTemplate": { "target": "com.amazonaws.pipes#InputTemplate", "traits": { - "smithy.api#documentation": "

Valid JSON text passed to the enrichment. In this case, nothing from the event itself is\n passed to the enrichment. For more information, see The JavaScript Object Notation (JSON) Data\n Interchange Format.

\n

To remove an input template, specify an empty string.

" + "smithy.api#documentation": "

Valid JSON text passed to the enrichment. In this case, nothing from the event itself is\n passed to the enrichment. For more information, see The JavaScript Object Notation (JSON)\n Data Interchange Format.

\n

To remove an input template, specify an empty string.

" } }, "HttpParameters": { "target": "com.amazonaws.pipes#PipeEnrichmentHttpParameters", "traits": { - "smithy.api#documentation": "

Contains the HTTP parameters to use when the target is a API Gateway REST endpoint or\n EventBridge ApiDestination.

\n

If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can\n use this parameter to specify headers, path parameters, and query string keys/values as part\n of your target invoking request. If you're using ApiDestinations, the corresponding Connection\n can also have these values configured. In case of any conflicting keys, values from the\n Connection take precedence.

" + "smithy.api#documentation": "

Contains the HTTP parameters to use when the target is an API Gateway REST endpoint or EventBridge ApiDestination.

\n

If you specify an API Gateway REST API or EventBridge ApiDestination as a\n target, you can use this parameter to specify headers, path parameters, and query string\n keys/values as part of your target invoking request. If you're using ApiDestinations, the\n corresponding Connection can also have these values configured. In case of any conflicting\n keys, values from the Connection take precedence.

" } } }, @@ -2079,7 +2310,7 @@ "FirehoseLogDestination": { "target": "com.amazonaws.pipes#FirehoseLogDestination", "traits": { - "smithy.api#documentation": "

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "smithy.api#documentation": "

The Amazon Data Firehose logging configuration settings for the pipe.

" } }, "CloudwatchLogsLogDestination": { @@ -2097,7 +2328,7 @@ "IncludeExecutionData": { "target": "com.amazonaws.pipes#IncludeExecutionData", "traits": { - "smithy.api#documentation": "

Whether the execution data (specifically, the payload, awsRequest, and awsResponse fields) is included in the log messages for this pipe.

\n

This applies to all log destinations for the pipe.

\n

For more information, see Including execution data in logs in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Whether the execution data (specifically, the payload,\n awsRequest, and awsResponse fields) is included in the log\n messages for this pipe.

\n

This applies to all log destinations for the pipe.

\n

For more information, see Including execution data in logs in the Amazon EventBridge User\n Guide.

" } } }, @@ -2117,7 +2348,7 @@ "FirehoseLogDestination": { "target": "com.amazonaws.pipes#FirehoseLogDestinationParameters", "traits": { - "smithy.api#documentation": "

The Amazon Kinesis Data Firehose logging configuration settings for the pipe.

" + "smithy.api#documentation": "

The Amazon Data Firehose logging configuration settings for the pipe.

" } }, "CloudwatchLogsLogDestination": { @@ -2129,19 +2360,19 @@ "Level": { "target": "com.amazonaws.pipes#LogLevel", "traits": { - "smithy.api#documentation": "

The level of logging detail to include. This applies to all log destinations for the pipe.

\n

For more information, see Specifying EventBridge Pipes log level in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

The level of logging detail to include. This applies to all log destinations for the pipe.

\n

For more information, see Specifying\n EventBridge Pipes log level in the Amazon EventBridge User\n Guide.

", "smithy.api#required": {} } }, "IncludeExecutionData": { "target": "com.amazonaws.pipes#IncludeExecutionData", "traits": { - "smithy.api#documentation": "

Specify ON to include the execution data (specifically, the payload and awsRequest fields) in the log messages for this pipe.

\n

This applies to all log destinations for the pipe.

\n

For more information, see Including execution data in logs in the Amazon EventBridge User Guide.

\n

The default is OFF.

" + "smithy.api#documentation": "

Specify ALL to include the execution data (specifically, the\n payload, awsRequest, and awsResponse fields) in\n the log messages for this pipe.

\n

This applies to all log destinations for the pipe.

\n

For more information, see Including execution data in logs in the Amazon EventBridge User\n Guide.

\n

By default, execution data is not included.

" } } }, "traits": { - "smithy.api#documentation": "

Specifies the logging configuration settings for the pipe.

\n

When you call UpdatePipe, EventBridge updates the fields in the\n PipeLogConfigurationParameters object atomically as one and overrides\n existing values. This is by design. If you don't specify an optional field in any of the\n Amazon Web Services service parameters objects\n (CloudwatchLogsLogDestinationParameters,\n FirehoseLogDestinationParameters, or\n S3LogDestinationParameters), EventBridge sets that field to its\n system-default value during the update.

\n

For example, suppose when you created the pipe you\n specified a Kinesis Data Firehose stream log destination. You then update the pipe to add an\n Amazon S3 log destination. In addition to specifying the\n S3LogDestinationParameters for the new log destination, you must also\n specify the fields in the FirehoseLogDestinationParameters object in order to\n retain the Kinesis Data Firehose stream log destination.

\n

For more information on generating pipe log records, see Log EventBridge Pipes in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Specifies the logging configuration settings for the pipe.

\n

When you call UpdatePipe, EventBridge updates the fields in the\n PipeLogConfigurationParameters object atomically as one and overrides\n existing values. This is by design. If you don't specify an optional field in any of the\n Amazon Web Services service parameters objects\n (CloudwatchLogsLogDestinationParameters,\n FirehoseLogDestinationParameters, or\n S3LogDestinationParameters), EventBridge sets that field to its\n system-default value during the update.

\n

For example, suppose when you created the pipe you specified a Firehose stream\n log destination. You then update the pipe to add an Amazon S3 log destination. In\n addition to specifying the S3LogDestinationParameters for the new log\n destination, you must also specify the fields in the\n FirehoseLogDestinationParameters object in order to retain the Firehose stream log destination.

\n

For more information on generating pipe log records, see Log EventBridge\n Pipes in the Amazon EventBridge User Guide.

" } }, "com.amazonaws.pipes#PipeName": { @@ -2347,7 +2578,7 @@ "StartingPositionTimestamp": { "target": "com.amazonaws.pipes#Timestamp", "traits": { - "smithy.api#documentation": "

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading, in Unix time seconds.

" + "smithy.api#documentation": "

With StartingPosition set to AT_TIMESTAMP, the time from which\n to start reading, in Unix time seconds.

" } } }, @@ -2448,7 +2679,7 @@ "SelfManagedKafkaParameters": { "target": "com.amazonaws.pipes#PipeSourceSelfManagedKafkaParameters", "traits": { - "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

" + "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

\n

A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself, as well as those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" } } }, @@ -2556,7 +2787,7 @@ } }, "traits": { - "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

" + "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

\n

A self managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself, as well as those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" } }, "com.amazonaws.pipes#PipeSourceSqsQueueParameters": { @@ -2662,14 +2893,14 @@ "JobDefinition": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. \n If name is specified without a revision then the latest active revision is used.

", + "smithy.api#documentation": "

The job definition used by this job. This value can be one of name,\n name:revision, or the Amazon Resource Name (ARN) for the job definition. If\n name is specified without a revision then the latest active revision is used.

", "smithy.api#required": {} } }, "JobName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), \n and underscores (_).

", + "smithy.api#documentation": "

The name of the job. It can be up to 128 letters long. The first character must be\n alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and\n underscores (_).

", "smithy.api#required": {} } }, @@ -2682,7 +2913,7 @@ "RetryStrategy": { "target": "com.amazonaws.pipes#BatchRetryStrategy", "traits": { - "smithy.api#documentation": "

The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.

" + "smithy.api#documentation": "

The retry strategy to use for failed jobs. When a retry strategy is specified here, it\n overrides the retry strategy defined in the job definition.

" } }, "ContainerOverrides": { @@ -2694,13 +2925,13 @@ "DependsOn": { "target": "com.amazonaws.pipes#BatchDependsOn", "traits": { - "smithy.api#documentation": "

A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without \n specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N \n type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each \n dependency to complete before it can begin.

" + "smithy.api#documentation": "

A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can\n specify a SEQUENTIAL type dependency without specifying a job ID for array\n jobs so that each child array job completes sequentially, starting at index 0. You can also\n specify an N_TO_N type dependency with a job ID for array jobs. In that case,\n each index child of this job must wait for the corresponding index child of each dependency\n to complete before it can begin.

" } }, "Parameters": { "target": "com.amazonaws.pipes#BatchParametersMap", "traits": { - "smithy.api#documentation": "

Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and \n value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition.

" + "smithy.api#documentation": "

Additional parameters passed to the job that replace parameter substitution placeholders\n that are set in the job definition. Parameters are specified as a key and value pair\n mapping. Parameters included here override any corresponding parameter defaults from the\n job definition.

" } } }, @@ -2720,7 +2951,7 @@ "Timestamp": { "target": "com.amazonaws.pipes#JsonPath", "traits": { - "smithy.api#documentation": "

The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + "smithy.api#documentation": "

The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970\n 00:00:00 UTC.

" } } }, @@ -2734,51 +2965,51 @@ "TaskDefinitionArn": { "target": "com.amazonaws.pipes#ArnOrJsonPath", "traits": { - "smithy.api#documentation": "

The ARN of the task definition to use if the event target is an Amazon ECS task.

", + "smithy.api#documentation": "

The ARN of the task definition to use if the event target is an Amazon ECS task.\n

", "smithy.api#required": {} } }, "TaskCount": { "target": "com.amazonaws.pipes#LimitMin1", "traits": { - "smithy.api#documentation": "

The number of tasks to create based on TaskDefinition. The default is 1.

" + "smithy.api#documentation": "

The number of tasks to create based on TaskDefinition. The default is\n 1.

" } }, "LaunchType": { "target": "com.amazonaws.pipes#LaunchType", "traits": { - "smithy.api#documentation": "

Specifies the launch type on which your task is running. The launch type that you specify\n here must match one of the launch type (compatibilities) of the target task. The\n FARGATE value is supported only in the Regions where Fargate with Amazon ECS\n is supported. For more information, see Fargate on Amazon ECS in\n the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Specifies the launch type on which your task is running. The launch type that you\n specify here must match one of the launch type (compatibilities) of the target task. The\n FARGATE value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see\n Fargate on Amazon ECS in the Amazon Elastic Container Service Developer Guide.

" } }, "NetworkConfiguration": { "target": "com.amazonaws.pipes#NetworkConfiguration", "traits": { - "smithy.api#documentation": "

Use this structure if the Amazon ECS task uses the awsvpc network mode. This\n structure specifies the VPC subnets and security groups associated with the task, and whether\n a public IP address is to be used. This structure is required if LaunchType is\n FARGATE because the awsvpc mode is required for Fargate\n tasks.

\n

If you specify NetworkConfiguration when the target ECS task does not use the\n awsvpc network mode, the task fails.

" + "smithy.api#documentation": "

Use this structure if the Amazon ECS task uses the awsvpc network\n mode. This structure specifies the VPC subnets and security groups associated with the\n task, and whether a public IP address is to be used. This structure is required if\n LaunchType is FARGATE because the awsvpc mode is\n required for Fargate tasks.

\n

If you specify NetworkConfiguration when the target ECS task does not use\n the awsvpc network mode, the task fails.

" } }, "PlatformVersion": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

Specifies the platform version for the task. Specify only the numeric portion of the\n platform version, such as 1.1.0.

\n

This structure is used only if LaunchType is FARGATE. For more\n information about valid platform versions, see Fargate Platform\n Versions in the Amazon Elastic Container Service Developer\n Guide.

" + "smithy.api#documentation": "

Specifies the platform version for the task. Specify only the numeric portion of the\n platform version, such as 1.1.0.

\n

This structure is used only if LaunchType is FARGATE. For more\n information about valid platform versions, see Fargate\n Platform Versions in the Amazon Elastic Container Service Developer\n Guide.

" } }, "Group": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.

" + "smithy.api#documentation": "

Specifies an Amazon ECS task group for the task. The maximum length is 255\n characters.

" } }, "CapacityProviderStrategy": { "target": "com.amazonaws.pipes#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "

The capacity provider strategy to use for the task.

\n

If a capacityProviderStrategy is specified, the launchType\n parameter must be omitted. If no capacityProviderStrategy or launchType is\n specified, the defaultCapacityProviderStrategy for the cluster is used.

" + "smithy.api#documentation": "

The capacity provider strategy to use for the task.

\n

If a capacityProviderStrategy is specified, the launchType\n parameter must be omitted. If no capacityProviderStrategy or launchType is\n specified, the defaultCapacityProviderStrategy for the cluster is used.\n

" } }, "EnableECSManagedTags": { "target": "com.amazonaws.pipes#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Specifies whether to enable Amazon ECS managed tags for the task. For more information,\n see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer\n Guide.

" + "smithy.api#documentation": "

Specifies whether to enable Amazon ECS managed tags for the task. For more\n information, see Tagging Your Amazon ECS Resources in the Amazon Elastic Container Service Developer Guide.

" } }, "EnableExecuteCommand": { @@ -2803,7 +3034,7 @@ "PropagateTags": { "target": "com.amazonaws.pipes#PropagateTags", "traits": { - "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no value\n is specified, the tags are not propagated. Tags can only be propagated to the task during task\n creation. To add tags to a task after task creation, use the TagResource API action.

" + "smithy.api#documentation": "

Specifies whether to propagate the tags from the task definition to the task. If no\n value is specified, the tags are not propagated. Tags can only be propagated to the task\n during task creation. To add tags to a task after task creation, use the\n TagResource API action.

" } }, "ReferenceId": { @@ -2821,7 +3052,7 @@ "Tags": { "target": "com.amazonaws.pipes#TagList", "traits": { - "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize them. Each tag\n consists of a key and an optional value, both of which you define. To learn more, see RunTask in the Amazon ECS API Reference.

" + "smithy.api#documentation": "

The metadata that you apply to the task to help you categorize and organize them. Each\n tag consists of a key and an optional value, both of which you define. To learn more, see\n RunTask in the Amazon ECS API Reference.

" } } }, @@ -2835,13 +3066,13 @@ "EndpointId": { "target": "com.amazonaws.pipes#EventBridgeEndpointId", "traits": { - "smithy.api#documentation": "

The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.

" + "smithy.api#documentation": "

The URL subdomain of the endpoint. For example, if the URL for Endpoint is\n https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is\n abcde.veo.

" } }, "DetailType": { "target": "com.amazonaws.pipes#EventBridgeDetailType", "traits": { - "smithy.api#documentation": "

A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.

" + "smithy.api#documentation": "

A free-form string, with a maximum of 128 characters, used to decide what fields to\n expect in the event detail.

" } }, "Source": { @@ -2853,7 +3084,7 @@ "Resources": { "target": "com.amazonaws.pipes#EventBridgeEventResourceList", "traits": { - "smithy.api#documentation": "

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily\n concerns. Any number, including zero, may be present.

" + "smithy.api#documentation": "

Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event\n primarily concerns. Any number, including zero, may be present.

" } }, "Time": { @@ -2873,19 +3104,19 @@ "PathParameterValues": { "target": "com.amazonaws.pipes#PathParameterList", "traits": { - "smithy.api#documentation": "

The path parameter values to be used to populate API Gateway REST API or EventBridge\n ApiDestination path wildcards (\"*\").

" + "smithy.api#documentation": "

The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards (\"*\").

" } }, "HeaderParameters": { "target": "com.amazonaws.pipes#HeaderParametersMap", "traits": { - "smithy.api#documentation": "

The headers that need to be sent as part of request invoking the API Gateway REST API or\n EventBridge ApiDestination.

" + "smithy.api#documentation": "

The headers that need to be sent as part of request invoking the API Gateway REST\n API or EventBridge ApiDestination.

" } }, "QueryStringParameters": { "target": "com.amazonaws.pipes#QueryStringParametersMap", "traits": { - "smithy.api#documentation": "

The query string keys/values that need to be sent as part of request invoking the API Gateway \n REST API or EventBridge ApiDestination.

" + "smithy.api#documentation": "

The query string keys/values that need to be sent as part of request invoking the\n API Gateway REST API or EventBridge ApiDestination.

" } } }, @@ -2914,7 +3145,7 @@ "PartitionKey": { "target": "com.amazonaws.pipes#KinesisPartitionKey", "traits": { - "smithy.api#documentation": "

Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters \n for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. \n Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this \n hashing mechanism, all data records with the same partition key map to the same shard within the stream.

", + "smithy.api#documentation": "

Determines which shard in the stream the data record is assigned to. Partition keys are\n Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the\n partition key and associated data to a specific shard. Specifically, an MD5 hash function\n is used to map partition keys to 128-bit integer values and to map associated data records\n to shards. As a result of this hashing mechanism, all data records with the same partition\n key map to the same shard within the stream.

", "smithy.api#required": {} } } @@ -2929,7 +3160,7 @@ "InvocationType": { "target": "com.amazonaws.pipes#PipeTargetInvocationType", "traits": { - "smithy.api#documentation": "

Specify whether to invoke the function synchronously or asynchronously.

  • REQUEST_RESPONSE (default) - Invoke synchronously. This corresponds to the RequestResponse option in the InvocationType parameter for the Lambda Invoke API.
  • FIRE_AND_FORGET - Invoke asynchronously. This corresponds to the Event option in the InvocationType parameter for the Lambda Invoke API.

For more information, see Invocation types in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Specify whether to invoke the function synchronously or asynchronously.

  • REQUEST_RESPONSE (default) - Invoke synchronously. This corresponds to the RequestResponse option in the InvocationType parameter for the Lambda Invoke API.
  • FIRE_AND_FORGET - Invoke asynchronously. This corresponds to the Event option in the InvocationType parameter for the Lambda Invoke API.

For more information, see Invocation\n types in the Amazon EventBridge User Guide.

" } } }, @@ -2943,7 +3174,7 @@ "InputTemplate": { "target": "com.amazonaws.pipes#InputTemplate", "traits": { - "smithy.api#documentation": "

Valid JSON text passed to the target. In this case, nothing from the event itself is\n passed to the target. For more information, see The JavaScript Object Notation (JSON) Data\n Interchange Format.

\n

To remove an input template, specify an empty string.

" + "smithy.api#documentation": "

Valid JSON text passed to the target. In this case, nothing from the event itself is\n passed to the target. For more information, see The JavaScript Object Notation (JSON)\n Data Interchange Format.

\n

To remove an input template, specify an empty string.

" } }, "LambdaFunctionParameters": { @@ -3011,6 +3242,12 @@ "traits": { "smithy.api#documentation": "

The parameters for using an CloudWatch Logs log stream as a target.

" } + }, + "TimestreamParameters": { + "target": "com.amazonaws.pipes#PipeTargetTimestreamParameters", + "traits": { + "smithy.api#documentation": "

The parameters for using a Timestream for LiveAnalytics table as a\n target.

" + } } }, "traits": { @@ -3029,7 +3266,7 @@ "Database": { "target": "com.amazonaws.pipes#Database", "traits": { - "smithy.api#documentation": "

The name of the database. Required when authenticating using temporary credentials.

", + "smithy.api#documentation": "

The name of the database. Required when authenticating using temporary\n credentials.

", "smithy.api#required": {} } }, @@ -3070,7 +3307,7 @@ "PipelineParameterList": { "target": "com.amazonaws.pipes#SageMakerPipelineParameterList", "traits": { - "smithy.api#documentation": "

List of Parameter names and values for SageMaker Model Building Pipeline execution.

" + "smithy.api#documentation": "

List of Parameter names and values for SageMaker Model Building Pipeline\n execution.

" } } }, @@ -3104,7 +3341,7 @@ "InvocationType": { "target": "com.amazonaws.pipes#PipeTargetInvocationType", "traits": { - "smithy.api#documentation": "

Specify whether to invoke the Step Functions state machine synchronously or asynchronously.

  • REQUEST_RESPONSE (default) - Invoke synchronously. For more information, see StartSyncExecution in the Step Functions API Reference. REQUEST_RESPONSE is not supported for STANDARD state machine workflows.
  • FIRE_AND_FORGET - Invoke asynchronously. For more information, see StartExecution in the Step Functions API Reference.

For more information, see Invocation types in the Amazon EventBridge User Guide.

" + "smithy.api#documentation": "

Specify whether to invoke the Step Functions state machine synchronously or\n asynchronously.

  • REQUEST_RESPONSE (default) - Invoke synchronously. For more information, see StartSyncExecution in the Step Functions API Reference. REQUEST_RESPONSE is not supported for STANDARD state machine workflows.
  • FIRE_AND_FORGET - Invoke asynchronously. For more information, see StartExecution in the Step Functions API Reference.

For more information, see Invocation\n types in the Amazon EventBridge User Guide.

" } } }, @@ -3112,6 +3349,65 @@ "smithy.api#documentation": "

The parameters for using a Step Functions state machine as a target.

" } }, + "com.amazonaws.pipes#PipeTargetTimestreamParameters": { + "type": "structure", + "members": { + "TimeValue": { + "target": "com.amazonaws.pipes#TimeValue", + "traits": { + "smithy.api#documentation": "

Dynamic path to the source data field that represents the time value for your data.

", + "smithy.api#required": {} + } + }, + "EpochTimeUnit": { + "target": "com.amazonaws.pipes#EpochTimeUnit", + "traits": { + "smithy.api#documentation": "

The granularity of the time units used. Default is MILLISECONDS.

\n

Required if TimeFieldType is specified as EPOCH.

" + } + }, + "TimeFieldType": { + "target": "com.amazonaws.pipes#TimeFieldType", + "traits": { + "smithy.api#documentation": "

The type of time value used.

\n

The default is EPOCH.

" + } + }, + "TimestampFormat": { + "target": "com.amazonaws.pipes#TimestampFormat", + "traits": { + "smithy.api#documentation": "

How to format the timestamps. For example,\n YYYY-MM-DDThh:mm:ss.sssTZD.

\n

Required if TimeFieldType is specified as\n TIMESTAMP_FORMAT.

" + } + }, + "VersionValue": { + "target": "com.amazonaws.pipes#VersionValue", + "traits": { + "smithy.api#documentation": "

64 bit version value or source data field that represents the version value for your data.

\n

Write requests with a higher version number will update the existing measure values of the record and version. \n In cases where the measure value is the same, the version will still be updated.

\n

Default value is 1.

\n

Timestream for LiveAnalytics does not support updating partial measure values in a record.

\n

Write requests for duplicate data with a\n higher version number will update the existing measure value and version. In cases where\n the measure value is the same, Version will still be updated. Default value is\n 1.

\n \n

\n Version must be 1 or greater, or you will receive a\n ValidationException error.

\n
", + "smithy.api#required": {} + } + }, + "DimensionMappings": { + "target": "com.amazonaws.pipes#DimensionMappings", + "traits": { + "smithy.api#documentation": "

Map source data to dimensions in the target Timestream for LiveAnalytics\n table.

\n

For more information, see Amazon Timestream for LiveAnalytics concepts\n

", + "smithy.api#required": {} + } + }, + "SingleMeasureMappings": { + "target": "com.amazonaws.pipes#SingleMeasureMappings", + "traits": { + "smithy.api#documentation": "

Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table.

" + } + }, + "MultiMeasureMappings": { + "target": "com.amazonaws.pipes#MultiMeasureMappings", + "traits": { + "smithy.api#documentation": "

Maps multiple measures from the source event to the same record in the specified Timestream for LiveAnalytics table.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The parameters for using a Timestream for LiveAnalytics table as a\n target.

" + } + }, "com.amazonaws.pipes#Pipes": { "type": "service", "version": "2015-10-07", @@ -3191,7 +3487,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "

Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing \n event driven architectures. This helps ensures consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. \n To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.

", + "smithy.api#documentation": "

Amazon EventBridge Pipes connects event sources to targets. Pipes reduces the need for specialized knowledge and integration code when developing event-driven architectures. This helps ensure consistency across your company’s applications. With Pipes, the target can be any available EventBridge target. To set up a pipe, you select the event source, add optional event filtering, define optional enrichment, and select the target for the event data.

", "smithy.api#title": "Amazon EventBridge Pipes", "smithy.api#xmlNamespace": { "uri": "http://events.amazonaws.com/doc/2015-10-07" @@ -3868,18 +4164,18 @@ "type": { "target": "com.amazonaws.pipes#PlacementConstraintType", "traits": { - "smithy.api#documentation": "

The type of constraint. Use distinctInstance to ensure that each task in a particular\n group is running on a different container instance. Use memberOf to restrict the selection to\n a group of valid candidates.

" + "smithy.api#documentation": "

The type of constraint. Use distinctInstance to ensure that each task in a particular\n group is running on a different container instance. Use memberOf to restrict the selection\n to a group of valid candidates.

" } }, "expression": { "target": "com.amazonaws.pipes#PlacementConstraintExpression", "traits": { - "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. You cannot specify an\n expression if the constraint type is distinctInstance. To learn more, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.\n

" + "smithy.api#documentation": "

A cluster query language expression to apply to the constraint. You cannot specify an\n expression if the constraint type is distinctInstance. To learn more, see\n Cluster Query\n Language in the Amazon Elastic Container Service Developer Guide.

" } } }, "traits": { - "smithy.api#documentation": "

An object representing a constraint on task placement. To learn more, see Task Placement Constraints in the Amazon Elastic Container Service Developer\n Guide.

" + "smithy.api#documentation": "

An object representing a constraint on task placement. To learn more, see Task Placement\n Constraints in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.pipes#PlacementConstraintExpression": { @@ -3937,18 +4233,18 @@ "type": { "target": "com.amazonaws.pipes#PlacementStrategyType", "traits": { - "smithy.api#documentation": "

The type of placement strategy. The random placement strategy randomly places tasks on\n available candidates. The spread placement strategy spreads placement across available\n candidates evenly based on the field parameter. The binpack strategy places tasks on available\n candidates that have the least available amount of the resource that is specified with the\n field parameter. For example, if you binpack on memory, a task is placed on the instance with\n the least amount of remaining memory (but still enough to run the task).

" + "smithy.api#documentation": "

The type of placement strategy. The random placement strategy randomly places tasks on\n available candidates. The spread placement strategy spreads placement across available\n candidates evenly based on the field parameter. The binpack strategy places tasks on\n available candidates that have the least available amount of the resource that is specified\n with the field parameter. For example, if you binpack on memory, a task is placed on the\n instance with the least amount of remaining memory (but still enough to run the task).\n

" } }, "field": { "target": "com.amazonaws.pipes#PlacementStrategyField", "traits": { - "smithy.api#documentation": "

The field to apply the placement strategy against. For the spread placement strategy,\n valid values are instanceId (or host, which has the same effect), or any platform or custom\n attribute that is applied to a container instance, such as attribute:ecs.availability-zone.\n For the binpack placement strategy, valid values are cpu and memory. For the random placement\n strategy, this field is not used.

" + "smithy.api#documentation": "

The field to apply the placement strategy against. For the spread placement strategy,\n valid values are instanceId (or host, which has the same effect), or any platform or custom\n attribute that is applied to a container instance, such as attribute:ecs.availability-zone.\n For the binpack placement strategy, valid values are cpu and memory. For the random\n placement strategy, this field is not used.

" } } }, "traits": { - "smithy.api#documentation": "

The task placement strategy for a task or service. To learn more, see Task Placement Strategies in the Amazon Elastic Container Service Service Developer\n Guide.

" + "smithy.api#documentation": "

The task placement strategy for a task or service. To learn more, see Task Placement\n Strategies in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.pipes#PlacementStrategyField": { @@ -4090,13 +4386,13 @@ "BucketName": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.

" + "smithy.api#documentation": "

The name of the Amazon S3 bucket to which EventBridge delivers the log\n records for the pipe.

" } }, "Prefix": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

The prefix text with which to begin Amazon S3 log object names.

\n

For more information, see Organizing objects using prefixes\n in the Amazon Simple Storage Service User Guide.

" + "smithy.api#documentation": "

The prefix text with which to begin Amazon S3 log object names.

\n

For more information, see Organizing objects using\n prefixes in the Amazon Simple Storage Service User Guide.

" } }, "BucketOwner": { @@ -4108,7 +4404,7 @@ "OutputFormat": { "target": "com.amazonaws.pipes#S3OutputFormat", "traits": { - "smithy.api#documentation": "

The format EventBridge uses for the log records.

\n " + "smithy.api#documentation": "

The format EventBridge uses for the log records.

\n " } } }, @@ -4122,7 +4418,7 @@ "BucketName": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

Specifies the name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.

", + "smithy.api#documentation": "

Specifies the name of the Amazon S3 bucket to which EventBridge delivers\n the log records for the pipe.

", "smithy.api#length": { "min": 3, "max": 63 @@ -4133,7 +4429,7 @@ "BucketOwner": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.

", + "smithy.api#documentation": "

Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which\n EventBridge delivers the log records for the pipe.

", "smithy.api#pattern": "^\\d{12}$", "smithy.api#required": {} } @@ -4141,13 +4437,13 @@ "OutputFormat": { "target": "com.amazonaws.pipes#S3OutputFormat", "traits": { - "smithy.api#documentation": "

How EventBridge should format the log records.

\n " + "smithy.api#documentation": "

How EventBridge should format the log records.

\n " } }, "Prefix": { "target": "com.amazonaws.pipes#String", "traits": { - "smithy.api#documentation": "

Specifies any prefix text with which to begin Amazon S3 log object names.

\n

You can use prefixes to organize the data that you store in Amazon S3 buckets. \n A prefix is a string of characters at the beginning of the object key name. \n A prefix can be any length, subject to the maximum length of the object key name (1,024 bytes). \n For more information, see Organizing objects using prefixes\n in the Amazon Simple Storage Service User Guide.

", + "smithy.api#documentation": "

Specifies any prefix text with which to begin Amazon S3 log object names.

\n

You can use prefixes to organize the data that you store in Amazon S3 buckets. A\n prefix is a string of characters at the beginning of the object key name. A prefix can be\n any length, subject to the maximum length of the object key name (1,024 bytes). For more\n information, see Organizing objects using\n prefixes in the Amazon Simple Storage Service User Guide.

", "smithy.api#length": { "max": 256 } @@ -4183,14 +4479,14 @@ "Name": { "target": "com.amazonaws.pipes#SageMakerPipelineParameterName", "traits": { - "smithy.api#documentation": "

Name of parameter to start execution of a SageMaker Model Building Pipeline.

", + "smithy.api#documentation": "

Name of parameter to start execution of a SageMaker Model Building\n Pipeline.

", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.pipes#SageMakerPipelineParameterValue", "traits": { - "smithy.api#documentation": "

Value of parameter to start execution of a SageMaker Model Building Pipeline.

", + "smithy.api#documentation": "

Value of parameter to start execution of a SageMaker Model Building\n Pipeline.

", "smithy.api#required": {} } } @@ -4339,18 +4635,18 @@ "Subnets": { "target": "com.amazonaws.pipes#SubnetIds", "traits": { - "smithy.api#documentation": "

Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

" + "smithy.api#documentation": "

Specifies the subnets associated with the stream. These subnets must all be in the same\n VPC. You can specify as many as 16 subnets.

" } }, "SecurityGroup": { "target": "com.amazonaws.pipes#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many \n as five security groups. If you do not specify a security group, the default security group for the VPC is used.

" + "smithy.api#documentation": "

Specifies the security groups associated with the stream. These security groups must all\n be in the same VPC. You can specify as many as five security groups. If you do not specify\n a security group, the default security group for the VPC is used.

" } } }, "traits": { - "smithy.api#documentation": "

This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.

" + "smithy.api#documentation": "

This structure specifies the VPC subnets and security groups for the stream, and whether\n a public IP address is to be used.

" } }, "com.amazonaws.pipes#SelfManagedKafkaStartPosition": { @@ -4412,6 +4708,47 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.pipes#SingleMeasureMapping": { + "type": "structure", + "members": { + "MeasureValue": { + "target": "com.amazonaws.pipes#MeasureValue", + "traits": { + "smithy.api#documentation": "

Dynamic path of the source field to map to the measure in the record.

", + "smithy.api#required": {} + } + }, + "MeasureValueType": { + "target": "com.amazonaws.pipes#MeasureValueType", + "traits": { + "smithy.api#documentation": "

Data type of the source field.

", + "smithy.api#required": {} + } + }, + "MeasureName": { + "target": "com.amazonaws.pipes#MeasureName", + "traits": { + "smithy.api#documentation": "

Target measure name for the measurement attribute in the Timestream table.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Maps a single source data field to a single record in the specified Timestream\n for LiveAnalytics table.

\n

For more information, see Amazon Timestream for LiveAnalytics concepts\n

" + } + }, + "com.amazonaws.pipes#SingleMeasureMappings": { + "type": "list", + "member": { + "target": "com.amazonaws.pipes#SingleMeasureMapping" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 8192 + } + } + }, "com.amazonaws.pipes#Sql": { "type": "string", "traits": { @@ -4722,7 +5059,7 @@ } }, "traits": { - "smithy.api#documentation": "

A key-value pair associated with an Amazon Web Services resource. In EventBridge, rules and event buses\n support tagging.

" + "smithy.api#documentation": "

A key-value pair associated with an Amazon Web Services resource. In EventBridge,\n rules and event buses support tagging.

" } }, "com.amazonaws.pipes#TagKey": { @@ -4793,7 +5130,7 @@ "aws:RequestTag/${TagKey}", "aws:ResourceTag/${TagKey}" ], - "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified pipe. Tags can\n help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user permission to access or change only resources with certain tag\n values.

\n

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of\n characters.

\n

You can use the TagResource action with a pipe that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n pipe. If you specify a tag key that is already associated with the pipe, the new tag\n value that you specify replaces the previous value for that tag.

\n

You can associate as many as 50 tags with a pipe.

", + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified pipe. Tags can help you\n organize and categorize your resources. You can also use them to scope user permissions by\n granting a user permission to access or change only resources with certain tag\n values.

\n

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly\n as strings of characters.

\n

You can use the TagResource action with a pipe that already has tags. If\n you specify a new tag key, this tag is appended to the list of tags associated with the\n pipe. If you specify a tag key that is already associated with the pipe, the new tag value\n that you specify replaces the previous value for that tag.

\n

You can associate as many as 50 tags with a pipe.

", "smithy.api#http": { "uri": "/tags/{resourceArn}", "method": "POST" @@ -4870,7 +5207,7 @@ "retryAfterSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

The number of seconds to wait before retrying the action that caused the exception.

", + "smithy.api#documentation": "

The number of seconds to wait before retrying the action that caused the\n exception.

", "smithy.api#httpHeader": "Retry-After" } } @@ -4881,9 +5218,42 @@ "smithy.api#httpError": 429 } }, + "com.amazonaws.pipes#TimeFieldType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EPOCH", + "name": "EPOCH" + }, + { + "value": "TIMESTAMP_FORMAT", + "name": "TIMESTAMP_FORMAT" + } + ] + } + }, + "com.amazonaws.pipes#TimeValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.pipes#Timestamp": { "type": "timestamp" }, + "com.amazonaws.pipes#TimestampFormat": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.pipes#URI": { "type": "string", "traits": { @@ -4995,7 +5365,7 @@ "aws:RequestTag/${TagKey}", "aws:TagKeys" ], - "smithy.api#documentation": "

Update an existing pipe. When you call UpdatePipe, EventBridge only the updates fields you have specified in the request; the rest remain unchanged.\n The exception to this is if you modify any Amazon Web Services-service specific fields in the SourceParameters, EnrichmentParameters, or \n TargetParameters objects. For example, DynamoDBStreamParameters or EventBridgeEventBusParameters. \n EventBridge updates the fields in these objects atomically as one and overrides existing values. \n This is by design, and means that if you don't specify an optional field in one of these Parameters objects, EventBridge sets that field to its system-default value during the update.

\n

For more information about pipes, see \n Amazon EventBridge Pipes in the Amazon EventBridge User Guide.

", + "smithy.api#documentation": "

Update an existing pipe. When you call UpdatePipe, EventBridge only updates the\n fields you have specified in the request; the rest remain unchanged. The exception\n to this is if you modify any Amazon Web Services-service specific fields in the\n SourceParameters, EnrichmentParameters, or\n TargetParameters objects. For example,\n DynamoDBStreamParameters or EventBridgeEventBusParameters.\n EventBridge updates the fields in these objects atomically as one and overrides existing\n values. This is by design, and means that if you don't specify an optional field in one of\n these Parameters objects, EventBridge sets that field to its system-default\n value during the update.

\n

For more information about pipes, see \n Amazon EventBridge Pipes in the Amazon EventBridge User Guide.

", "smithy.api#http": { "method": "PUT", "uri": "/v1/pipes/{Name}", @@ -5322,7 +5692,7 @@ "SelfManagedKafkaParameters": { "target": "com.amazonaws.pipes#UpdatePipeSourceSelfManagedKafkaParameters", "traits": { - "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

" + "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

\n

A self-managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself and those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" } } }, @@ -5392,7 +5762,7 @@ } }, "traits": { - "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

" + "smithy.api#documentation": "

The parameters for using a self-managed Apache Kafka stream as a source.

\n

A self-managed cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.\n This includes both clusters you manage yourself and those hosted by a third-party\n provider, such as Confluent\n Cloud, CloudKarafka, or Redpanda. For more information, see Apache Kafka streams as a source in the Amazon EventBridge User Guide.

" } }, "com.amazonaws.pipes#UpdatePipeSourceSqsQueueParameters": { @@ -5424,7 +5794,7 @@ "fieldList": { "target": "com.amazonaws.pipes#ValidationExceptionFieldList", "traits": { - "smithy.api#documentation": "

The list of fields for which validation failed and the corresponding failure messages.

" + "smithy.api#documentation": "

The list of fields for which validation failed and the corresponding failure\n messages.

" } } }, @@ -5461,6 +5831,15 @@ "member": { "target": "com.amazonaws.pipes#ValidationExceptionField" } + }, + "com.amazonaws.pipes#VersionValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } } } } \ No newline at end of file diff --git a/models/polly.json b/models/polly.json index 601b7c2f54..5b9a83199e 100644 --- a/models/polly.json +++ b/models/polly.json @@ -158,7 +158,20 @@ "method": "GET", "uri": "/v1/voices", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeVoicesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.polly#DescribeVoicesInput": { @@ -167,7 +180,7 @@ "Engine": { "target": "com.amazonaws.polly#Engine", "traits": { - "smithy.api#documentation": "

Specifies the engine (standard, neural or\n long-form) used by Amazon Polly when processing input text for\n speech synthesis.

", + "smithy.api#documentation": "

Specifies the engine (standard, neural,\n long-form or generative) used by Amazon Polly when\n processing input text for speech synthesis.

", "smithy.api#httpQuery": "Engine" } }, @@ -238,6 +251,12 @@ "traits": { "smithy.api#enumValue": "long-form" } + }, + "GENERATIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "generative" + } } } }, @@ -2288,7 +2307,7 @@ "com.amazonaws.polly#SnsTopicArn": { "type": "string", "traits": { - "smithy.api#pattern": "^arn:aws(-(cn|iso(-b)?|us-gov))?:sns:[a-z0-9_-]{1,50}:\\d{12}:[a-zA-Z0-9_-]{1,256}$" + "smithy.api#pattern": "^arn:aws(-(cn|iso(-b)?|us-gov))?:sns:[a-z0-9_-]{1,50}:\\d{12}:[a-zA-Z0-9_-]{1,251}([a-zA-Z0-9_-]{0,5}|\\.fifo)$" } }, "com.amazonaws.polly#SpeechMarkType": { @@ -2406,7 +2425,7 @@ "Engine": { "target": "com.amazonaws.polly#Engine", "traits": { - "smithy.api#documentation": "

Specifies the engine (standard, neural or\n long-form) for Amazon Polly to use when processing input text for\n speech synthesis. Using a voice that is not supported for the engine\n selected will result in an error.

" + "smithy.api#documentation": "

Specifies the engine (standard, neural,\n long-form or generative) for Amazon Polly to use\n when processing input text for speech synthesis. Using a voice that\n is not supported for the engine selected will result in an error.

" } }, "LanguageCode": { @@ -2444,7 +2463,7 @@ "SampleRate": { "target": "com.amazonaws.polly#SampleRate", "traits": { - "smithy.api#documentation": "

The audio frequency specified in Hz.

\n

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\",\n and \"24000\". The default value for standard voices is \"22050\". The default\n value for neural voices is \"24000\". The default value for long-form voices\n is \"24000\".

\n

Valid values for pcm are \"8000\" and \"16000\" The default value is\n \"16000\".

" + "smithy.api#documentation": "

The audio frequency specified in Hz.

\n

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\",\n and \"24000\". The default value for standard voices is \"22050\". The default\n value for neural voices is \"24000\". The default value for long-form voices\n is \"24000\". The default value for generative voices is \"24000\".

\n

Valid values for pcm are \"8000\" and \"16000\". The default value is\n \"16000\".

" } }, "SnsTopicArn": { @@ -2504,7 +2523,7 @@ "Engine": { "target": "com.amazonaws.polly#Engine", "traits": { - "smithy.api#documentation": "

Specifies the engine (standard, neural or\n long-form) for Amazon Polly to use when processing input text for\n speech synthesis. Using a voice that is not supported for the engine\n selected will result in an error.

" + "smithy.api#documentation": "

Specifies the engine (standard, neural,\n long-form or generative) for Amazon Polly to use\n when processing input text for speech synthesis. Using a voice that\n is not supported for the engine selected will result in an error.

" } }, "TaskId": { @@ -2565,7 +2584,7 @@ "SampleRate": { "target": "com.amazonaws.polly#SampleRate", "traits": { - "smithy.api#documentation": "

The audio frequency specified in Hz.

\n

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\",\n and \"24000\". The default value for standard voices is \"22050\". The default\n value for neural voices is \"24000\". The default value for long-form voices\n is \"24000\".

\n

Valid values for pcm are \"8000\" and \"16000\" The default value is\n \"16000\".

" + "smithy.api#documentation": "

The audio frequency specified in Hz.

\n

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\",\n and \"24000\". The default value for standard voices is \"22050\". The default\n value for neural voices is \"24000\". The default value for long-form voices\n is \"24000\". The default value for generative voices is \"24000\".

\n

Valid values for pcm are \"8000\" and \"16000\". The default value is\n \"16000\".

" } }, "SpeechMarkTypes": { @@ -2689,7 +2708,7 @@ "Engine": { "target": "com.amazonaws.polly#Engine", "traits": { - "smithy.api#documentation": "

Specifies the engine (standard, neural or\n long-form) for Amazon Polly to use when processing input text for\n speech synthesis. For information on Amazon Polly voices and which voices are\n available for each engine, see Available Voices.

\n

\n NTTS-only voices\n

\n

When using NTTS-only voices such as Kevin (en-US), this parameter is\n required and must be set to neural. If the engine is not\n specified, or is set to standard, this will result in an\n error.

\n

\n long-form-only voices\n

\n

When using long-form-only voices such as Danielle (en-US), this\n parameter is required and must be set to long-form. If the\n engine is not specified, or is set to standard or\n neural, this will result in an error.

\n

Type: String

\n

Valid Values: standard | neural |\n long-form\n

\n

Required: Yes

\n

\n Standard voices\n

\n

For standard voices, this is not required; the engine parameter\n defaults to standard. If the engine is not specified, or is\n set to standard and an NTTS-only voice is selected, this will\n result in an error.

" + "smithy.api#documentation": "

Specifies the engine (standard, neural,\n long-form, or generative) for Amazon Polly\n to use when processing input text for speech synthesis. Provide an engine\n that is supported by the voice you select. If you don't provide an engine,\n the standard engine is selected by default. If a chosen voice isn't supported\n by the standard engine, this will result in an error. For information on\n Amazon Polly voices and which voices are available for each engine, see Available Voices.

\n

Type: String

\n

Valid Values: standard | neural |\n long-form | generative\n

\n

Required: Yes

" } }, "LanguageCode": { @@ -2714,7 +2733,7 @@ "SampleRate": { "target": "com.amazonaws.polly#SampleRate", "traits": { - "smithy.api#documentation": "

The audio frequency specified in Hz.

\n

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\",\n and \"24000\". The default value for standard voices is \"22050\". The default\n value for neural voices is \"24000\". The default value for long-form voices\n is \"24000\".

\n

Valid values for pcm are \"8000\" and \"16000\" The default value is\n \"16000\".

" + "smithy.api#documentation": "

The audio frequency specified in Hz.

\n

The valid values for mp3 and ogg_vorbis are \"8000\", \"16000\", \"22050\",\n and \"24000\". The default value for standard voices is \"22050\". The default\n value for neural voices is \"24000\". The default value for long-form voices\n is \"24000\". The default value for generative voices is \"24000\".

\n

Valid values for pcm are \"8000\" and \"16000\". The default value is\n \"16000\".

" } }, "SpeechMarkTypes": { @@ -2918,7 +2937,7 @@ "SupportedEngines": { "target": "com.amazonaws.polly#EngineList", "traits": { - "smithy.api#documentation": "

Specifies which engines (standard, neural or\n long-form) are supported by a given voice.

" + "smithy.api#documentation": "

Specifies which engines (standard, neural,\n long-form or generative) are supported by a given voice.

" } } }, diff --git a/models/qbusiness.json b/models/qbusiness.json index a633282693..f8eab98893 100644 --- a/models/qbusiness.json +++ b/models/qbusiness.json @@ -760,7 +760,7 @@ "containsAny": { "target": "com.amazonaws.qbusiness#DocumentAttribute", "traits": { - "smithy.api#documentation": "

Returns true when a document contains any of the specified document\n attributes or metadata fields. Supported for the following document attribute value types: dateValue,\n longValue, stringListValue and\n stringValue.

" + "smithy.api#documentation": "

Returns true when a document contains any of the specified document\n attributes or metadata fields. Supported for the following document attribute value types:\n stringListValue.

" } }, "greaterThan": { @@ -1539,7 +1539,7 @@ "parentMessageId": { "target": "com.amazonaws.qbusiness#MessageId", "traits": { - "smithy.api#documentation": "

The identifier of the previous end user text input message in a conversation.

" + "smithy.api#documentation": "

The identifier of the previous system message in a conversation.

" } }, "attributeFilter": { @@ -1878,7 +1878,7 @@ "kms:DescribeKey", "kms:CreateGrant" ], - "smithy.api#documentation": "

Creates an Amazon Q Business application.

\n \n

There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are \n also available in Amazon Q Business Lite. For information on what's included in \n Amazon Q Business Lite and what's included in \n Amazon Q Business Pro, see Amazon Q Business tiers. \n You must use the Amazon Q Business console to assign subscription tiers to users.

\n
", + "smithy.api#documentation": "

Creates an Amazon Q Business application.

\n \n

There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are \n also available in Amazon Q Business Lite. For information on what's included in \n Amazon Q Business Lite and what's included in \n Amazon Q Business Pro, see Amazon Q Business tiers. \n You must use the Amazon Q Business console to assign subscription tiers to users.

\n
", "smithy.api#http": { "uri": "/applications", "method": "POST" @@ -1940,6 +1940,12 @@ "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

An option to allow end users to upload files directly during chat.

" } + }, + "qAppsConfiguration": { + "target": "com.amazonaws.qbusiness#QAppsConfiguration", + "traits": { + "smithy.api#documentation": "

An option to allow end users to create and use Amazon Q Apps in the web experience.

" + } } }, "traits": { @@ -2178,7 +2184,7 @@ "type": { "target": "com.amazonaws.qbusiness#IndexType", "traits": { - "smithy.api#documentation": "

The index type that's suitable for your needs. For more information on what's included\n in each type of index or index tier, see Amazon Q Business\n tiers.

" + "smithy.api#documentation": "

The index type that's suitable for your needs. For more information on what's included\n in each type of index, see Amazon Q Business\n tiers.

" } }, "description": { @@ -5246,6 +5252,12 @@ "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

Settings for whether end users can upload files directly during chat.

" } + }, + "qAppsConfiguration": { + "target": "com.amazonaws.qbusiness#QAppsConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for whether end users can create and use Amazon Q Apps in the web experience.

" + } } }, "traits": { @@ -9046,6 +9058,38 @@ "smithy.api#output": {} } }, + "com.amazonaws.qbusiness#QAppsConfiguration": { + "type": "structure", + "members": { + "qAppsControlMode": { + "target": "com.amazonaws.qbusiness#QAppsControlMode", + "traits": { + "smithy.api#documentation": "

Status information about whether end users can create and use Amazon Q Apps in the web experience.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration information about Amazon Q Apps. (preview feature)

" + } + }, + "com.amazonaws.qbusiness#QAppsControlMode": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.qbusiness#ReadAccessType": { "type": "enum", "members": { @@ -10371,6 +10415,12 @@ "traits": { "smithy.api#documentation": "

An option to allow end users to upload files directly during chat.

" } + }, + "qAppsConfiguration": { + "target": "com.amazonaws.qbusiness#QAppsConfiguration", + "traits": { + "smithy.api#documentation": "

An option to allow end users to create and use Amazon Q Apps in the web experience.

" + } } }, "traits": { diff --git a/models/quicksight.json b/models/quicksight.json index 63489f660e..2962f27681 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -19184,6 +19184,94 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#DescribeKeyRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#DescribeKeyRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#DescribeKeyRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes all customer managed key registrations in an Amazon QuickSight account.

", + "smithy.api#http": { + "method": "GET", + "uri": "/accounts/{AwsAccountId}/key-registration", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#DescribeKeyRegistrationRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that contains the customer managed key registration that you want to describe.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "DefaultKeyOnly": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Determines whether the request returns the default key only.

", + "smithy.api#httpQuery": "default-key-only" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#DescribeKeyRegistrationResponse": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that contains the customer managed key registration specified in the request.

" + } + }, + "KeyRegistration": { + "target": "com.amazonaws.quicksight#KeyRegistration", + "traits": { + "smithy.api#documentation": "

A list of RegisteredCustomerManagedKey objects in an Amazon QuickSight account.

" + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + }, + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#DescribeNamespace": { "type": "operation", "input": { @@ -21532,6 +21620,49 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.quicksight#FailedKeyRegistrationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#FailedKeyRegistrationEntry" + } + }, + "com.amazonaws.quicksight#FailedKeyRegistrationEntry": { + "type": "structure", + "members": { + "KeyArn": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The ARN of the KMS key that failed to update.

" + } + }, + "Message": { + "target": "com.amazonaws.quicksight#NonEmptyString", + "traits": { + "smithy.api#documentation": "

A message that provides information about why a FailedKeyRegistrationEntry error occurred.

", + "smithy.api#required": {} + } + }, + "StatusCode": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of a FailedKeyRegistrationEntry error.

", + "smithy.api#required": {} + } + }, + "SenderFault": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A boolean that indicates whether a FailedKeyRegistrationEntry resulted from user error. If the value of this property is True, the error was caused by user error. If the value of this property is False, the error occurred on the backend. If your job continues to fail with a False\n SenderFault value, contact Amazon Web Services Support.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An entry that appears when a KeyRegistration update to Amazon QuickSight fails.

" + } + }, "com.amazonaws.quicksight#FieldBasedTooltip": { "type": "structure", "members": { @@ -27836,6 +27967,12 @@ } } }, + "com.amazonaws.quicksight#KeyRegistration": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#RegisteredCustomerManagedKey" + } + }, "com.amazonaws.quicksight#LabelOptions": { "type": "structure", "members": { @@ -36601,6 +36738,9 @@ { "target": "com.amazonaws.quicksight#DescribeIpRestriction" }, + { + "target": "com.amazonaws.quicksight#DescribeKeyRegistration" + }, { "target": "com.amazonaws.quicksight#DescribeNamespace" }, @@ -36847,6 +36987,9 @@ { "target": "com.amazonaws.quicksight#UpdateIpRestriction" }, + { + "target": "com.amazonaws.quicksight#UpdateKeyRegistration" + }, { "target": "com.amazonaws.quicksight#UpdatePublicSharingSettings" }, @@ -38144,8 +38287,7 @@ "DatabaseUser": { "target": "com.amazonaws.quicksight#DatabaseUser", "traits": { - "smithy.api#documentation": "

The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser to True to create a new user with PUBLIC permissions.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser to True to create a new user with PUBLIC permissions.

" } }, "DatabaseGroups": { @@ -38734,7 +38876,7 @@ "UserRole": { "target": "com.amazonaws.quicksight#UserRole", "traits": { - "smithy.api#documentation": "

The Amazon QuickSight role for the user. The user role can be one of the\n\t\t\tfollowing:

\n
    \n
  • \n

    \n READER: A user who has read-only access to dashboards.

    \n
  • \n
  • \n

    \n AUTHOR: A user who can create data sources, datasets, analyses, and\n\t\t\t\t\tdashboards.

    \n
  • \n
  • \n

    \n ADMIN: A user who is an author, who can also manage Amazon QuickSight\n\t\t\t\t\tsettings.

    \n
  • \n
  • \n

    \n RESTRICTED_READER: This role isn't currently available for\n\t\t\t\t\tuse.

    \n
  • \n
  • \n

    \n RESTRICTED_AUTHOR: This role isn't currently available for\n\t\t\t\t\tuse.

    \n
  • \n
", + "smithy.api#documentation": "

The Amazon QuickSight role for the user. The user role can be one of the\n\t\t\tfollowing:

\n
    \n
  • \n

    \n READER: A user who has read-only access to dashboards.

    \n
  • \n
  • \n

    \n AUTHOR: A user who can create data sources, datasets, analyses, and\n\t\t\t\t\tdashboards.

    \n
  • \n
  • \n

    \n ADMIN: A user who is an author, who can also manage Amazon QuickSight\n\t\t\t\t\tsettings.

    \n
  • \n
  • \n

    \n READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

    \n
  • \n
  • \n

    \n RESTRICTED_READER: This role isn't currently available for\n\t\t\t\t\tuse.

    \n
  • \n
  • \n

    \n RESTRICTED_AUTHOR: This role isn't currently available for\n\t\t\t\t\tuse.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -38841,6 +38983,27 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#RegisteredCustomerManagedKey": { + "type": "structure", + "members": { + "KeyArn": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The ARN of the KMS key that is registered to an Amazon QuickSight account for encryption and decryption use.

" + } + }, + "DefaultKey": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates whether a RegisteredCustomerManagedKey is set as the default key for encryption and decryption use.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A customer managed key structure that contains the information listed below:

\n
    \n
  • \n

    \n KeyArn - The ARN of a KMS key that is registered to an Amazon QuickSight account for encryption and decryption use.

    \n
  • \n
  • \n

    \n DefaultKey - Indicates whether the current key is set as the default key for encryption and decryption use.

    \n
  • \n
" + } + }, "com.amazonaws.quicksight#RegisteredUserConsoleFeatureConfigurations": { "type": "structure", "members": { @@ -44060,6 +44223,35 @@ "smithy.api#documentation": "

The subtotal options.

" } }, + "com.amazonaws.quicksight#SuccessfulKeyRegistrationEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#SuccessfulKeyRegistrationEntry" + } + }, + "com.amazonaws.quicksight#SuccessfulKeyRegistrationEntry": { + "type": "structure", + "members": { + "KeyArn": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The ARN of the KMS key that is associated with the SuccessfulKeyRegistrationEntry entry.

", + "smithy.api#required": {} + } + }, + "StatusCode": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of a SuccessfulKeyRegistrationEntry entry.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A success entry that occurs when a KeyRegistration job is successfully applied to the Amazon QuickSight account.

" + } + }, "com.amazonaws.quicksight#Suffix": { "type": "string", "traits": { @@ -50682,6 +50874,86 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#UpdateKeyRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#UpdateKeyRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#UpdateKeyRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates a customer managed key in an Amazon QuickSight account.

", + "smithy.api#http": { + "method": "POST", + "uri": "/accounts/{AwsAccountId}/key-registration", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#UpdateKeyRegistrationRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that contains the customer managed key registration that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "KeyRegistration": { + "target": "com.amazonaws.quicksight#KeyRegistration", + "traits": { + "smithy.api#documentation": "

A list of RegisteredCustomerManagedKey objects to be updated to the Amazon QuickSight account.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#UpdateKeyRegistrationResponse": { + "type": "structure", + "members": { + "FailedKeyRegistration": { + "target": "com.amazonaws.quicksight#FailedKeyRegistrationEntries", + "traits": { + "smithy.api#documentation": "

A list of all customer managed key registrations that failed to update.

" + } + }, + "SuccessfulKeyRegistration": { + "target": "com.amazonaws.quicksight#SuccessfulKeyRegistrationEntries", + "traits": { + "smithy.api#documentation": "

A list of all customer managed key registrations that were successfully updated.

" + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#UpdateLinkPermissionList": { "type": "list", "member": { @@ -52225,7 +52497,7 @@ "Role": { "target": "com.amazonaws.quicksight#UserRole", "traits": { - "smithy.api#documentation": "

The Amazon QuickSight role of the user. The role can be one of the\n\t\t\tfollowing default security cohorts:

\n
    \n
  • \n

    \n READER: A user who has read-only access to dashboards.

    \n
  • \n
  • \n

    \n AUTHOR: A user who can create data sources, datasets, analyses, and\n\t\t\t\t\tdashboards.

    \n
  • \n
  • \n

    \n ADMIN: A user who is an author, who can also manage Amazon QuickSight\n\t\t\t\t\tsettings.

    \n
  • \n
  • \n

    \n READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q Business, can build stories with Amazon Q, and can generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

    \n
  • \n
\n

The name of the Amazon QuickSight role is invisible to the user except for the console\n\t screens dealing with permissions.

", + "smithy.api#documentation": "

The Amazon QuickSight role of the user. The role can be one of the\n\t\t\tfollowing default security cohorts:

\n
    \n
  • \n

    \n READER: A user who has read-only access to dashboards.

    \n
  • \n
  • \n

    \n AUTHOR: A user who can create data sources, datasets, analyses, and\n\t\t\t\t\tdashboards.

    \n
  • \n
  • \n

    \n ADMIN: A user who is an author, who can also manage Amazon QuickSight\n\t\t\t\t\tsettings.

    \n
  • \n
  • \n

    \n READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

    \n
  • \n
\n

The name of the Amazon QuickSight role is invisible to the user except for the console\n\t screens dealing with permissions.

", "smithy.api#required": {} } }, @@ -52503,7 +52775,7 @@ "Role": { "target": "com.amazonaws.quicksight#UserRole", "traits": { - "smithy.api#documentation": "

The Amazon QuickSight role for the user. The user role can be one of the\n following:.

\n
    \n
  • \n

    \n READER: A user who has read-only access to dashboards.

    \n
  • \n
  • \n

    \n AUTHOR: A user who can create data sources, datasets, analyses,\n and dashboards.

    \n
  • \n
  • \n

    \n ADMIN: A user who is an author, who can also manage Amazon\n Amazon QuickSight settings.

    \n
  • \n
  • \n

    \n READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q Business, can build stories with Amazon Q, and can generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

    \n
  • \n
  • \n

    \n RESTRICTED_READER: This role isn't currently available for\n use.

    \n
  • \n
  • \n

    \n RESTRICTED_AUTHOR: This role isn't currently available for\n use.

    \n
  • \n
" + "smithy.api#documentation": "

The Amazon QuickSight role for the user. The user role can be one of the\n following:

\n
    \n
  • \n

    \n READER: A user who has read-only access to dashboards.

    \n
  • \n
  • \n

    \n AUTHOR: A user who can create data sources, datasets, analyses,\n and dashboards.

    \n
  • \n
  • \n

    \n ADMIN: A user who is an author, who can also manage Amazon\n QuickSight settings.

    \n
  • \n
  • \n

    \n READER_PRO: Reader Pro adds Generative BI capabilities to the Reader role. Reader Pros have access to Amazon Q in Amazon QuickSight, can build stories with Amazon Q, and can generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n AUTHOR_PRO: Author Pro adds Generative BI capabilities to the Author role. Author Pros can author dashboards with natural language with Amazon Q, build stories with Amazon Q, create Topics for Q&A, and generate executive summaries from dashboards.

    \n
  • \n
  • \n

    \n ADMIN_PRO: Admin Pros are Author Pros who can also manage Amazon QuickSight administrative settings. Admin Pro users are billed at Author Pro pricing.

    \n
  • \n
  • \n

    \n RESTRICTED_READER: This role isn't currently available for\n use.

    \n
  • \n
  • \n

    \n RESTRICTED_AUTHOR: This role isn't currently available for\n use.

    \n
  • \n
" } }, "IdentityType": { diff --git a/models/rds.json b/models/rds.json index b1c3b4943d..fe455b6a4e 100644 --- a/models/rds.json +++ b/models/rds.json @@ -419,7 +419,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

\n

For an overview on tagging Amazon RDS resources, \n see Tagging Amazon RDS Resources.

", + "smithy.api#documentation": "

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

\n

For an overview on tagging your relational database resources, \n see Tagging Amazon RDS Resources\n or Tagging Amazon Aurora and Amazon RDS Resources.\n

", "smithy.api#examples": [ { "title": "To add tags to a resource", @@ -4177,7 +4177,7 @@ "AvailabilityZones": { "target": "com.amazonaws.rds#AvailabilityZones", "traits": { - "smithy.api#documentation": "

A list of Availability Zones (AZs) where DB instances in the DB cluster can be created.

\n

For information on Amazon Web Services Regions and Availability Zones, see \n Choosing the Regions and \n Availability Zones in the Amazon Aurora User Guide.

\n

Valid for Cluster Type: Aurora DB clusters only

" + "smithy.api#documentation": "

A list of Availability Zones (AZs) where you specifically want to create DB instances in the DB cluster.

\n

For information on AZs, see \n Availability Zones\n in the Amazon Aurora User Guide.

\n

Valid for Cluster Type: Aurora DB clusters only

\n

Constraints:

\n
    \n
  • \n

    Can't specify more than three AZs.

    \n
  • \n
" } }, "BackupRetentionPeriod": { @@ -4195,7 +4195,7 @@ "DatabaseName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The name for your database of up to 64 alphanumeric characters. If you don't\n provide a name, Amazon RDS doesn't create a database in the DB cluster you are\n creating.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" + "smithy.api#documentation": "

The name for your database of up to 64 alphanumeric characters. \n A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" } }, "DBClusterIdentifier": { @@ -4228,7 +4228,7 @@ "target": "com.amazonaws.rds#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The database engine to use for this DB cluster.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values: aurora-mysql | aurora-postgresql | mysql | postgres\n

", + "smithy.api#documentation": "

The database engine to use for this DB cluster.

\n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values:

\n
    \n
  • \n

    \n aurora-mysql\n

    \n
  • \n
  • \n

    \n aurora-postgresql\n

    \n
  • \n
  • \n

    \n mysql\n

    \n
  • \n
  • \n

    \n postgres\n

    \n
  • \n
  • \n

    \n neptune - For information about using Amazon Neptune, see the\n \n Amazon Neptune User Guide\n .

    \n
  • \n
", "smithy.api#required": {} } }, @@ -4492,6 +4492,12 @@ "traits": { "smithy.api#documentation": "

The CA certificate identifier to use for the DB cluster's server certificate.

\n

For more information, see Using SSL/TLS to encrypt a connection to a DB \n instance in the Amazon RDS User Guide.

\n

Valid for Cluster Type: Multi-AZ DB clusters

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB cluster.

\n \n

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. \n At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, \n creating the DB cluster will fail if the DB major version is past its end of standard support date.

\n
\n

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, \n you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

\n \n

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

\n

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled\n

\n

Default: open-source-rds-extended-support\n

" + } } }, "traits": { @@ -4897,7 +4903,7 @@ "DBName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The meaning of this parameter differs according to the database engine you use.

\n
\n
Amazon Aurora MySQL
\n
\n

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is\n created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created \n in the DB cluster.

\n

Constraints:

\n
    \n
  • \n

    Must contain 1 to 64 alphanumeric characters.

    \n
  • \n
  • \n

    Can't be a word reserved by the database engine.

    \n
  • \n
\n
\n
Amazon Aurora PostgreSQL
\n
\n

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is\n created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, \n a database named postgres is created in the DB cluster.

\n

Constraints:

\n
    \n
  • \n

    It must contain 1 to 63 alphanumeric characters.

    \n
  • \n
  • \n

    Must begin with a letter. Subsequent characters can be letters, underscores, or digits\n (0 to 9).

    \n
  • \n
  • \n

    Can't be a word reserved by the database engine.

    \n
  • \n
\n
\n
Amazon RDS Custom for Oracle
\n
\n

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and\n RDSCDB for CDBs.

\n

Default: ORCL\n

\n

Constraints:

\n
    \n
  • \n

    Must contain 1 to 8 alphanumeric characters.

    \n
  • \n
  • \n

    Must contain a letter.

    \n
  • \n
  • \n

    Can't be a word reserved by the database engine.

    \n
  • \n
\n
\n
Amazon RDS Custom for SQL Server
\n
\n

Not applicable. Must be null.

\n
\n
RDS for Db2
\n
\n

The name of the database to create when the DB instance is created. If\n this parameter isn't specified, no database is created in the DB instance.\n In some cases, we recommend that you don't add a database name. For more\n information, see Additional considerations in the Amazon RDS User\n Guide.

\n

Constraints:

\n
    \n
  • \n

    Must contain 1 to 64 letters or numbers.

    \n
  • \n
  • \n

    Must begin with a letter. Subsequent characters can be letters,\n underscores, or digits (0-9).

    \n
  • \n
  • \n

    Can't be a word reserved by the specified database engine.

    \n
  • \n
\n
\n
RDS for MariaDB
\n
\n

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

\n

Constraints:

\n
    \n
  • \n

    Must contain 1 to 64 letters or numbers.

    \n
  • \n
  • \n

    Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

    \n
  • \n
  • \n

    Can't be a word reserved by the specified database engine.

    \n
  • \n
\n
\n
RDS for MySQL
\n
\n

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
  • Can't be a word reserved by the specified database engine.

RDS for Oracle

The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters.

RDS for PostgreSQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
  • Can't be a word reserved by the specified database engine.

RDS for SQL Server

Not applicable. Must be null.

" + "smithy.api#documentation": "

The meaning of this parameter differs according to the database engine you use.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

Constraints:

  • Must contain 1 to 64 alphanumeric characters.
  • Can't be a word reserved by the database engine.

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Constraints:

  • It must contain 1 to 63 alphanumeric characters.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).
  • Can't be a word reserved by the database engine.

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.

Default: ORCL

Constraints:

  • Must contain 1 to 8 alphanumeric characters.
  • Must contain a letter.
  • Can't be a word reserved by the database engine.

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

RDS for Db2

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.

Constraints:

  • Must contain 1 to 64 letters or numbers.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
  • Can't be a word reserved by the specified database engine.

RDS for MariaDB

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
  • Can't be a word reserved by the specified database engine.

RDS for MySQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
  • Can't be a word reserved by the specified database engine.

RDS for Oracle

The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters.

RDS for PostgreSQL

The name of the database to create when the DB instance is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.
  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).
  • Can't be a word reserved by the specified database engine.

RDS for SQL Server

Not applicable. Must be null.
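The per-engine naming rules above are easy to get wrong at call time. The following standalone Swift sketch (not part of the generated SDK) encodes the documented RDS for MySQL/MariaDB constraints; the reserved-word set is a placeholder, since the real list depends on the engine and version.

```swift
// Minimal, illustrative check of the documented DBName rules for RDS for MySQL/MariaDB.
// The reserved-word list here is a placeholder; the real list is engine-specific.
func isValidMySQLDBName(_ name: String, reservedWords: Set<String> = ["select", "table"]) -> Bool {
    guard (1...64).contains(name.count) else { return false }
    guard let first = name.first, first.isLetter else { return false }
    let restIsValid = name.dropFirst().allSatisfy { $0.isLetter || $0.isNumber || $0 == "_" }
    guard restIsValid else { return false }
    return !reservedWords.contains(name.lowercased())
}

// Example: "sales_2024" passes, "1sales" does not.
print(isValidMySQLDBName("sales_2024"))  // true
print(isValidMySQLDBName("1sales"))      // false
```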
" } }, "DBInstanceIdentifier": { @@ -5017,7 +5023,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The license model information for this DB instance.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license
  • RDS for MariaDB - general-public-license
  • RDS for Microsoft SQL Server - license-included
  • RDS for MySQL - general-public-license
  • RDS for Oracle - bring-your-own-license | license-included
  • RDS for PostgreSQL - postgresql-license

" + "smithy.api#documentation": "

The license model information for this DB instance.

License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.

The default for RDS for Db2 is bring-your-own-license.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license | marketplace-license
  • RDS for MariaDB - general-public-license
  • RDS for Microsoft SQL Server - license-included
  • RDS for MySQL - general-public-license
  • RDS for Oracle - bring-your-own-license | license-included
  • RDS for PostgreSQL - postgresql-license
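The mapping from engine to allowed license-model strings is now irregular enough (Db2 gained marketplace-license) that a lookup table helps. A small standalone Swift sketch, caller-side only; the generated shape keeps this field as a String, and the dictionary keys below are simplified engine labels for illustration, not the exact Engine parameter values.

```swift
// Documented license models per engine after this update (RDS only; Aurora and RDS Custom are excluded).
// Keys are simplified labels, not the literal Engine values such as "sqlserver-se" or "oracle-ee".
let licenseModelsByEngine: [String: Set<String>] = [
    "db2": ["bring-your-own-license", "marketplace-license"],
    "mariadb": ["general-public-license"],
    "sqlserver": ["license-included"],
    "mysql": ["general-public-license"],
    "oracle": ["bring-your-own-license", "license-included"],
    "postgres": ["postgresql-license"],
]

// Example: check a requested combination before building CreateDBInstance.
let engine = "db2"
let requestedModel = "marketplace-license"
print(licenseModelsByEngine[engine]?.contains(requestedModel) ?? false)  // true
```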
" } }, "Iops": { @@ -5271,6 +5277,12 @@ "traits": { "smithy.api#documentation": "

Specifies whether to use the multi-tenant configuration or the single-tenant configuration (default). This parameter only applies to RDS for Oracle container database (CDB) engines.

Note the following restrictions:

  • The DB engine that you specify in the request must support the multi-tenant configuration. If you attempt to enable the multi-tenant configuration on a DB engine that doesn't support it, the request fails.
  • If you specify the multi-tenant configuration when you create your DB instance, you can't later modify this DB instance to use the single-tenant configuration.
" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB instance will fail if the DB major version is past its end of standard support date.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support
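Because the two valid values differ only by a -disabled suffix, they are easy to mistype in calling code. A standalone Swift sketch of a caller-side wrapper (an assumption about how an application might wrap the string values; the generated service shapes model this field as a plain String):

```swift
// Illustrative wrapper for the documented EngineLifecycleSupport string values.
enum EngineLifecycleSupport: String {
    case extendedSupport = "open-source-rds-extended-support"            // default: enrolls the resource in RDS Extended Support
    case extendedSupportDisabled = "open-source-rds-extended-support-disabled"
}

// Pass the raw value wherever the request expects a String.
let lifecycle = EngineLifecycleSupport.extendedSupportDisabled.rawValue
print(lifecycle)  // "open-source-rds-extended-support-disabled"
```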

" + } } }, "traits": { @@ -6449,7 +6461,7 @@ "SourceType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy

" + "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment
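The three new source types (zero-etl, custom-engine-version, blue-green-deployment) only extend the accepted string set; the field itself is still a plain String. A minimal Swift sketch of a caller-side guard, assuming you want to validate before building the request:

```swift
// Documented SourceType values after this update.
let validEventSourceTypes: Set<String> = [
    "db-instance", "db-cluster", "db-parameter-group", "db-security-group",
    "db-snapshot", "db-cluster-snapshot", "db-proxy",
    "zero-etl", "custom-engine-version", "blue-green-deployment"
]

let requested = "blue-green-deployment"
precondition(validEventSourceTypes.contains(requested), "unsupported event source type")
```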

" } }, "EventCategories": { @@ -6566,6 +6578,12 @@ "smithy.api#documentation": "

The engine version to use for this global database cluster.

Constraints:

  • Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.
" } }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this global database cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your global cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the global cluster will fail if the DB major version is past its end of standard support date.

This setting only applies to Aurora PostgreSQL-based global databases.

You can use this setting to enroll your global cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your global cluster past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon Aurora User Guide.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } + }, "DeletionProtection": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { @@ -7510,6 +7528,12 @@ }, "CertificateDetails": { "target": "com.amazonaws.rds#CertificateDetails" + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for the DB cluster.

For more information, see CreateDBCluster.

" + } } }, "traits": { @@ -9090,7 +9114,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The license model information for this DB instance. This setting doesn't apply to RDS Custom DB instances.

" + "smithy.api#documentation": "

The license model information for this DB instance. This setting doesn't apply to\n Amazon Aurora or RDS Custom DB instances.

" } }, "Iops": { @@ -9431,6 +9455,12 @@ "traits": { "smithy.api#documentation": "

Specifies whether the DB instance is in the multi-tenant configuration (TRUE) or the\n single-tenant configuration (FALSE).

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for the DB instance.

For more information, see CreateDBInstance.

" + } } }, "traits": { @@ -19380,6 +19410,12 @@ "smithy.api#documentation": "

Indicates the database engine version.

" } }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for the global cluster.

For more information, see CreateGlobalCluster.

" + } + }, "DatabaseName": { "target": "com.amazonaws.rds#String", "traits": { @@ -23243,7 +23279,7 @@ "SourceType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy

" + "smithy.api#documentation": "

The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. For RDS Proxy events, specify db-proxy. If this value isn't specified, all events are returned.

Valid Values: db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment

" } }, "EventCategories": { @@ -24652,7 +24688,7 @@ "Action": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of pending maintenance action that is available for the resource. \n Valid actions are system-update, db-upgrade, hardware-maintenance, \n and ca-certificate-rotation.

" + "smithy.api#documentation": "

The type of pending maintenance action that is available for the resource.

For more information about maintenance actions, see Maintaining a DB instance.

Valid Values: system-update | db-upgrade | hardware-maintenance | ca-certificate-rotation

" } }, "AutoAppliedAfterDate": { @@ -26943,6 +26979,12 @@ "traits": { "smithy.api#documentation": "

Specifies the storage type to be associated with the DB cluster.

Valid Values: aurora, aurora-iopt1

Default: aurora

Valid for: Aurora DB clusters only

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } } }, "traits": { @@ -27272,6 +27314,12 @@ "traits": { "smithy.api#documentation": "

Reserved for future use.

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } } }, "traits": { @@ -27595,6 +27643,12 @@ "traits": { "smithy.api#documentation": "

Reserved for future use.

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } } }, "traits": { @@ -27792,7 +27846,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

License model information for the restored DB instance.

This setting doesn't apply to RDS Custom.

Default: Same as source.

Valid Values: license-included | bring-your-own-license | general-public-license

" + "smithy.api#documentation": "

License model information for the restored DB instance.

License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license | marketplace-license
  • RDS for MariaDB - general-public-license
  • RDS for Microsoft SQL Server - license-included
  • RDS for MySQL - general-public-license
  • RDS for Oracle - bring-your-own-license | license-included
  • RDS for PostgreSQL - postgresql-license

Default: Same as the source.

" } }, "DBName": { @@ -27977,6 +28031,12 @@ "traits": { "smithy.api#documentation": "

The CA certificate identifier to use for the DB instance's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } } }, "traits": { @@ -28295,7 +28355,7 @@ "target": "com.amazonaws.rds#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

An Amazon Web Services Identity and Access Management (IAM) role to allow Amazon RDS to access your Amazon S3 bucket.

", + "smithy.api#documentation": "

An Amazon Web Services Identity and Access Management (IAM) role with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon S3 bucket. \n For information about this role,\n see \n Creating an IAM role manually in the Amazon RDS User Guide.\n
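The updated wording spells out that the ingestion role needs both a trust policy and a permissions policy. A hedged sketch of what the trust side typically looks like, embedded as a Swift string constant; the service principal shown is an assumption drawn from the linked guide, not something this model defines, so verify against "Creating an IAM role manually" in the Amazon RDS User Guide.

```swift
// Hypothetical trust policy for the S3 ingestion role, kept as a Swift string for illustration.
// The principal below is an assumption; check the Amazon RDS User Guide before using it.
let s3IngestionTrustPolicy = """
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "rds.amazonaws.com" },
      "Action": "sts:AssumeRole"
    }
  ]
}
"""
print(s3IngestionTrustPolicy)
```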

", "smithy.api#required": {} } }, @@ -28382,6 +28442,12 @@ "traits": { "smithy.api#documentation": "

The CA certificate identifier to use for the DB instance's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } } }, "traits": { @@ -28652,7 +28718,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The license model information for the restored DB instance.

This setting doesn't apply to RDS Custom.

Valid Values: license-included | bring-your-own-license | general-public-license

Default: Same as the source.

" + "smithy.api#documentation": "

The license model information for the restored DB instance.

License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide.

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

Valid Values:

  • RDS for Db2 - bring-your-own-license | marketplace-license
  • RDS for MariaDB - general-public-license
  • RDS for Microsoft SQL Server - license-included
  • RDS for MySQL - general-public-license
  • RDS for Oracle - bring-your-own-license | license-included
  • RDS for PostgreSQL - postgresql-license

Default: Same as the source.

" } }, "DBName": { @@ -28849,6 +28915,12 @@ "traits": { "smithy.api#documentation": "

The CA certificate identifier to use for the DB instance's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" } + }, + "EngineLifecycleSupport": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + } } }, "traits": { @@ -30682,7 +30754,7 @@ } }, "traits": { - "smithy.api#documentation": "

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" + "smithy.api#documentation": "

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.

" } }, "com.amazonaws.rds#TagList": { @@ -31132,7 +31204,7 @@ "AutoUpgrade": { "target": "com.amazonaws.rds#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

" + "smithy.api#documentation": "

Indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.

This parameter is dynamic, and is set by RDS.

" } }, "IsMajorVersionUpgrade": { diff --git a/models/redshift.json b/models/redshift.json index 5a49d50171..3c33c7abb7 100644 --- a/models/redshift.json +++ b/models/redshift.json @@ -2814,7 +2814,7 @@ "target": "com.amazonaws.redshift#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

", + "smithy.api#documentation": "

The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide.

Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

", "smithy.api#required": {} } }, @@ -2883,7 +2883,7 @@ "Port": { "target": "com.amazonaws.redshift#IntegerOptional", "traits": { - "smithy.api#documentation": "

The port number on which the cluster accepts incoming connections.

The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.

Default: 5439

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)
  • For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535.

" + "smithy.api#documentation": "

The port number on which the cluster accepts incoming connections.

The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections.

Default: 5439

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)
  • For clusters with dc2 nodes - Select a port within the range 1150-65535.
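The allowed port ranges now depend only on whether the cluster uses ra3 or dc2 nodes. A minimal standalone Swift sketch of that check (caller-side only, not part of the SDK):

```swift
// Validate a requested port against the documented ranges for the cluster's node family.
func isValidRedshiftPort(_ port: Int, nodeType: String) -> Bool {
    if nodeType.hasPrefix("ra3") {
        return (5431...5455).contains(port) || (8191...8215).contains(port)
    } else {  // dc2 node types
        return (1150...65535).contains(port)
    }
}

print(isValidRedshiftPort(5439, nodeType: "ra3.4xlarge"))  // true
print(isValidRedshiftPort(5439, nodeType: "dc2.large"))    // true
print(isValidRedshiftPort(8300, nodeType: "ra3.xlplus"))   // false
```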
" } }, "ClusterVersion": { @@ -11453,7 +11453,7 @@ "NodeType": { "target": "com.amazonaws.redshift#String", "traits": { - "smithy.api#documentation": "

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" + "smithy.api#documentation": "

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" } }, "NumberOfNodes": { @@ -11585,7 +11585,7 @@ "Port": { "target": "com.amazonaws.redshift#IntegerOptional", "traits": { - "smithy.api#documentation": "

The option to change the port of an Amazon Redshift cluster.

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)
  • For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535.

" + "smithy.api#documentation": "

The option to change the port of an Amazon Redshift cluster.

Valid Values:

  • For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.)
  • For clusters with dc2 nodes - Select a port within the range 1150-65535.
" } }, "ManageMasterPassword": { @@ -12508,7 +12508,7 @@ "NodeType": { "target": "com.amazonaws.redshift#String", "traits": { - "smithy.api#documentation": "

The node type, such as, \"ds2.8xlarge\".

" + "smithy.api#documentation": "

The node type, such as, \"ra3.4xlarge\".

" } }, "NumberOfNodes": { @@ -15412,7 +15412,7 @@ "SourceReservedNodeType": { "target": "com.amazonaws.redshift#String", "traits": { - "smithy.api#documentation": "

The source reserved-node type, for example ds2.xlarge.

" + "smithy.api#documentation": "

The source reserved-node type, for example ra3.4xlarge.

" } }, "SourceReservedNodeCount": { @@ -15789,7 +15789,7 @@ } ], "traits": { - "smithy.api#documentation": "

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:
    • dc1.large (if your cluster is in a VPC)
    • dc1.8xlarge (if your cluster is in a VPC)
    • dc2.large
    • dc2.8xlarge
    • ds2.xlarge
    • ds2.8xlarge
    • ra3.xlplus
    • ra3.4xlarge
    • ra3.16xlarge
  • The type of nodes that you add must match the node type for the cluster.

" + "smithy.api#documentation": "

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

  • You can only resize clusters of the following types:
    • dc2.large
    • dc2.8xlarge
    • ra3.xlplus
    • ra3.4xlarge
    • ra3.16xlarge
  • The type of nodes that you add must match the node type for the cluster.
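After this update the elastic-resize whitelist is just the dc2 and ra3 families. A standalone Swift sketch of a pre-flight check (a caller-side convenience, not an SDK API):

```swift
// Node types that the documentation lists as eligible for elastic resize after this update.
let elasticResizeNodeTypes: Set<String> = [
    "dc2.large", "dc2.8xlarge",
    "ra3.xlplus", "ra3.4xlarge", "ra3.16xlarge"
]

func supportsElasticResize(nodeType: String) -> Bool {
    elasticResizeNodeTypes.contains(nodeType)
}

print(supportsElasticResize(nodeType: "ra3.4xlarge"))  // true
print(supportsElasticResize(nodeType: "ds2.xlarge"))   // false (classic resize only)
```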
" } }, "com.amazonaws.redshift#ResizeClusterMessage": { @@ -16187,7 +16187,7 @@ "Port": { "target": "com.amazonaws.redshift#IntegerOptional", "traits": { - "smithy.api#documentation": "

The port number on which the cluster accepts connections.

Default: The same port as the original cluster.

Valid values: For clusters with ds2 or dc2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215.

" + "smithy.api#documentation": "

The port number on which the cluster accepts connections.

Default: The same port as the original cluster.

Valid values: For clusters with DC2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215.

" } }, "AvailabilityZone": { @@ -16283,7 +16283,7 @@ "NodeType": { "target": "com.amazonaws.redshift#String", "traits": { - "smithy.api#documentation": "

The node type that the restored cluster will be provisioned with.

Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

" + "smithy.api#documentation": "

The node type that the restored cluster will be provisioned with.

If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc2.large node type into another dc2 type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.

" } }, "EnhancedVpcRouting": { @@ -16411,31 +16411,31 @@ "CurrentRestoreRateInMegaBytesPerSecond": { "target": "com.amazonaws.redshift#Double", "traits": { - "smithy.api#documentation": "

The number of megabytes per second being transferred from the backup storage.\n Returns the average rate for a completed backup.\n This field is only updated when you restore to DC2 and DS2 node types.

" + "smithy.api#documentation": "

The number of megabytes per second being transferred from the backup storage.\n Returns the average rate for a completed backup.\n This field is only updated when you restore to DC2 node types.

" } }, "SnapshotSizeInMegaBytes": { "target": "com.amazonaws.redshift#Long", "traits": { - "smithy.api#documentation": "

The size of the set of snapshot data used to restore the cluster.\n This field is only updated when you restore to DC2 and DS2 node types.

" + "smithy.api#documentation": "

The size of the set of snapshot data used to restore the cluster.\n This field is only updated when you restore to DC2 node types.

" } }, "ProgressInMegaBytes": { "target": "com.amazonaws.redshift#Long", "traits": { - "smithy.api#documentation": "

The number of megabytes that have been transferred from snapshot storage.\n This field is only updated when you restore to DC2 and DS2 node types.

" + "smithy.api#documentation": "

The number of megabytes that have been transferred from snapshot storage.\n This field is only updated when you restore to DC2 node types.

" } }, "ElapsedTimeInSeconds": { "target": "com.amazonaws.redshift#Long", "traits": { - "smithy.api#documentation": "

The amount of time an in-progress restore has been running, or the amount of time\n it took a completed restore to finish.\n This field is only updated when you restore to DC2 and DS2 node types.

" + "smithy.api#documentation": "

The amount of time an in-progress restore has been running, or the amount of time\n it took a completed restore to finish.\n This field is only updated when you restore to DC2 node types.

" } }, "EstimatedTimeToCompletionInSeconds": { "target": "com.amazonaws.redshift#Long", "traits": { - "smithy.api#documentation": "

The estimate of the time remaining before the restore will complete. Returns 0 for\n a completed restore.\n This field is only updated when you restore to DC2 and DS2 node types.

" + "smithy.api#documentation": "

The estimate of the time remaining before the restore will complete. Returns 0 for\n a completed restore.\n This field is only updated when you restore to DC2 node types.

" } } }, @@ -17024,7 +17024,7 @@ "TargetAction": { "target": "com.amazonaws.redshift#ScheduledActionType", "traits": { - "smithy.api#documentation": "

A JSON format string of the Amazon Redshift API operation with input parameters.

\"{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ds2.8xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}\".

" + "smithy.api#documentation": "

A JSON format string of the Amazon Redshift API operation with input parameters.

\"{\\\"ResizeCluster\\\":{\\\"NodeType\\\":\\\"ra3.4xlarge\\\",\\\"ClusterIdentifier\\\":\\\"my-test-cluster\\\",\\\"NumberOfNodes\\\":3}}\".

" } }, "Schedule": { diff --git a/models/rekognition.json b/models/rekognition.json index 1310144456..dcc61af9ec 100644 --- a/models/rekognition.json +++ b/models/rekognition.json @@ -8310,7 +8310,20 @@ "outputToken": "NextToken", "items": "CollectionIds", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListCollectionsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.rekognition#ListCollectionsRequest": { diff --git a/models/resiliencehub.json b/models/resiliencehub.json index e8899833b5..f02d0058b0 100644 --- a/models/resiliencehub.json +++ b/models/resiliencehub.json @@ -123,14 +123,14 @@ "recommendationId": { "target": "com.amazonaws.resiliencehub#Uuid", "traits": { - "smithy.api#documentation": "

Identifier\n of the alarm recommendation.

", + "smithy.api#documentation": "

Identifier of the alarm recommendation.

", "smithy.api#required": {} } }, "referenceId": { "target": "com.amazonaws.resiliencehub#SpecReferenceId", "traits": { - "smithy.api#documentation": "

Reference\n identifier of the alarm recommendation.

", + "smithy.api#documentation": "

Reference identifier of the alarm recommendation.

", "smithy.api#required": {} } }, @@ -160,7 +160,7 @@ "smithy.api#deprecated": { "message": "An alarm recommendation can be attached to multiple Application Components, hence this property will be replaced by the new property 'appComponentNames'." }, - "smithy.api#documentation": "

Application Component name for the CloudWatch alarm\n recommendation. This\n name is saved as the first item in the appComponentNames\n list.

" + "smithy.api#documentation": "

Application Component name for the CloudWatch alarm recommendation. This name is saved as the first\n item in the appComponentNames list.

" } }, "items": { @@ -250,14 +250,14 @@ "name": { "target": "com.amazonaws.resiliencehub#EntityName", "traits": { - "smithy.api#documentation": "

Name\n for the application.

", + "smithy.api#documentation": "

Name for the application.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.resiliencehub#EntityDescription", "traits": { - "smithy.api#documentation": "

Optional\n description for an\n application.

" + "smithy.api#documentation": "

Optional description for an application.

" } }, "policyArn": { @@ -269,7 +269,7 @@ "creationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Date\n and time when the app was created.

", + "smithy.api#documentation": "

Date and time when the app was created.

", "smithy.api#required": {} } }, @@ -282,26 +282,26 @@ "complianceStatus": { "target": "com.amazonaws.resiliencehub#AppComplianceStatusType", "traits": { - "smithy.api#documentation": "

Current\n status of compliance for the resiliency policy.

" + "smithy.api#documentation": "

Current status of compliance for the resiliency policy.

" } }, "lastAppComplianceEvaluationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Date\n and time the most recent compliance evaluation.

" + "smithy.api#documentation": "

Date and time the most recent compliance evaluation.

" } }, "resiliencyScore": { "target": "com.amazonaws.resiliencehub#Double", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Current\n resiliency score for the application.

" + "smithy.api#documentation": "

Current resiliency score for the application.

" } }, "lastResiliencyScoreEvaluationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Date\n and time the most recent resiliency score evaluation.

" + "smithy.api#documentation": "

Date and time the most recent resiliency score evaluation.

" } }, "tags": { @@ -313,7 +313,7 @@ "assessmentSchedule": { "target": "com.amazonaws.resiliencehub#AppAssessmentScheduleType", "traits": { - "smithy.api#documentation": "

Assessment\n execution schedule with 'Daily' or 'Disabled' values.

" + "smithy.api#documentation": "

Assessment execution schedule with 'Daily' or 'Disabled' values.

" } }, "permissionModel": { @@ -337,7 +337,7 @@ "lastDriftEvaluationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Indicates the last time\n that\n a drift was evaluated.

" + "smithy.api#documentation": "

Indicates the last time that a drift was evaluated.

" } }, "rtoInSecs": { @@ -394,7 +394,7 @@ "compliance": { "target": "com.amazonaws.resiliencehub#AssessmentCompliance", "traits": { - "smithy.api#documentation": "

Application\n compliance against the resiliency policy.

" + "smithy.api#documentation": "

Application compliance against the resiliency policy.

" } }, "complianceStatus": { @@ -635,7 +635,7 @@ "id": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Identifier\n of the Application Component.

" + "smithy.api#documentation": "

Identifier of the Application Component.

" } }, "additionalInfo": { @@ -817,7 +817,7 @@ "creationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Date\n and time when the app was created.

", + "smithy.api#documentation": "

Date and time when the app was created.

", "smithy.api#required": {} } }, @@ -855,7 +855,7 @@ "lastAppComplianceEvaluationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Date\n and time of the most recent compliance evaluation.

" + "smithy.api#documentation": "

Date and time of the most recent compliance evaluation.

" } }, "rtoInSecs": { @@ -1089,6 +1089,9 @@ { "target": "com.amazonaws.resiliencehub#ListAppAssessmentComplianceDrifts" }, + { + "target": "com.amazonaws.resiliencehub#ListAppAssessmentResourceDrifts" + }, { "target": "com.amazonaws.resiliencehub#ListAppAssessments" }, @@ -2128,7 +2131,7 @@ } }, "traits": { - "smithy.api#documentation": "

List\n of operational recommendations that did not get included or excluded.

" + "smithy.api#documentation": "

List of operational recommendations that did not get included or excluded.

" } }, "com.amazonaws.resiliencehub#BatchUpdateRecommendationStatusRequest": { @@ -2215,7 +2218,7 @@ "excluded": { "target": "com.amazonaws.resiliencehub#BooleanOptional", "traits": { - "smithy.api#documentation": "

Indicates\n if\n the operational recommendation was successfully excluded.

", + "smithy.api#documentation": "

Indicates if the operational recommendation was successfully excluded.

", "smithy.api#required": {} } }, @@ -2227,7 +2230,7 @@ } }, "traits": { - "smithy.api#documentation": "

List\n of operational recommendations that were successfully included or excluded.

" + "smithy.api#documentation": "

List of operational recommendations that were successfully included or excluded.

" } }, "com.amazonaws.resiliencehub#BooleanOptional": { @@ -2267,7 +2270,7 @@ "appId": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Identifier\n of your application.

" + "smithy.api#documentation": "

Identifier of your application.

" } }, "appVersion": { @@ -2297,18 +2300,18 @@ "actualValue": { "target": "com.amazonaws.resiliencehub#AssessmentCompliance", "traits": { - "smithy.api#documentation": "

Actual\n compliance value of the entity.

" + "smithy.api#documentation": "

Actual compliance value of the entity.

" } }, "diffType": { "target": "com.amazonaws.resiliencehub#DifferenceType", "traits": { - "smithy.api#documentation": "

Difference\n type between actual and expected\n recovery\n point\n objective\n (RPO) and\n recovery\n time\n objective\n (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.

" + "smithy.api#documentation": "

Difference type between actual and expected recovery point objective (RPO) and recovery\n time objective (RTO) values. Currently, Resilience Hub supports only NotEqual difference type.

" } } }, "traits": { - "smithy.api#documentation": "

Indicates\n the compliance drifts (recovery time objective (RTO) and recovery point\n objective (RPO)) that\n were detected for an assessed entity.

" + "smithy.api#documentation": "

Indicates the compliance drifts (recovery time objective (RTO) and recovery point\n objective (RPO)) that were detected for an assessed entity.

" } }, "com.amazonaws.resiliencehub#ComplianceDriftList": { @@ -2435,7 +2438,7 @@ "referenceId": { "target": "com.amazonaws.resiliencehub#SpecReferenceId", "traits": { - "smithy.api#documentation": "

Reference\n identifier for the recommendation configuration.

", + "smithy.api#documentation": "

Reference identifier for the recommendation configuration.

", "smithy.api#required": {} } } @@ -2591,7 +2594,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Resilience Hub application. An Resilience Hub application is a\n collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application, you provide an\n application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate\n resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information\n about the number of resources supported per application, see Service\n quotas.

\n

After you create an Resilience Hub application, you publish it so that you can run a resiliency\n assessment on it. You can then use recommendations from the assessment to improve resiliency\n by running another assessment, comparing results, and then iterating the process until you\n achieve your goals for recovery time objective (RTO) and recovery point objective\n (RPO).

", + "smithy.api#documentation": "

Creates an Resilience Hub application. An Resilience Hub application is a\n collection of Amazon Web Services resources structured to prevent and recover Amazon Web Services application disruptions. To describe a Resilience Hub application,\n you provide an application name, resources from one or more CloudFormation stacks, Resource Groups, Terraform state files, AppRegistry applications, and an appropriate\n resiliency policy. In addition, you can also add resources that are located on Amazon Elastic Kubernetes Service (Amazon EKS) clusters as optional resources. For more information about the number of resources supported per application, see Service\n quotas.

\n

After you create an Resilience Hub application, you publish it so that you can run a resiliency\n assessment on it. You can then use recommendations from the assessment to improve resiliency\n by running another assessment, comparing results, and then iterating the process until you\n achieve your goals for recovery time objective (RTO) and recovery point objective\n (RPO).

", "smithy.api#http": { "method": "POST", "uri": "/create-app", @@ -4449,6 +4452,14 @@ { "value": "NotEqual", "name": "NOT_EQUAL" + }, + { + "value": "Added", + "name": "ADDED" + }, + { + "value": "Removed", + "name": "REMOVED" } ] } @@ -4473,7 +4484,7 @@ "rtoReferenceId": { "target": "com.amazonaws.resiliencehub#String500", "traits": { - "smithy.api#documentation": "

Reference\n identifier of the RTO.

" + "smithy.api#documentation": "

Reference identifier of the RTO.

" } }, "rtoDescription": { @@ -4492,7 +4503,7 @@ "rpoReferenceId": { "target": "com.amazonaws.resiliencehub#String500", "traits": { - "smithy.api#documentation": "

Reference\n identifier of\n the\n RPO\n .

" + "smithy.api#documentation": "

Reference identifier of the RPO .

" } }, "rpoDescription": { @@ -4608,6 +4619,10 @@ { "value": "ApplicationCompliance", "name": "APPLICATION_COMPLIANCE" + }, + { + "value": "AppComponentResiliencyComplianceStatus", + "name": "APP_COMPONENT_RESILIENCY_COMPLIANCE_STATUS" } ] } @@ -4756,19 +4771,19 @@ "eventType": { "target": "com.amazonaws.resiliencehub#EventType", "traits": { - "smithy.api#documentation": "

The type of event you would like to subscribe and get notification for. Currently, Resilience Hub\n supports\n notifications only for Drift detected\n (DriftDetected) and Scheduled assessment\n failure (ScheduledAssessmentFailure) events.

", + "smithy.api#documentation": "

The type of event you would like to subscribe and get notification for. Currently, Resilience Hub supports notifications only for Drift\n detected (DriftDetected) and Scheduled\n assessment failure (ScheduledAssessmentFailure) events.

", "smithy.api#required": {} } }, "snsTopicArn": { "target": "com.amazonaws.resiliencehub#Arn", "traits": { - "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic. The format for this ARN is: arn:partition:sns:region:account:topic-name. \n For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic. The format for this ARN\n is: arn:partition:sns:region:account:topic-name. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" } } }, "traits": { - "smithy.api#documentation": "

Indicates an event you would like to subscribe and get notification for. Currently,\n Resilience Hub\n supports\n notifications only for Drift detected and Scheduled assessment failure events.

" + "smithy.api#documentation": "

Indicates an event you would like to subscribe and get notification for. Currently,\n Resilience Hub supports notifications only for Drift\n detected and Scheduled assessment failure\n events.

" } }, "com.amazonaws.resiliencehub#EventSubscriptionList": { @@ -5199,6 +5214,91 @@ "smithy.api#output": {} } }, + "com.amazonaws.resiliencehub#ListAppAssessmentResourceDrifts": { + "type": "operation", + "input": { + "target": "com.amazonaws.resiliencehub#ListAppAssessmentResourceDriftsRequest" + }, + "output": { + "target": "com.amazonaws.resiliencehub#ListAppAssessmentResourceDriftsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.resiliencehub#AccessDeniedException" + }, + { + "target": "com.amazonaws.resiliencehub#InternalServerException" + }, + { + "target": "com.amazonaws.resiliencehub#ThrottlingException" + }, + { + "target": "com.amazonaws.resiliencehub#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Indicates the list of resource drifts that were detected while running an assessment.
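The new ListAppAssessmentResourceDrifts operation is paginated through nextToken/maxResults like the other list calls. A minimal Swift sketch of the drain loop; the fetchPage closure and DriftPage type are hypothetical stand-ins for whatever generated Soto call and response shape you use, not names defined by this model:

```swift
// Hypothetical page shape: each call returns the drifts found plus an optional continuation token.
struct DriftPage {
    let resourceDrifts: [String]   // stand-in for the generated ResourceDrift type
    let nextToken: String?
}

// Drain all pages by feeding each response's nextToken back into the next request.
func collectAllDrifts(fetchPage: (String?) async throws -> DriftPage) async throws -> [String] {
    var drifts: [String] = []
    var token: String? = nil
    repeat {
        let page = try await fetchPage(token)
        drifts.append(contentsOf: page.resourceDrifts)
        token = page.nextToken
    } while token != nil
    return drifts
}
```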

", + "smithy.api#http": { + "method": "POST", + "uri": "/list-app-assessment-resource-drifts", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "resourceDrifts" + } + } + }, + "com.amazonaws.resiliencehub#ListAppAssessmentResourceDriftsRequest": { + "type": "structure", + "members": { + "assessmentArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the assessment. The format for this ARN is: \narn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.resiliencehub#NextToken", + "traits": { + "smithy.api#documentation": "

Null, or the token from a previous call to get the next set of results.

" + } + }, + "maxResults": { + "target": "com.amazonaws.resiliencehub#MaxResults", + "traits": { + "smithy.api#documentation": "

Indicates the maximum number of drift results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resiliencehub#ListAppAssessmentResourceDriftsResponse": { + "type": "structure", + "members": { + "resourceDrifts": { + "target": "com.amazonaws.resiliencehub#ResourceDriftList", + "traits": { + "smithy.api#documentation": "

Indicates all the resource drifts detected for an assessed entity.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.resiliencehub#NextToken", + "traits": { + "smithy.api#documentation": "

Null, or the token from a previous call to get the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resiliencehub#ListAppAssessments": { "type": "operation", "input": { @@ -6107,8 +6207,7 @@ "target": "com.amazonaws.resiliencehub#Arn", "traits": { "smithy.api#documentation": "

Amazon Resource Name (ARN) of the assessment. The format for this ARN is: \narn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", - "smithy.api#httpQuery": "assessmentArn", - "smithy.api#required": {} + "smithy.api#httpQuery": "assessmentArn" } }, "reverseOrder": { @@ -6670,7 +6769,7 @@ "identifier": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Identifier\n of the resource.

", + "smithy.api#documentation": "

Identifier of the resource.

", "smithy.api#required": {} } }, @@ -6740,7 +6839,7 @@ "invokerRoleName": { "target": "com.amazonaws.resiliencehub#IamRoleName", "traits": { - "smithy.api#documentation": "

Existing Amazon Web Services\n IAM role name in the primary Amazon Web Services account that will be assumed by\n Resilience Hub Service Principle to obtain a read-only access to your application\n resources while running an assessment.

\n \n
    \n
  • \n

    You must have iam:passRole permission for this role while creating or updating the application.

    \n
  • \n
  • \n

    Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Existing Amazon Web Services\n IAM role name in the primary Amazon Web Services account that will be assumed by\n Resilience Hub Service Principle to obtain a read-only access to your application\n resources while running an assessment.

\n \n
    \n
  • \n

    You must have iam:passRole permission for this role while creating or\n updating the application.

    \n
  • \n
  • \n

    Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-]\n characters.

    \n
  • \n
\n
" } }, "crossAccountRoleArns": { @@ -6796,21 +6895,21 @@ "logicalResourceId": { "target": "com.amazonaws.resiliencehub#LogicalResourceId", "traits": { - "smithy.api#documentation": "

Logical\n identifier of the resource.

", + "smithy.api#documentation": "

Logical identifier of the resource.

", "smithy.api#required": {} } }, "physicalResourceId": { "target": "com.amazonaws.resiliencehub#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

Identifier\n of the physical\n resource.

", + "smithy.api#documentation": "

Identifier of the physical resource.

", "smithy.api#required": {} } }, "resourceType": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

The type of resource.

", + "smithy.api#documentation": "

Type of resource.

", "smithy.api#required": {} } }, @@ -6855,7 +6954,7 @@ "identifier": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Identifier\n of the physical resource.

", + "smithy.api#documentation": "

Identifier of the physical resource.

", "smithy.api#required": {} } }, @@ -7140,7 +7239,7 @@ "targetAccountId": { "target": "com.amazonaws.resiliencehub#CustomerId", "traits": { - "smithy.api#documentation": "

Identifier\n of the target account.

" + "smithy.api#documentation": "

Identifier of the target account.

" } }, "targetRegion": { @@ -7527,7 +7626,7 @@ "creationTime": { "target": "com.amazonaws.resiliencehub#TimeStamp", "traits": { - "smithy.api#documentation": "

Date\n and time when the resiliency policy was created.

" + "smithy.api#documentation": "

Date and time when the resiliency policy was created.

" } }, "tags": { @@ -7538,7 +7637,7 @@ } }, "traits": { - "smithy.api#documentation": "

Defines a resiliency policy.

\n \n

Resilience Hub allows you to provide a value of zero for rtoInSecs and rpoInSecs of your resiliency policy. But, while assessing\n your application, the lowest possible assessment result is near zero. Hence,\n if you provide value zero for rtoInSecs and rpoInSecs, the estimated workload RTO and estimated\n workload RPO result will be near zero and the Compliance status for your application will be set to Policy breached.

\n
" + "smithy.api#documentation": "

Defines a resiliency policy.

\n \n

Resilience Hub allows you to provide a value of zero for rtoInSecs\n and rpoInSecs of your resiliency policy. But, while assessing your application,\n the lowest possible assessment result is near zero. Hence, if you provide a value of zero for\n rtoInSecs and rpoInSecs, the estimated workload RTO and\n estimated workload RPO result will be near zero and the Compliance\n status for your application will be set to Policy\n breached.

\n
" } }, "com.amazonaws.resiliencehub#ResiliencyPolicyTier": { @@ -7593,7 +7692,7 @@ "componentScore": { "target": "com.amazonaws.resiliencehub#ScoringComponentResiliencyScores", "traits": { - "smithy.api#documentation": "

The score generated by Resilience Hub for the scoring component after running an assessment.

\n

For example, if the score is 25 points, it indicates the overall score of\n your application generated by Resilience Hub after running an assessment.

" + "smithy.api#documentation": "

The score generated by Resilience Hub for the scoring component after running an\n assessment.

\n

For example, if the score is 25 points, it indicates the overall score of\n your application generated by Resilience Hub after running an assessment.

" } } }, @@ -7713,19 +7812,63 @@ } } }, + "com.amazonaws.resiliencehub#ResourceDrift": { + "type": "structure", + "members": { + "appArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of the application whose resources have drifted. The format for this ARN is: \narn:partition:resiliencehub:region:account:app-assessment/app-id. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" + } + }, + "appVersion": { + "target": "com.amazonaws.resiliencehub#EntityVersion", + "traits": { + "smithy.api#documentation": "

Version of the application whose resources have drifted.

" + } + }, + "referenceId": { + "target": "com.amazonaws.resiliencehub#EntityId", + "traits": { + "smithy.api#documentation": "

Reference identifier of the resource drift.

" + } + }, + "resourceIdentifier": { + "target": "com.amazonaws.resiliencehub#ResourceIdentifier", + "traits": { + "smithy.api#documentation": "

Identifier of the drifted resource.

" + } + }, + "diffType": { + "target": "com.amazonaws.resiliencehub#DifferenceType", + "traits": { + "smithy.api#documentation": "

Indicates if the resource was added or removed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates the resources that have drifted in the current application version.

" + } + }, + "com.amazonaws.resiliencehub#ResourceDriftList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#ResourceDrift" + } + }, "com.amazonaws.resiliencehub#ResourceError": { "type": "structure", "members": { "logicalResourceId": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Identifier\n of the\n logical resource.

" + "smithy.api#documentation": "

Identifier of the logical resource.

" } }, "physicalResourceId": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Identifier\n of the physical resource.

" + "smithy.api#documentation": "

Identifier of the physical resource.

" } }, "reason": { @@ -7771,6 +7914,26 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.resiliencehub#ResourceIdentifier": { + "type": "structure", + "members": { + "logicalResourceId": { + "target": "com.amazonaws.resiliencehub#LogicalResourceId", + "traits": { + "smithy.api#documentation": "

Logical identifier of the drifted resource.

" + } + }, + "resourceType": { + "target": "com.amazonaws.resiliencehub#String255", + "traits": { + "smithy.api#documentation": "

Type of the drifted resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines a resource identifier for the drifted resource.
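As a hedged illustration of how the drift shapes above fit together, the pages collected by the earlier listing sketch can be summarised by the resourceType carried in each drift's resourceIdentifier (member names taken from this model; the Swift spellings are assumed):

```swift
// Hypothetical follow-up to the listing sketch above: count drifted resources per
// resourceType, using the resourceIdentifier and diffType members defined in this model.
func driftCountsByType(_ drifts: [ResilienceHub.ResourceDrift]) -> [String: Int] {
    var counts: [String: Int] = [:]
    for drift in drifts {
        // diffType records whether the resource was added or removed; here we only group by type.
        let type = drift.resourceIdentifier?.resourceType ?? "unknown"
        counts[type, default: 0] += 1
    }
    return counts
}
```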

" + } + }, "com.amazonaws.resiliencehub#ResourceImportStatusType": { "type": "string", "traits": { @@ -7815,51 +7978,51 @@ "resourceName": { "target": "com.amazonaws.resiliencehub#EntityName", "traits": { - "smithy.api#documentation": "

Name\n of the resource that\n the\n resource is mapped to.

" + "smithy.api#documentation": "

Name of the resource that this resource is mapped to when the mappingType is Resource.

" } }, "logicalStackName": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

The name of the CloudFormation stack this resource is mapped to.

" + "smithy.api#documentation": "

Name of the CloudFormation stack this resource is mapped to when the mappingType is CfnStack.

" } }, "appRegistryAppName": { "target": "com.amazonaws.resiliencehub#EntityName", "traits": { - "smithy.api#documentation": "

The name of the application this resource is mapped to.

" + "smithy.api#documentation": "

Name of the application this resource is mapped to when the mappingType is AppRegistryApp.

" } }, "resourceGroupName": { "target": "com.amazonaws.resiliencehub#EntityName", "traits": { - "smithy.api#documentation": "

Name\n of the resource group\n that\n the\n resource is mapped to.

" + "smithy.api#documentation": "

Name of the Resource Groups that this resource is mapped to when the mappingType is ResourceGroup.

" } }, "mappingType": { "target": "com.amazonaws.resiliencehub#ResourceMappingType", "traits": { - "smithy.api#documentation": "

Specifies the type of resource mapping.

\n
\n
AppRegistryApp
\n
\n

The resource is mapped to another application. The name of the application is\n contained in the appRegistryAppName property.

\n
\n
CfnStack
\n
\n

The resource is mapped to a CloudFormation stack. The name of the CloudFormation stack is contained in\n the logicalStackName property.

\n
\n
Resource
\n
\n

The resource is mapped to another resource. The name of the resource is contained in\n the resourceName property.

\n
\n
ResourceGroup
\n
\n

The resource is mapped to Resource Groups. The name of the resource group is\n contained in the resourceGroupName property.

\n
\n
", + "smithy.api#documentation": "

Specifies the type of resource mapping.

", "smithy.api#required": {} } }, "physicalResourceId": { "target": "com.amazonaws.resiliencehub#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

Identifier\n of the physical resource.

", + "smithy.api#documentation": "

Identifier of the physical resource.

", "smithy.api#required": {} } }, "terraformSourceName": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

The short name of the Terraform source.

" + "smithy.api#documentation": "

Name of the Terraform source that this resource is mapped to when the mappingType is Terraform.

" } }, "eksSourceName": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

Name of the Amazon Elastic Kubernetes Service cluster and namespace this resource belongs to.

\n \n

This parameter accepts values in \"eks-cluster/namespace\" format.

\n
" + "smithy.api#documentation": "

Name of the Amazon Elastic Kubernetes Service cluster and namespace that this resource is mapped to when the mappingType is\n EKS.

\n \n

This parameter accepts values in \"eks-cluster/namespace\" format.

\n
" } } }, @@ -8012,33 +8175,33 @@ "target": "com.amazonaws.resiliencehub#Double", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Resiliency score of your application.

" + "smithy.api#documentation": "

Resiliency score points given for the scoring component. The score is always less than or\n equal to the possibleScore.

" } }, "possibleScore": { "target": "com.amazonaws.resiliencehub#Double", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Maximum\n possible score that can be obtained for the scoring component. If the Possible\n score is 20 points, it indicates the maximum possible score you can achieve for your\n application when you run a new assessment after implementing all the Resilience Hub\n recommendations.

" + "smithy.api#documentation": "

Maximum possible score that can be obtained for the scoring component.

\n

For example, if the possibleScore is 20 points, it indicates the maximum\n possible score you can achieve for the scoring component when you run a new assessment after\n implementing all the Resilience Hub recommendations.

" } }, "outstandingCount": { "target": "com.amazonaws.resiliencehub#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Number\n of issues that must be resolved to obtain the maximum possible score for the scoring\n component. For SOPs, alarms, and FIS experiments, these are the number of\n recommendations that must be implemented. For compliance, it is the number of Application\n Components that has breached the resiliency policy.

\n

For example, if the Outstanding count for Resilience Hub recommended Amazon CloudWatch\n alarms is 5, it indicates that 5 Amazon CloudWatch alarms must be fixed to achieve the\n maximum possible score.

" + "smithy.api#documentation": "

Number of recommendations that must be implemented to obtain the maximum possible score\n for the scoring component. For SOPs, alarms, and tests, these are the number of\n recommendations that must be implemented. For compliance, these are the number of Application\n Components that have breached the resiliency policy.

\n

For example, if the outstandingCount for Alarms coverage scoring component is\n 5, it indicates that 5 Amazon CloudWatch alarms need to be implemented to achieve the\n maximum possible score.

" } }, "excludedCount": { "target": "com.amazonaws.resiliencehub#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

Number\n of recommendations that were excluded from the assessment.

\n

For example, if the Excluded count for Resilience Hub recommended Amazon CloudWatch alarms\n is 7, it indicates that 7 Amazon CloudWatch alarms are excluded from the assessment.

" + "smithy.api#documentation": "

Number of recommendations that were excluded from the assessment.

\n

For example, if the excludedCount for Alarms coverage scoring component is 7,\n it indicates that 7 Amazon CloudWatch alarms are excluded from the assessment.

" } } }, "traits": { - "smithy.api#documentation": "

Resiliency score of each scoring component. For more information about scoring component,\n see Calculating resiliency\n score.

" + "smithy.api#documentation": "

Resiliency score of each scoring component. For more information about scoring component,\n see Calculating resiliency score.
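A small SDK-independent worked example of the relationship between these fields: score is what the assessment awarded, possibleScore is the ceiling, and outstandingCount is how many recommendations stand between the two.

```swift
// Worked example, independent of the SDK types: a component with a score of 15 out of a
// possibleScore of 20 has achieved 75% of the maximum, and outstandingCount tells you
// how many recommendations remain to close the gap.
func resiliencyCoverage(score: Double, possibleScore: Double) -> Double {
    guard possibleScore > 0 else { return 0 }
    return (score / possibleScore) * 100.0
}

let coverage = resiliencyCoverage(score: 15, possibleScore: 20)   // 75.0
```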

" } }, "com.amazonaws.resiliencehub#ScoringComponentResiliencyScores": { @@ -8116,7 +8279,7 @@ "referenceId": { "target": "com.amazonaws.resiliencehub#SpecReferenceId", "traits": { - "smithy.api#documentation": "

Reference\n identifier for the SOP recommendation.

", + "smithy.api#documentation": "

Reference identifier for the SOP recommendation.

", "smithy.api#required": {} } }, @@ -8315,7 +8478,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "^(?!aws:)[^\\x00-\\x1f\\x22]+$" + "smithy.api#pattern": "^[^\\x00-\\x1f\\x22]+$" } }, "com.amazonaws.resiliencehub#TagKeyList": { @@ -8463,7 +8626,7 @@ "referenceId": { "target": "com.amazonaws.resiliencehub#SpecReferenceId", "traits": { - "smithy.api#documentation": "

Reference\n identifier for the test recommendation.

", + "smithy.api#documentation": "

Reference identifier for the test recommendation.

", "smithy.api#required": {} } }, @@ -8608,14 +8771,14 @@ "logicalResourceId": { "target": "com.amazonaws.resiliencehub#LogicalResourceId", "traits": { - "smithy.api#documentation": "

Logical\n resource identifier for the unsupported resource.

", + "smithy.api#documentation": "

Logical resource identifier for the unsupported resource.

", "smithy.api#required": {} } }, "physicalResourceId": { "target": "com.amazonaws.resiliencehub#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

Physical\n resource identifier for the unsupported resource.

", + "smithy.api#documentation": "

Physical resource identifier for the unsupported resource.

", "smithy.api#required": {} } }, @@ -8629,7 +8792,7 @@ "unsupportedResourceStatus": { "target": "com.amazonaws.resiliencehub#String255", "traits": { - "smithy.api#documentation": "

The status of the\n unsupported resource.

" + "smithy.api#documentation": "

The status of the unsupported resource.

" } } }, diff --git a/models/route-53-domains.json b/models/route-53-domains.json index c856bf397e..e78189dd06 100644 --- a/models/route-53-domains.json +++ b/models/route-53-domains.json @@ -3235,7 +3235,22 @@ } ], "traits": { - "smithy.api#documentation": "

This operation returns detailed information about a specified domain that is\n\t\t\tassociated with the current Amazon Web Services account. Contact information for the\n\t\t\tdomain is also returned as part of the output.

" + "smithy.api#documentation": "

This operation returns detailed information about a specified domain that is\n\t\t\tassociated with the current Amazon Web Services account. Contact information for the\n\t\t\tdomain is also returned as part of the output.

", + "smithy.test#smokeTests": [ + { + "id": "GetDomainDetailFailure", + "params": { + "DomainName": "fake-domain-name" + }, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.route53domains#GetDomainDetailRequest": { @@ -3649,7 +3664,20 @@ "outputToken": "NextPageMarker", "items": "Domains", "pageSize": "MaxItems" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListDomainsSuccess", + "params": {}, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.route53domains#ListDomainsAttributeName": { diff --git a/models/route53profiles.json b/models/route53profiles.json index 54abbdfaa3..bbffd5470c 100644 --- a/models/route53profiles.json +++ b/models/route53profiles.json @@ -66,7 +66,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Associates a Route 53 Profiles profile with a VPC. A VPC can have only one Profile associated with it, but a Profile can be associated with up to 5000 VPCs.\n

", + "smithy.api#documentation": "

\n Associates a Route 53 Profiles profile with a VPC. A VPC can have only one Profile associated with it, but a Profile can be associated with up to 1000 VPCs (and you can request a higher quota). \n For more information, see https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities.\n 

", "smithy.api#http": { "method": "POST", "uri": "/profileassociation", @@ -193,7 +193,7 @@ "ResourceProperties": { "target": "com.amazonaws.route53profiles#ResourceProperties", "traits": { - "smithy.api#documentation": "

\n If you are adding a DNS Firewall rule group, include also a priority in this format:\n

\n

\n Key=FirewallRuleGroupPriority,Value=100\n

" + "smithy.api#documentation": "

\n If you are adding a DNS Firewall rule group, also include a priority. The priority indicates the processing order for the rule groups, starting with the priority assigned the lowest value.\n

\n

The allowed values for priority are between 100 and 9900.

" } } }, @@ -2572,7 +2572,7 @@ "ResourceProperties": { "target": "com.amazonaws.route53profiles#ResourceProperties", "traits": { - "smithy.api#documentation": "

\n If you are adding a DNS Firewall rule group, include also a priority in this format:

\n

\n Key=FirewallRuleGroupPriority,Value=100.\n

" + "smithy.api#documentation": "

\n If you are adding a DNS Firewall rule group, also include a priority. The priority indicates the processing order for the rule groups, starting with the priority assigned the lowest value.\n

\n

The allowed values for priority are between 100 and 9900.

" } } }, diff --git a/models/route53resolver.json b/models/route53resolver.json index 8d56c9b1a0..95aa1d4cc8 100644 --- a/models/route53resolver.json +++ b/models/route53resolver.json @@ -760,7 +760,7 @@ "FirewallDomainRedirectionAction": { "target": "com.amazonaws.route53resolver#FirewallDomainRedirectionAction", "traits": { - "smithy.api#documentation": "

\n\t\t\tHow you want the the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. \n\t\t

\n

\n Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be \n\t\t\tadded to the allow domain list.

\n

\n Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to \n\t\tthe domain alloww list.

" + "smithy.api#documentation": "

\n\t\t\tHow you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME. \n\t\t

\n

\n Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be \n\t\t\tadded to the domain list.

\n

\n Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to \n\t\t\tthe domain list.
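A hedged sketch of toggling this behaviour on an existing rule, assuming the Soto-generated names for UpdateFirewallRule and camel-cased enum cases derived from the INSPECT_REDIRECTION_DOMAIN / TRUST_REDIRECTION_DOMAIN values:

```swift
import SotoRoute53Resolver

// Hypothetical sketch: stop inspecting the full redirection chain for an existing rule.
// Operation, member, and enum-case names are assumed from this model; the rule is
// identified by its firewallRuleGroupId and firewallDomainListId.
func trustRedirectionChain(
    _ resolver: Route53Resolver,
    ruleGroupId: String,
    domainListId: String
) async throws {
    let request = Route53Resolver.UpdateFirewallRuleRequest(
        firewallDomainListId: domainListId,
        firewallDomainRedirectionAction: .trustRedirectionDomain,  // default is .inspectRedirectionDomain
        firewallRuleGroupId: ruleGroupId
    )
    _ = try await resolver.updateFirewallRule(request)
}
```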

" } }, "Qtype": { @@ -2326,7 +2326,7 @@ "FirewallDomainRedirectionAction": { "target": "com.amazonaws.route53resolver#FirewallDomainRedirectionAction", "traits": { - "smithy.api#documentation": "

\n\t\t\tHow you want the the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. \n\t\t

\n

\n Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be \n\t\t\tadded to the allow domain list.

\n

\n Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the domain in the redirection list to \n\t\t\tthe domain alloww list.

" + "smithy.api#documentation": "

\n\t\t\tHow you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME. \n\t\t

\n

\n Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be \n\t\t\tadded to the domain list.

\n

\n Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to \n\t\t\tthe domain list.

" } }, "Qtype": { @@ -8629,7 +8629,7 @@ "FirewallDomainRedirectionAction": { "target": "com.amazonaws.route53resolver#FirewallDomainRedirectionAction", "traits": { - "smithy.api#documentation": "

\n\t\t\tHow you want the the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. \n\t\t

\n

\n Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be \n\t\t\tadded to the allow domain list.

\n

\n Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the domain in the redirection list to \n\t\t\tthe domain alloww list.

" + "smithy.api#documentation": "

\n\t\t\tHow you want the rule to evaluate DNS redirection in the DNS redirection chain, such as CNAME or DNAME. \n\t\t

\n

\n Inspect_Redirection_Domain (Default) inspects all domains in the redirection chain. The individual domains in the redirection chain must be \n\t\t\tadded to the domain list.

\n

\n Trust_Redirection_Domain inspects only the first domain in the redirection chain. You don't need to add the subsequent domains in the redirection list to \n\t\t\tthe domain list.

" } }, "Qtype": { diff --git a/models/s3.json b/models/s3.json index 0148e679b0..3b48bc3858 100644 --- a/models/s3.json +++ b/models/s3.json @@ -647,6 +647,11 @@ "documentation": "The S3 Prefix used to send the request. This is an optional parameter that will be set automatically for operations that are scoped to an S3 Prefix.", "type": "String" }, + "CopySource": { + "required": false, + "documentation": "The Copy Source used for Copy Object request. This is an optional parameter that will be set automatically for operations that are scoped to Copy Source.", + "type": "String" + }, "DisableAccessPoints": { "required": false, "documentation": "Internal parameter to disable Access Point Buckets", @@ -10663,6 +10668,33 @@ "Key": "key" } }, + { + "documentation": "virtual addressing, aws-global region with Copy Source, and Key uses the global endpoint. Copy Source and Key parameters should not be used in endpoint evaluation.", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3", + "signingRegion": "us-east-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://bucket-name.s3.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "Bucket": "bucket-name", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false, + "CopySource": "/copy/source", + "Key": "key" + } + }, { "documentation": "virtual addressing, aws-global region with fips uses the regional fips endpoint", "expect": { @@ -18297,7 +18329,7 @@ "smithy.api#documentation": "

Completes a multipart upload by assembling previously uploaded parts.

\n

You first initiate the multipart upload and then upload all parts using the UploadPart\n operation or the UploadPartCopy\n operation. After successfully uploading all relevant parts of an upload, you call this\n CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts\n in ascending order by part number to create a new object. In the CompleteMultipartUpload \n request, you must provide the parts list and ensure that the parts list is complete.\n The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list,\n you must provide the PartNumber value and the ETag value that are returned after that part\n was uploaded.
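A minimal Soto sketch of that final step, assuming bucket, key, uploadId, and the per-part ETags were captured from the earlier CreateMultipartUpload / UploadPart calls:

```swift
import SotoS3

// Assemble the parts list in ascending part-number order and complete the upload.
// `s3` is an already-configured S3 service object; the other values come from the
// earlier CreateMultipartUpload / UploadPart calls.
func finishUpload(
    _ s3: S3,
    bucket: String,
    key: String,
    uploadId: String,
    parts: [(number: Int, eTag: String)]
) async throws {
    let completed = parts
        .sorted { $0.number < $1.number }   // the parts list must be in ascending order
        .map { S3.CompletedPart(eTag: $0.eTag, partNumber: $0.number) }
    let request = S3.CompleteMultipartUploadRequest(
        bucket: bucket,
        key: key,
        multipartUpload: S3.CompletedMultipartUpload(parts: completed),
        uploadId: uploadId
    )
    _ = try await s3.completeMultipartUpload(request)
}
```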

\n

The processing of a CompleteMultipartUpload request could take several minutes to\n finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK response has been sent. This means that a 200 OK response can\n contain either a success or an error. The error response might be embedded in the 200 OK response. \n If you call this API operation directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throw an exception (or, for\n the SDKs that don't use exceptions, they return an error).

\n

Note that if CompleteMultipartUpload fails, applications should be prepared\n to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best\n Practices.

\n \n

You can't use Content-Type: application/x-www-form-urlencoded for the \n CompleteMultipartUpload requests. Also, if you don't provide a\n Content-Type header, CompleteMultipartUpload can still return a 200\n OK response.

\n
\n

For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload\n and Permissions in the Amazon S3\n User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Special errors
\n
\n
    \n
  • \n

    Error Code: EntityTooSmall\n

    \n
      \n
    • \n

      Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPart\n

    \n
      \n
    • \n

      Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified ETag might not have\n matched the uploaded part's ETag.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPartOrder\n

    \n
      \n
    • \n

      Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: NoSuchUpload\n

    \n
      \n
    • \n

      Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CompleteMultipartUpload:

\n ", "smithy.api#http": { "method": "POST", - "uri": "/{Bucket}/{Key+}?x-id=CompleteMultipartUpload", + "uri": "/{Bucket}/{Key+}", "code": 200 } } @@ -18844,7 +18876,10 @@ "traits": { "smithy.api#documentation": "

Specifies the source object for the copy operation. The source object \n can be up to 5 GB. If the source object is an object that was uploaded by using a multipart upload, the object copy will be a single part object after the source object is copied to the destination bucket.

\n

You specify the value of the copy source in one of two\n formats, depending on whether you want to access the source object through an access point:

\n
    \n
  • \n

    For objects not accessed through an access point, specify the name of the source bucket\n and the key of the source object, separated by a slash (/). For example, to copy the\n object reports/january.pdf from the general purpose bucket \n awsexamplebucket, use awsexamplebucket/reports/january.pdf.\n The value must be URL-encoded. To copy the\n object reports/january.pdf from the directory bucket \n awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf.\n The value must be URL-encoded.

    \n
  • \n
  • \n

    For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:::accesspoint//object/. For example, to copy the object reports/january.pdf through access point my-access-point owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. The value must be URL encoded.

    \n \n
      \n
    • \n

      Amazon S3 supports copy operations using Access points only when the source and destination buckets are in the same Amazon Web Services Region.

      \n
    • \n
    • \n

      Access points are not supported by directory buckets.

      \n
    • \n
    \n
    \n

    Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. For example, to copy the object reports/january.pdf through outpost my-outpost owned by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. The value must be URL-encoded.

    \n
  • \n
\n

If your source bucket versioning is enabled, the x-amz-copy-source header by default identifies the current\n version of an object to copy. If the current version is a delete marker, Amazon S3\n behaves as if the object was deleted. To copy a different version, use the\n versionId query parameter. Specifically, append ?versionId=\n to the value (for example,\n awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).\n If you don't specify a version ID, Amazon S3 copies the latest version of the source\n object.

\n

If you enable versioning on the destination bucket, Amazon S3 generates a unique version\n ID for the copied object. This version ID is different from the version ID\n of the source object. Amazon S3 returns the version ID of the copied object in the\n x-amz-version-id response header in the response.

\n

If you do not enable versioning or suspend it on the destination bucket, the version\n ID that Amazon S3 generates in the\n x-amz-version-id response header is always null.

\n \n

\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets.
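A short Soto sketch of the plain bucket/key form of the copy source described above; the bucket and object names are placeholders, and a specific source version can be selected by appending ?versionId=... to the copySource value:

```swift
import SotoS3

// Copy "reports/january.pdf" out of a source bucket into a destination bucket.
// Bucket and key names are placeholders; the copySource value is the URL-encoded
// "source-bucket/source-key" form described above.
func copyReport(_ s3: S3) async throws {
    let request = S3.CopyObjectRequest(
        bucket: "destination-bucket",
        copySource: "awsexamplebucket/reports/january.pdf",
        key: "reports/january-copy.pdf"
    )
    let response = try await s3.copyObject(request)
    print(response.versionId ?? "destination bucket is not versioned")
}
```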

\n
", "smithy.api#httpHeader": "x-amz-copy-source", - "smithy.api#required": {} + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "CopySource" + } } }, "CopySourceIfMatch": { @@ -18915,7 +18950,10 @@ "traits": { "smithy.api#documentation": "

The key of the destination object.

", "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#required": {}, + "smithy.rules#contextParam": { + "name": "Key" + } } }, "Metadata": { @@ -19408,7 +19446,7 @@ ], "smithy.api#http": { "method": "POST", - "uri": "/{Bucket}/{Key+}?uploads&x-id=CreateMultipartUpload", + "uri": "/{Bucket}/{Key+}?uploads", "code": 200 } } @@ -20683,6 +20721,14 @@ "traits": { "smithy.api#documentation": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state:

\n
    \n
  • \n

    If bucket versioning is not enabled, the operation permanently deletes the object.

    \n
  • \n
  • \n

    If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.

    \n
  • \n
  • \n

    If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

To remove a specific version, you must use the versionId query parameter. Using this\n query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header x-amz-delete-marker to true.
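A hedged Soto sketch of that versioned delete; bucket, key, and the version ID are placeholders, and omitting versionId falls back to the delete-marker behaviour described above:

```swift
import SotoS3

// Permanently delete one version of an object in a versioning-enabled bucket.
func deleteSpecificVersion(_ s3: S3, versionId: String) async throws {
    let request = S3.DeleteObjectRequest(
        bucket: "examplebucket",
        key: "HappyFace.jpg",
        versionId: versionId   // omit this to get the default delete-marker behaviour
    )
    let response = try await s3.deleteObject(request)
    // x-amz-delete-marker surfaces as `deleteMarker` on the response shape.
    print(response.deleteMarker ?? false)
}
```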

\n

If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa request\n header in the DELETE versionId request. Requests that include\n x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3\n User Guide. To see sample\n requests that use versioning, see Sample\n Request.

\n \n

\n Directory buckets - MFA delete is not supported by directory buckets.

\n
\n

You can delete objects by explicitly calling DELETE Object or calling \n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject, s3:DeleteObjectVersion, and\n s3:PutLifeCycleConfiguration actions.

\n \n

\n Directory buckets - S3 Lifecycle is not supported by directory buckets.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n DeleteObjects request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:DeleteObject\n - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

      \n
    • \n
    • \n

      \n \n s3:DeleteObjectVersion\n - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following action is related to DeleteObject:

\n ", "smithy.api#examples": [ + { + "title": "To delete an object (from a non-versioned bucket)", + "documentation": "The following example deletes an object from a non-versioned bucket.", + "input": { + "Bucket": "ExampleBucket", + "Key": "HappyFace.jpg" + } + }, { "title": "To delete an object", "documentation": "The following example deletes an object from an S3 bucket.", @@ -20691,14 +20737,6 @@ "Key": "objectkey.jpg" }, "output": {} - }, - { - "title": "To delete an object (from a non-versioned bucket)", - "documentation": "The following example deletes an object from a non-versioned bucket.", - "input": { - "Bucket": "ExampleBucket", - "Key": "HappyFace.jpg" - } } ], "smithy.api#http": { @@ -20948,7 +20986,7 @@ ], "smithy.api#http": { "method": "POST", - "uri": "/{Bucket}?delete&x-id=DeleteObjects", + "uri": "/{Bucket}?delete", "code": 200 } } @@ -24213,40 +24251,40 @@ "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the tag-set of an object. You send the GET request against the tagging\n subresource associated with the object.

\n

To use this operation, you must have permission to perform the\n s3:GetObjectTagging action. By default, the GET action returns information\n about current version of an object. For a versioned bucket, you can have multiple versions\n of an object in your bucket. To retrieve tags of any other version, use the versionId query\n parameter. You also need permission for the s3:GetObjectVersionTagging\n action.

\n

By default, the bucket owner has this permission and can grant this permission to\n others.

\n

For information about the Amazon S3 object tagging feature, see Object Tagging.

\n

The following actions are related to GetObjectTagging:

\n ", "smithy.api#examples": [ { - "title": "To retrieve tag set of a specific object version", - "documentation": "The following example retrieves tag set of an object. The request specifies object version.", + "title": "To retrieve tag set of an object", + "documentation": "The following example retrieves tag set of an object.", "input": { "Bucket": "examplebucket", - "Key": "exampleobject", - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "Key": "HappyFace.jpg" }, "output": { - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", + "VersionId": "null", "TagSet": [ { - "Value": "Value1", - "Key": "Key1" + "Value": "Value4", + "Key": "Key4" + }, + { + "Value": "Value3", + "Key": "Key3" } ] } }, { - "title": "To retrieve tag set of an object", - "documentation": "The following example retrieves tag set of an object.", + "title": "To retrieve tag set of a specific object version", + "documentation": "The following example retrieves tag set of an object. The request specifies object version.", "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "exampleobject", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "output": { - "VersionId": "null", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI", "TagSet": [ { - "Value": "Value4", - "Key": "Key4" - }, - { - "Value": "Value3", - "Key": "Key3" + "Value": "Value1", + "Key": "Key1" } ] } @@ -26690,44 +26728,6 @@ "traits": { "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns metadata about all versions of the objects in a bucket. You can also use request\n parameters as selection criteria to return metadata about a subset of all the object\n versions.

\n \n

To use this operation, you must have permission to perform the\n s3:ListBucketVersions action. Be aware of the name difference.

\n
\n \n

A 200 OK response can contain valid or invalid XML. Make sure to design\n your application to parse the contents of the response and handle it\n appropriately.

\n
\n

To use this operation, you must have READ access to the bucket.

\n

The following operations are related to ListObjectVersions:

\n ", "smithy.api#examples": [ - { - "title": "To list object versions", - "documentation": "The following example return versions of an object with specific key name prefix. The request limits the number of items returned to two. If there are are more than two object version, S3 returns NextToken in the response. You can specify this token value in your next request to fetch next set of object versions.", - "input": { - "Bucket": "examplebucket", - "Prefix": "HappyFace.jpg" - }, - "output": { - "Versions": [ - { - "LastModified": "2016-12-15T01:19:41.000Z", - "VersionId": "null", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "StorageClass": "STANDARD", - "Key": "HappyFace.jpg", - "Owner": { - "DisplayName": "owner-display-name", - "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" - }, - "IsLatest": true, - "Size": 3191 - }, - { - "LastModified": "2016-12-13T00:58:26.000Z", - "VersionId": "PHtexPGjH2y.zBgT8LmB7wwLI2mpbz.k", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "StorageClass": "STANDARD", - "Key": "HappyFace.jpg", - "Owner": { - "DisplayName": "owner-display-name", - "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" - }, - "IsLatest": false, - "Size": 3191 - } - ] - } - }, { "title": "To list object versions", "documentation": "The following example returns versions of an object with specific key name prefix.", @@ -30650,15 +30650,31 @@ "smithy.api#documentation": "

Adds an object to a bucket.

\n \n
    \n
  • \n

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.

    \n
  • \n
  • \n

    If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All\n objects written to the bucket by any account will be owned by the bucket owner.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

\n
    \n
  • \n

    \n S3 Object Lock - To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock in the Amazon S3 User Guide.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
  • \n

    \n S3 Versioning - When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID\n of that object being stored in Amazon S3. \n You can retrieve, replace, or delete any version of the object. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n PutObject request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:PutObject\n - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object\n to it.

      \n
    • \n
    • \n

      \n \n s3:PutObjectAcl\n - To successfully change the objects ACL of your PutObject request, you must have the s3:PutObjectAcl.

      \n
    • \n
    • \n

      \n \n s3:PutObjectTagging\n - To successfully set the tag-set with your PutObject request, you\n must have the s3:PutObjectTagging.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Data integrity with Content-MD5
\n
\n
    \n
  • \n

    \n General purpose bucket - To ensure that data is not corrupted traversing the network, use the\n Content-MD5 header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, \n you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.

    \n
  • \n
  • \n

    \n Directory bucket - This functionality is not supported for directory buckets.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

For more information about related Amazon S3 APIs, see the following:

\n ", "smithy.api#examples": [ { - "title": "To upload an object", - "documentation": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "title": "To upload an object and specify server-side encryption and object tags", + "documentation": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "exampleobject", + "ServerSideEncryption": "AES256", + "Tagging": "key1=value1&key2=value2" }, "output": { - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk", + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt", + "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", + "ServerSideEncryption": "AES256" + } + }, + { + "title": "To create an object.", + "documentation": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", + "input": { + "Body": "filetoupload", + "Bucket": "examplebucket", + "Key": "objectkey" + }, + "output": { + "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ", "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"" } }, @@ -30678,6 +30694,20 @@ "ServerSideEncryption": "AES256" } }, + { + "title": "To upload an object and specify optional tags", + "documentation": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "input": { + "Body": "c:\\HappyFace.jpg", + "Bucket": "examplebucket", + "Key": "HappyFace.jpg", + "Tagging": "key1=value1&key2=value2" + }, + "output": { + "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a", + "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"" + } + }, { "title": "To upload object and specify user-defined metadata", "documentation": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", @@ -30710,45 +30740,15 @@ } }, { - "title": "To create an object.", - "documentation": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", - "input": { - "Body": "filetoupload", - "Bucket": "examplebucket", - "Key": "objectkey" - }, - "output": { - "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"" - } - }, - { - "title": "To upload an object and specify server-side encryption and object tags", - "documentation": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", - "input": { - "Body": "filetoupload", - "Bucket": "examplebucket", - "Key": "exampleobject", - "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" - }, - "output": { - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt", - "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256" - } - }, - { - "title": "To upload an object and specify optional tags", - "documentation": "The following example uploads an object. The request specifies optional object tags. 
The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "title": "To upload an object", + "documentation": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", "input": { - "Body": "c:\\HappyFace.jpg", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "Tagging": "key1=value1&key2=value2" + "Key": "HappyFace.jpg" }, "output": { - "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a", + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk", "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"" } } @@ -32390,7 +32390,7 @@ ], "smithy.api#http": { "method": "POST", - "uri": "/{Bucket}/{Key+}?restore&x-id=RestoreObject", + "uri": "/{Bucket}/{Key+}?restore", "code": 200 } } @@ -32744,7 +32744,7 @@ "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have the s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n
    \n
  • \n

    \n CSV, JSON, and Parquet - Objects must be in CSV,\n JSON, or Parquet format.

    \n
  • \n
  • \n

    \n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.

    \n
  • \n
  • \n

    \n GZIP or BZIP2 - CSV and JSON files can be compressed\n using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that\n Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar\n compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support\n whole-object compression for Parquet objects.

    \n
  • \n
  • \n

    \n Server-side encryption - Amazon S3 Select supports\n querying objects that are protected with server-side encryption.

    \n

    For objects that are encrypted with customer-provided encryption keys\n (SSE-C), you must use HTTPS, and you must use the headers that are\n documented in the GetObject. For more\n information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys)\n in the Amazon S3 User Guide.

    \n

    For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently,\n so you don't need to specify anything. For more information about\n server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n
    \n
  • \n

    \n Range: Although you can specify a scan range for an Amazon S3 Select\n request (see SelectObjectContentRequest - ScanRange in the request\n parameters), you cannot specify the range of bytes of an object to return.\n

    \n
  • \n
  • \n

    The GLACIER, DEEP_ARCHIVE, and\n REDUCED_REDUNDANCY storage classes, or the\n ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class: You cannot\n query objects in the GLACIER, DEEP_ARCHIVE, or\n REDUCED_REDUNDANCY storage classes, nor objects in the\n ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class. For more\n information about storage classes, see Using Amazon S3\n storage classes in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", "smithy.api#http": { "method": "POST", - "uri": "/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent", + "uri": "/{Bucket}/{Key+}?select&select-type=2", "code": 200 } } @@ -34224,7 +34224,7 @@ }, "smithy.api#http": { "method": "POST", - "uri": "/WriteGetObjectResponse?x-id=WriteGetObjectResponse", + "uri": "/WriteGetObjectResponse", "code": 200 }, "smithy.rules#staticContextParams": { diff --git a/models/sagemaker.json b/models/sagemaker.json index f843b6bb34..e494fc80ec 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -1374,6 +1374,54 @@ "smithy.api#enumValue": "ml.g5.48xlarge" } }, + "ML_G6_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.xlarge" + } + }, + "ML_G6_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.2xlarge" + } + }, + "ML_G6_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.4xlarge" + } + }, + "ML_G6_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.8xlarge" + } + }, + "ML_G6_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.12xlarge" + } + }, + "ML_G6_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.16xlarge" + } + }, + "ML_G6_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.24xlarge" + } + }, + "ML_G6_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.48xlarge" + } + }, "ML_GEOSPATIAL_INTERACTIVE": { "target": "smithy.api#Unit", "traits": { @@ -2799,6 +2847,41 @@ } } }, + "com.amazonaws.sagemaker#AuthenticationRequestExtraParams": { + "type": "map", + "key": { + "target": "com.amazonaws.sagemaker#AuthenticationRequestExtraParamsKey" + }, + "value": { + "target": "com.amazonaws.sagemaker#AuthenticationRequestExtraParamsValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.sagemaker#AuthenticationRequestExtraParamsKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.sagemaker#AuthenticationRequestExtraParamsValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": ".*" + } + }, "com.amazonaws.sagemaker#AutoGenerateEndpointName": { "type": "boolean" }, @@ -2858,6 +2941,42 @@ "traits": { "smithy.api#enumValue": "fastai" } + }, + "CNN_QR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "cnn-qr" + } + }, + "DEEPAR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "deepar" + } + }, + "PROPHET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "prophet" + } + }, + "NPTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "npts" + } + }, + "ARIMA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "arima" + } + }, + "ETS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ets" + } } } }, @@ -2868,13 +2987,13 @@ "target": "com.amazonaws.sagemaker#AutoMLAlgorithms", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The selection of algorithms run on a dataset to train the model candidates of an Autopilot\n job.

\n \n

Selected algorithms must belong to the list corresponding to the training mode set in\n AutoMLJobConfig.Mode (ENSEMBLING or\n HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

\n
\n
    \n
  • \n

    In ENSEMBLING mode:

    \n
      \n
    • \n

      \"catboost\"

      \n
    • \n
    • \n

      \"extra-trees\"

      \n
    • \n
    • \n

      \"fastai\"

      \n
    • \n
    • \n

      \"lightgbm\"

      \n
    • \n
    • \n

      \"linear-learner\"

      \n
    • \n
    • \n

      \"nn-torch\"

      \n
    • \n
    • \n

      \"randomforest\"

      \n
    • \n
    • \n

      \"xgboost\"

      \n
    • \n
    \n
  • \n
  • \n

    In HYPERPARAMETER_TUNING mode:

    \n
      \n
    • \n

      \"linear-learner\"

      \n
    • \n
    • \n

      \"mlp\"

      \n
    • \n
    • \n

      \"xgboost\"

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

The selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.

\n
    \n
  • \n

    \n For the tabular problem type TabularJobConfig:\n

    \n \n

    Selected algorithms must belong to the list corresponding to the training mode\n set in AutoMLJobConfig.Mode (ENSEMBLING or\n HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

    \n
    \n
      \n
    • \n

      In ENSEMBLING mode:

      \n
        \n
      • \n

        \"catboost\"

        \n
      • \n
      • \n

        \"extra-trees\"

        \n
      • \n
      • \n

        \"fastai\"

        \n
      • \n
      • \n

        \"lightgbm\"

        \n
      • \n
      • \n

        \"linear-learner\"

        \n
      • \n
      • \n

        \"nn-torch\"

        \n
      • \n
      • \n

        \"randomforest\"

        \n
      • \n
      • \n

        \"xgboost\"

        \n
      • \n
      \n
    • \n
    • \n

      In HYPERPARAMETER_TUNING mode:

      \n
        \n
      • \n

        \"linear-learner\"

        \n
      • \n
      • \n

        \"mlp\"

        \n
      • \n
      • \n

        \"xgboost\"

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    \n For the time-series forecasting problem type TimeSeriesForecastingJobConfig:\n

    \n
      \n
    • \n

      Choose your algorithms from this list.

      \n
        \n
      • \n

        \"cnn-qr\"

        \n
      • \n
      • \n

        \"deepar\"

        \n
      • \n
      • \n

        \"prophet\"

        \n
      • \n
      • \n

        \"arima\"

        \n
      • \n
      • \n

        \"npts\"

        \n
      • \n
      • \n

        \"ets\"

        \n
      • \n
      \n
    • \n
    \n
  • \n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The collection of algorithms run on a dataset for training the model candidates of an\n Autopilot job.

" + "smithy.api#documentation": "

The selection of algorithms trained on your dataset to generate the model candidates for\n an Autopilot job.
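As a brief illustration of the time-series entries added above, a Soto call site might populate the corresponding shape as sketched below. The Swift type and enum case names (`AutoMLAlgorithmConfig`, `.deepar`, `.prophet`) are assumptions derived from this model update and should be checked against the generated SageMaker_shapes.swift.

    import SotoSageMaker

    // Hypothetical sketch: restrict an Autopilot time-series forecasting job to
    // two of the newly added candidate algorithms. Case names are assumed from
    // the enum values ("deepar", "prophet") introduced in this model update.
    let algorithmsConfig = [
        SageMaker.AutoMLAlgorithmConfig(autoMLAlgorithms: [.deepar, .prophet])
    ]
    // `algorithmsConfig` would then be supplied to the candidate generation
    // configuration of the time-series forecasting job config.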

" } }, "com.amazonaws.sagemaker#AutoMLAlgorithms": { @@ -2996,13 +3115,13 @@ "FeatureSpecificationS3Uri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "

A URL to the Amazon S3 data source containing selected features from the input data source to\n run an Autopilot job. You can input FeatureAttributeNames (optional) in JSON\n format as shown below:

\n

\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

\n

You can also specify the data type of the feature (optional) in the format shown\n below:

\n

\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }\n

\n \n

These column keys may not include the target column.

\n
\n

In ensembling mode, Autopilot only supports the following data types: numeric,\n categorical, text, and datetime. In HPO mode,\n Autopilot can support numeric, categorical, text,\n datetime, and sequence.

\n

If only FeatureDataTypes is provided, the column keys (col1,\n col2,..) should be a subset of the column names in the input data.

\n

If both FeatureDataTypes and FeatureAttributeNames are\n provided, then the column keys should be a subset of the column names provided in\n FeatureAttributeNames.

\n

The key name FeatureAttributeNames is fixed. The values listed in\n [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings\n containing unique values that are a subset of the column names in the input data. The list\n of columns provided must not include the target column.

" + "smithy.api#documentation": "

A URL to the Amazon S3 data source containing selected features from the input\n data source to run an Autopilot job. You can input FeatureAttributeNames\n (optional) in JSON format as shown below:

\n

\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

\n

You can also specify the data type of the feature (optional) in the format shown\n below:

\n

\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }\n

\n \n

These column keys may not include the target column.

\n
\n

In ensembling mode, Autopilot only supports the following data types: numeric,\n categorical, text, and datetime. In HPO mode,\n Autopilot can support numeric, categorical, text,\n datetime, and sequence.

\n

If only FeatureDataTypes is provided, the column keys (col1,\n col2,..) should be a subset of the column names in the input data.

\n

If both FeatureDataTypes and FeatureAttributeNames are\n provided, then the column keys should be a subset of the column names provided in\n FeatureAttributeNames.

\n

The key name FeatureAttributeNames is fixed. The values listed in\n [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings\n containing unique values that are a subset of the column names in the input data. The list\n of columns provided must not include the target column.

" } }, "AlgorithmsConfig": { "target": "com.amazonaws.sagemaker#AutoMLAlgorithmsConfig", "traits": { - "smithy.api#documentation": "

Stores the configuration information for the selection of algorithms used to train the\n model candidates.

\n

The list of available algorithms to choose from depends on the training mode set in\n \n AutoMLJobConfig.Mode\n .

\n
    \n
  • \n

    \n AlgorithmsConfig should not be set in AUTO training\n mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is provided, one AutoMLAlgorithms\n attribute must be set and one only.

    \n

    If the list of algorithms provided as values for AutoMLAlgorithms is\n empty, AutoMLCandidateGenerationConfig uses the full set of algorithms\n for the given training mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is not provided,\n AutoMLCandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

    \n
  • \n
\n

For the list of all algorithms per training mode, see \n AutoMLAlgorithmConfig.

\n

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "smithy.api#documentation": "

Stores the configuration information for the selection of algorithms trained on tabular data.

\n

The list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode\n .

\n
    \n
  • \n

    \n AlgorithmsConfig should not be set if the training mode is set to AUTO.

    \n
  • \n
  • \n

    When AlgorithmsConfig is provided, one AutoMLAlgorithms\n attribute must be set and one only.

    \n

    If the list of algorithms provided as values for AutoMLAlgorithms is\n empty, CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
\n

For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.

\n

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" } } }, @@ -3381,7 +3500,7 @@ "target": "com.amazonaws.sagemaker#AutoMLMetricEnum", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the objective metric used to measure the predictive quality of a machine\n learning system. During training, the model's parameters are updated iteratively to\n optimize its performance based on the feedback provided by the objective metric when\n evaluating the model on the validation dataset.

\n

The list of available metrics supported by Autopilot and the default metric applied when you\n do not specify a metric name explicitly depend on the problem type.

\n
    \n
  • \n

    For tabular problem types:

    \n
      \n
    • \n

      List of available metrics:

      \n
        \n
      • \n

        Regression: MAE,\n MSE, R2, RMSE\n

        \n
      • \n
      • \n

        Binary classification: Accuracy, AUC,\n BalancedAccuracy, F1,\n Precision, Recall\n

        \n
      • \n
      • \n

        Multiclass classification: Accuracy,\n BalancedAccuracy, F1macro,\n PrecisionMacro, RecallMacro\n

        \n
      • \n
      \n

      For a description of each metric, see Autopilot metrics for classification and regression.

      \n
    • \n
    • \n

      Default objective metrics:

      \n
        \n
      • \n

        Regression: MSE.

        \n
      • \n
      • \n

        Binary classification: F1.

        \n
      • \n
      • \n

        Multiclass classification: Accuracy.

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    For image or text classification problem types:

    \n \n
  • \n
  • \n

    For time-series forecasting problem types:

    \n
      \n
    • \n

      List of available metrics: RMSE, wQL,\n Average wQL, MASE, MAPE,\n WAPE\n

      \n

      For a description of each metric, see Autopilot metrics for\n time-series forecasting.

      \n
    • \n
    • \n

      Default objective metrics: AverageWeightedQuantileLoss\n

      \n
    • \n
    \n
  • \n
  • \n

    For text generation problem types (LLMs fine-tuning): \n Fine-tuning language models in Autopilot does not\n require setting the AutoMLJobObjective field. Autopilot fine-tunes LLMs\n without requiring multiple candidates to be trained and evaluated. \n Instead, using your dataset, Autopilot directly fine-tunes your target model to enhance a\n default objective metric, the cross-entropy loss. After fine-tuning a language model,\n you can evaluate the quality of its generated text using different metrics. \n For a list of the available metrics, see Metrics for\n fine-tuning LLMs in Autopilot.

    \n
  • \n
", + "smithy.api#documentation": "

The name of the objective metric used to measure the predictive quality of a machine\n learning system. During training, the model's parameters are updated iteratively to\n optimize its performance based on the feedback provided by the objective metric when\n evaluating the model on the validation dataset.

\n

The list of available metrics supported by Autopilot and the default metric applied when you\n do not specify a metric name explicitly depend on the problem type.

\n
    \n
  • \n

    For tabular problem types:

    \n
      \n
    • \n

      List of available metrics:

      \n
        \n
      • \n

        Regression: MAE, MSE, R2,\n RMSE\n

        \n
      • \n
      • \n

        Binary classification: Accuracy, AUC,\n BalancedAccuracy, F1,\n Precision, Recall\n

        \n
      • \n
      • \n

        Multiclass classification: Accuracy,\n BalancedAccuracy, F1macro,\n PrecisionMacro, RecallMacro\n

        \n
      • \n
      \n

      For a description of each metric, see Autopilot metrics for classification and regression.

      \n
    • \n
    • \n

      Default objective metrics:

      \n
        \n
      • \n

        Regression: MSE.

        \n
      • \n
      • \n

        Binary classification: F1.

        \n
      • \n
      • \n

        Multiclass classification: Accuracy.

        \n
      • \n
      \n
    • \n
    \n
  • \n
  • \n

    For image or text classification problem types:

    \n \n
  • \n
  • \n

    For time-series forecasting problem types:

    \n
      \n
    • \n

      List of available metrics: RMSE, wQL,\n Average wQL, MASE, MAPE,\n WAPE\n

      \n

      For a description of each metric, see Autopilot metrics for\n time-series forecasting.

      \n
    • \n
    • \n

      Default objective metrics: AverageWeightedQuantileLoss\n

      \n
    • \n
    \n
  • \n
  • \n

    For text generation problem types (LLMs fine-tuning): \n Fine-tuning language models in Autopilot does not\n require setting the AutoMLJobObjective field. Autopilot fine-tunes LLMs\n without requiring multiple candidates to be trained and evaluated. \n Instead, using your dataset, Autopilot directly fine-tunes your target model to enhance a\n default objective metric, the cross-entropy loss. After fine-tuning a language model,\n you can evaluate the quality of its generated text using different metrics. \n For a list of the available metrics, see Metrics for\n fine-tuning LLMs in Autopilot.

    \n
  • \n
", "smithy.api#required": {} } } @@ -4159,7 +4278,7 @@ "target": "com.amazonaws.sagemaker#AutoMLS3DataType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The data type.

\n
    \n
  • \n

    If you choose S3Prefix, S3Uri identifies a key name\n prefix. SageMaker uses all objects that match the specified key name prefix for model\n training.

    \n

    The S3Prefix should have the following format:

    \n

    \n s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE\n

    \n
  • \n
  • \n

    If you choose ManifestFile, S3Uri identifies an object\n that is a manifest file containing a list of object keys that you want SageMaker to use\n for model training.

    \n

    A ManifestFile should have the format shown below:

    \n

    \n [ {\"prefix\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/\"}, \n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1\",\n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2\",\n

    \n

    \n ... \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N\" ]\n

    \n
  • \n
  • \n

    If you choose AugmentedManifestFile, S3Uri identifies an\n object that is an augmented manifest file in JSON lines format. This file contains\n the data you want to use for model training. AugmentedManifestFile is\n available for V2 API jobs only (for example, for jobs created by calling\n CreateAutoMLJobV2).

    \n

    Here is a minimal, single-record example of an\n AugmentedManifestFile:

    \n

    \n {\"source-ref\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg\",\n

    \n

    \n \"label-metadata\": {\"class-name\": \"cat\" }

    \n

    For more information on AugmentedManifestFile, see Provide\n Dataset Metadata to Training Jobs with an Augmented Manifest File.

    \n
  • \n
", + "smithy.api#documentation": "

The data type.

\n
    \n
  • \n

    If you choose S3Prefix, S3Uri identifies a key name\n prefix. SageMaker uses all objects that match the specified key name prefix\n for model training.

    \n

    The S3Prefix should have the following format:

    \n

    \n s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE\n

    \n
  • \n
  • \n

    If you choose ManifestFile, S3Uri identifies an object\n that is a manifest file containing a list of object keys that you want SageMaker to use for model training.

    \n

    A ManifestFile should have the format shown below:

    \n

    \n [ {\"prefix\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/\"}, \n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1\",\n

    \n

    \n \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2\",\n

    \n

    \n ... \"DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N\" ]\n

    \n
  • \n
  • \n

    If you choose AugmentedManifestFile, S3Uri identifies an\n object that is an augmented manifest file in JSON lines format. This file contains\n the data you want to use for model training. AugmentedManifestFile is\n available for V2 API jobs only (for example, for jobs created by calling\n CreateAutoMLJobV2).

    \n

    Here is a minimal, single-record example of an\n AugmentedManifestFile:

    \n

    \n {\"source-ref\":\n \"s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/cats/cat.jpg\",\n

    \n

    \n \"label-metadata\": {\"class-name\": \"cat\" }

    \n

    For more information on AugmentedManifestFile, see Provide\n Dataset Metadata to Training Jobs with an Augmented Manifest File.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -4167,7 +4286,7 @@ "target": "com.amazonaws.sagemaker#S3Uri", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The URL to the Amazon S3 data source. The Uri refers to the Amazon S3 prefix or ManifestFile\n depending on the data type.

", + "smithy.api#documentation": "

The URL to the Amazon S3 data source. The Uri refers to the Amazon S3\n prefix or ManifestFile depending on the data type.
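A minimal Soto sketch of building this data source follows; the Swift member and case names (`AutoMLS3DataSource`, `.s3Prefix`) are assumptions based on this model and are not verified against the released library.

    import SotoSageMaker

    // Hypothetical sketch: an AutoML data source that uses an S3 key-name prefix.
    // SageMaker trains on every object that matches the prefix.
    let dataSource = SageMaker.AutoMLS3DataSource(
        s3DataType: .s3Prefix,
        s3Uri: "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE"
    )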

", "smithy.api#required": {} } } @@ -4843,13 +4962,13 @@ "ModelInsights": { "target": "com.amazonaws.sagemaker#ModelInsightsLocation", "traits": { - "smithy.api#documentation": "

The Amazon S3 prefix to the model insight artifacts generated for the AutoML candidate.

" + "smithy.api#documentation": "

The Amazon S3 prefix to the model insight artifacts generated for the AutoML\n candidate.

" } }, "BacktestResults": { "target": "com.amazonaws.sagemaker#BacktestResultsLocation", "traits": { - "smithy.api#documentation": "

The Amazon S3 prefix to the accuracy metrics and the inference results observed over the\n testing window. Available only for the time-series forecasting problem type.

" + "smithy.api#documentation": "

The Amazon S3 prefix to the accuracy metrics and the inference results observed\n over the testing window. Available only for the time-series forecasting problem\n type.

" } } }, @@ -4871,7 +4990,7 @@ "AlgorithmsConfig": { "target": "com.amazonaws.sagemaker#AutoMLAlgorithmsConfig", "traits": { - "smithy.api#documentation": "

Stores the configuration information for the selection of algorithms used to train model\n candidates on tabular data.

\n

The list of available algorithms to choose from depends on the training mode set in\n \n TabularJobConfig.Mode\n .

\n
    \n
  • \n

    \n AlgorithmsConfig should not be set in AUTO training\n mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is provided, one AutoMLAlgorithms\n attribute must be set and one only.

    \n

    If the list of algorithms provided as values for AutoMLAlgorithms is\n empty, CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
  • \n

    When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for the\n given training mode.

    \n
  • \n
\n

For the list of all algorithms per problem type and training mode, see \n AutoMLAlgorithmConfig.

\n

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "smithy.api#documentation": "

Your Autopilot job trains a default set of algorithms on your dataset. For tabular and\n time-series data, you can customize the algorithm list by selecting a subset of algorithms\n for your problem type.

\n

\n AlgorithmsConfig stores the customized selection of algorithms to train on\n your data.

\n
    \n
  • \n

    \n For the tabular problem type TabularJobConfig,\n the list of available algorithms to choose from depends on the training mode set\n in \n AutoMLJobConfig.Mode\n .

    \n
      \n
    • \n

      \n AlgorithmsConfig should not be set when the training mode\n AutoMLJobConfig.Mode is set to AUTO.

      \n
    • \n
    • \n

      When AlgorithmsConfig is provided, one\n AutoMLAlgorithms attribute must be set and one only.

      \n

      If the list of algorithms provided as values for\n AutoMLAlgorithms is empty,\n CandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

      \n
    • \n
    • \n

      When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for\n the given training mode.

      \n
    • \n
    \n

    For the list of all algorithms per training mode, see \n AlgorithmConfig.

    \n

    For more information on each algorithm, see the Algorithm support section in the Autopilot developer guide.

    \n
  • \n
  • \n

    \n For the time-series forecasting problem type TimeSeriesForecastingJobConfig,\n choose your algorithms from the list provided in\n \n AlgorithmConfig.

    \n

    For more information on each algorithm, see the Algorithms support for time-series forecasting section in the Autopilot developer guide.

    \n
      \n
    • \n

      When AlgorithmsConfig is provided, one\n AutoMLAlgorithms attribute must be set and one only.

      \n

      If the list of algorithms provided as values for\n AutoMLAlgorithms is empty,\n CandidateGenerationConfig uses the full set of algorithms for\n time-series forecasting.

      \n
    • \n
    • \n

      When AlgorithmsConfig is not provided,\n CandidateGenerationConfig uses the full set of algorithms for\n time-series forecasting.

      \n
    • \n
    \n
  • \n
" } } }, @@ -6359,6 +6478,43 @@ "smithy.api#pattern": "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:cluster/[a-z0-9]{12}$" } }, + "com.amazonaws.sagemaker#ClusterAvailabilityZone": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-z]{2}-[a-z]+-\\d[a-z]$" + } + }, + "com.amazonaws.sagemaker#ClusterAvailabilityZoneId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-z]{3}\\d-az\\d$" + } + }, + "com.amazonaws.sagemaker#ClusterEbsVolumeConfig": { + "type": "structure", + "members": { + "VolumeSizeInGB": { + "target": "com.amazonaws.sagemaker#ClusterEbsVolumeSizeInGB", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The size in gigabytes (GB) of the additional EBS volume to be attached to the instances\n in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each\n instance within the SageMaker HyperPod cluster instance group and mounted to\n /opt/sagemaker.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines the configuration for attaching an additional Amazon Elastic Block Store (EBS)\n volume to each instance of the SageMaker HyperPod cluster instance group.
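A hedged sketch of constructing this new union with Soto follows; it assumes the code generator emits the Smithy union as a Swift enum and carries member names over unchanged.

    import SotoSageMaker

    // Hypothetical sketch: attach an additional 500 GB EBS volume (mounted at
    // /opt/sagemaker) to every instance in a HyperPod instance group.
    let storageConfigs: [SageMaker.ClusterInstanceStorageConfig] = [
        .ebsVolumeConfig(.init(volumeSizeInGB: 500))
    ]
    // Pass `storageConfigs` as `instanceStorageConfigs` on each
    // ClusterInstanceGroupSpecification when calling CreateCluster or UpdateCluster.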

" + } + }, + "com.amazonaws.sagemaker#ClusterEbsVolumeSizeInGB": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 16384 + } + } + }, "com.amazonaws.sagemaker#ClusterInstanceCount": { "type": "integer", "traits": { @@ -6373,13 +6529,13 @@ "CurrentCount": { "target": "com.amazonaws.sagemaker#ClusterNonNegativeInstanceCount", "traits": { - "smithy.api#documentation": "

The number of instances that are currently in the instance group of a\n SageMaker HyperPod cluster.

" + "smithy.api#documentation": "

The number of instances that are currently in the instance group of a SageMaker HyperPod\n cluster.

" } }, "TargetCount": { "target": "com.amazonaws.sagemaker#ClusterInstanceCount", "traits": { - "smithy.api#documentation": "

The number of instances you specified to add to the instance group of a SageMaker HyperPod cluster.

" + "smithy.api#documentation": "

The number of instances you specified to add to the instance group of a SageMaker HyperPod\n cluster.

" } }, "InstanceGroupName": { @@ -6409,7 +6565,13 @@ "ThreadsPerCore": { "target": "com.amazonaws.sagemaker#ClusterThreadsPerCore", "traits": { - "smithy.api#documentation": "

The number you specified to TreadsPerCore in CreateCluster for\n enabling or disabling multithreading. For instance types that support multithreading, you\n can specify 1 for disabling multithreading and 2 for enabling multithreading. For more\n information, see the reference table of CPU cores and threads per CPU core per instance type in the Amazon Elastic Compute Cloud\n User Guide.

" + "smithy.api#documentation": "

The number you specified to ThreadsPerCore in CreateCluster for\n enabling or disabling multithreading. For instance types that support multithreading, you\n can specify 1 for disabling multithreading and 2 for enabling multithreading. For more\n information, see the reference table of CPU cores and\n threads per CPU core per instance type in the Amazon Elastic Compute Cloud User\n Guide.

" + } + }, + "InstanceStorageConfigs": { + "target": "com.amazonaws.sagemaker#ClusterInstanceStorageConfigs", + "traits": { + "smithy.api#documentation": "

The additional storage configurations for the instances in the SageMaker HyperPod cluster instance\n group.

" } } }, @@ -6440,7 +6602,7 @@ "target": "com.amazonaws.sagemaker#ClusterInstanceCount", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Specifies the number of instances to add to the instance group of a SageMaker HyperPod cluster.

", + "smithy.api#documentation": "

Specifies the number of instances to add to the instance group of a SageMaker HyperPod\n cluster.

", "smithy.api#required": {} } }, @@ -6479,7 +6641,13 @@ "ThreadsPerCore": { "target": "com.amazonaws.sagemaker#ClusterThreadsPerCore", "traits": { - "smithy.api#documentation": "

Specifies the value for Threads per core. For instance types that\n support multithreading, you can specify 1 for disabling multithreading and\n 2 for enabling multithreading. For instance types that doesn't support\n multithreading, specify 1. For more information, see the reference table of\n CPU cores and threads per CPU core per instance type in the Amazon Elastic Compute Cloud\n User Guide.

" + "smithy.api#documentation": "

Specifies the value for Threads per core. For instance\n types that support multithreading, you can specify 1 for disabling\n multithreading and 2 for enabling multithreading. For instance types that\n don't support multithreading, specify 1. For more information, see the\n reference table of CPU cores and\n threads per CPU core per instance type in the Amazon Elastic Compute Cloud User\n Guide.

" + } + }, + "InstanceStorageConfigs": { + "target": "com.amazonaws.sagemaker#ClusterInstanceStorageConfigs", + "traits": { + "smithy.api#documentation": "

Specifies the additional storage configurations for the instances in the SageMaker HyperPod cluster\n instance group.

" } } }, @@ -6495,10 +6663,30 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 5 + "max": 20 } } }, + "com.amazonaws.sagemaker#ClusterInstancePlacement": { + "type": "structure", + "members": { + "AvailabilityZone": { + "target": "com.amazonaws.sagemaker#ClusterAvailabilityZone", + "traits": { + "smithy.api#documentation": "

The Availability Zone where the node in the SageMaker HyperPod cluster is launched.

" + } + }, + "AvailabilityZoneId": { + "target": "com.amazonaws.sagemaker#ClusterAvailabilityZoneId", + "traits": { + "smithy.api#documentation": "

The unique identifier (ID) of the Availability Zone where the node in the SageMaker HyperPod cluster\n is launched.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the placement details for the node in the SageMaker HyperPod cluster, including the\n Availability Zone and the unique identifier (ID) of the Availability Zone.

" + } + }, "com.amazonaws.sagemaker#ClusterInstanceStatus": { "type": "enum", "members": { @@ -6556,6 +6744,32 @@ "smithy.api#documentation": "

Details of an instance in a SageMaker HyperPod cluster.

" } }, + "com.amazonaws.sagemaker#ClusterInstanceStorageConfig": { + "type": "union", + "members": { + "EbsVolumeConfig": { + "target": "com.amazonaws.sagemaker#ClusterEbsVolumeConfig", + "traits": { + "smithy.api#documentation": "

Defines the configuration for attaching additional Amazon Elastic Block Store (EBS)\n volumes to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is\n attached to each instance within the SageMaker HyperPod cluster instance group and mounted to\n /opt/sagemaker.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines the configuration for attaching additional storage to the instances in the\n SageMaker HyperPod cluster instance group.

" + } + }, + "com.amazonaws.sagemaker#ClusterInstanceStorageConfigs": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#ClusterInstanceStorageConfig" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1 + } + } + }, "com.amazonaws.sagemaker#ClusterInstanceType": { "type": "enum", "members": { @@ -6887,6 +7101,30 @@ "traits": { "smithy.api#documentation": "

The number of threads per CPU core you specified under\n CreateCluster.

" } + }, + "InstanceStorageConfigs": { + "target": "com.amazonaws.sagemaker#ClusterInstanceStorageConfigs", + "traits": { + "smithy.api#documentation": "

The configurations of additional storage specified to the instance group where the\n instance (node) is launched.

" + } + }, + "PrivatePrimaryIp": { + "target": "com.amazonaws.sagemaker#ClusterPrivatePrimaryIp", + "traits": { + "smithy.api#documentation": "

The private primary IP address of the SageMaker HyperPod cluster node.

" + } + }, + "PrivateDnsHostname": { + "target": "com.amazonaws.sagemaker#ClusterPrivateDnsHostname", + "traits": { + "smithy.api#documentation": "

The private DNS hostname of the SageMaker HyperPod cluster node.

" + } + }, + "Placement": { + "target": "com.amazonaws.sagemaker#ClusterInstancePlacement", + "traits": { + "smithy.api#documentation": "

The placement details of the SageMaker HyperPod cluster node.
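The new node-level fields (private primary IP, private DNS hostname, placement) surface through DescribeClusterNode. The sketch below shows one plausible way to read them with Soto's async APIs; client setup follows the Soto 6 pattern, the node ID is a placeholder matching the new pattern, and all Swift names are assumptions.

    import SotoCore
    import SotoSageMaker

    // Hypothetical sketch: fetch a HyperPod node and inspect the fields added in
    // this model update.
    let client = AWSClient(httpClientProvider: .createNew)
    defer { try? client.syncShutdown() }
    let sagemaker = SageMaker(client: client, region: .useast1)

    let node = try await sagemaker.describeClusterNode(
        .init(clusterName: "my-hyperpod-cluster", nodeId: "i-0123456789abcdef0")
    )
    // nodeDetails now carries privatePrimaryIp, privateDnsHostname and placement
    // (availabilityZone / availabilityZoneId).
    print(node.nodeDetails)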

" + } } }, "traits": { @@ -6900,7 +7138,7 @@ "min": 1, "max": 256 }, - "smithy.api#pattern": "^[a-zA-Z][-a-zA-Z0-9]*$" + "smithy.api#pattern": "^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$" } }, "com.amazonaws.sagemaker#ClusterNodeSummaries": { @@ -6965,6 +7203,18 @@ } } }, + "com.amazonaws.sagemaker#ClusterPrivateDnsHostname": { + "type": "string", + "traits": { + "smithy.api#pattern": "^ip-((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)-?\\b){4}\\..*$" + } + }, + "com.amazonaws.sagemaker#ClusterPrivatePrimaryIp": { + "type": "string", + "traits": { + "smithy.api#pattern": "^((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)\\.?\\b){4}$" + } + }, "com.amazonaws.sagemaker#ClusterSortBy": { "type": "enum", "members": { @@ -8612,7 +8862,7 @@ "target": "com.amazonaws.sagemaker#AutoMLOutputDataConfig", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Provides information about encryption and the Amazon S3 output path needed to store artifacts\n from an AutoML job. Format(s) supported: CSV.

", + "smithy.api#documentation": "

Provides information about encryption and the Amazon S3 output path needed to\n store artifacts from an AutoML job. Format(s) supported: CSV.

", "smithy.api#required": {} } }, @@ -8724,7 +8974,7 @@ "target": "com.amazonaws.sagemaker#AutoMLOutputDataConfig", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Provides information about encryption and the Amazon S3 output path needed to store artifacts\n from an AutoML job.

", + "smithy.api#documentation": "

Provides information about encryption and the Amazon S3 output path needed to\n store artifacts from an AutoML job.

", "smithy.api#required": {} } }, @@ -8812,7 +9062,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing\n persistent clusters for developing large machine learning models, such as large language\n models (LLMs) and diffusion models. To learn more, see Amazon SageMaker HyperPod in the Amazon SageMaker Developer Guide.

" + "smithy.api#documentation": "

Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing\n persistent clusters for developing large machine learning models, such as large language\n models (LLMs) and diffusion models. To learn more, see Amazon SageMaker HyperPod in the\n Amazon SageMaker Developer Guide.

" } }, "com.amazonaws.sagemaker#CreateClusterRequest": { @@ -8840,7 +9090,7 @@ "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { - "smithy.api#documentation": "

Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can\n add tags to your cluster in the same way you add them in other Amazon Web Services services\n that support tagging. To learn more about tagging Amazon Web Services resources in general,\n see Tagging Amazon Web Services Resources User Guide.

" + "smithy.api#documentation": "

Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can\n add tags to your cluster in the same way you add them in other Amazon Web Services services\n that support tagging. To learn more about tagging Amazon Web Services resources in general,\n see Tagging\n Amazon Web Services Resources User Guide.

" } } }, @@ -10072,7 +10322,96 @@ } ], "traits": { - "smithy.api#documentation": "

Create a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Create a hub.

" + } + }, + "com.amazonaws.sagemaker#CreateHubContentReference": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreateHubContentReferenceRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#CreateHubContentReferenceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + }, + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Create a hub content reference in order to add a model in the JumpStart public hub to a private hub.

" + } + }, + "com.amazonaws.sagemaker#CreateHubContentReferenceRequest": { + "type": "structure", + "members": { + "HubName": { + "target": "com.amazonaws.sagemaker#HubNameOrArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the hub to add the hub content reference to.

", + "smithy.api#required": {} + } + }, + "SageMakerPublicHubContentArn": { + "target": "com.amazonaws.sagemaker#SageMakerPublicHubContentArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the public hub content to reference.

", + "smithy.api#required": {} + } + }, + "HubContentName": { + "target": "com.amazonaws.sagemaker#HubContentName", + "traits": { + "smithy.api#documentation": "

The name of the hub content to reference.

" + } + }, + "MinVersion": { + "target": "com.amazonaws.sagemaker#HubContentVersion", + "traits": { + "smithy.api#documentation": "

The minimum version of the hub content to reference.

" + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "

Any tags associated with the hub content to reference.
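A hedged Soto sketch of the new operation follows; the public-hub content ARN is a placeholder and the Swift names are assumptions derived from this model.

    import SotoSageMaker

    // Hypothetical sketch: reference a JumpStart public-hub model from a private
    // hub. `sagemaker` is a configured SageMaker client as in the cluster-node
    // sketch above.
    let refRequest = SageMaker.CreateHubContentReferenceRequest(
        hubContentName: "my-referenced-model",
        hubName: "my-private-hub",
        minVersion: "1.0.0",
        sageMakerPublicHubContentArn:
            "arn:aws:sagemaker:us-east-1:aws:hub-content/SageMakerPublicHub/Model/example-model/1.0.0"
    )
    let refResponse = try await sagemaker.createHubContentReference(refRequest)
    print(refResponse.hubContentArn)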

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#CreateHubContentReferenceResponse": { + "type": "structure", + "members": { + "HubArn": { + "target": "com.amazonaws.sagemaker#HubArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the hub that the hub content reference was added to.

", + "smithy.api#required": {} + } + }, + "HubContentArn": { + "target": "com.amazonaws.sagemaker#HubContentArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the hub content.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.sagemaker#CreateHubRequest": { @@ -10913,6 +11252,99 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#CreateMlflowTrackingServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreateMlflowTrackingServerRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#CreateMlflowTrackingServerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an MLflow Tracking Server using a general purpose Amazon S3 bucket as the artifact\n store. For more information, see Create an MLflow Tracking\n Server.

" + } + }, + "com.amazonaws.sagemaker#CreateMlflowTrackingServerRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

A unique string identifying the tracking server name. This string is part of the tracking server\n ARN.

", + "smithy.api#required": {} + } + }, + "ArtifactStoreUri": { + "target": "com.amazonaws.sagemaker#S3Uri", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The S3 URI for a general purpose bucket to use as the MLflow Tracking Server artifact\n store.

", + "smithy.api#required": {} + } + }, + "TrackingServerSize": { + "target": "com.amazonaws.sagemaker#TrackingServerSize", + "traits": { + "smithy.api#documentation": "

The size of the tracking server you want to create. You can choose between\n \"Small\", \"Medium\", and \"Large\". The default MLflow\n Tracking Server configuration size is \"Small\". You can choose a size depending on\n the projected use of the tracking server such as the volume of data logged, number of users,\n and frequency of use.

\n

We recommend using a small tracking server for teams of up to 25 users, a medium tracking\n server for teams of up to 50 users, and a large tracking server for teams of up to 100 users.

" + } + }, + "MlflowVersion": { + "target": "com.amazonaws.sagemaker#MlflowVersion", + "traits": { + "smithy.api#documentation": "

The version of MLflow that the tracking server uses. To see which MLflow versions are\n available to use, see How it works.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow Tracking Server uses to\n access the artifact store in Amazon S3. The role should have AmazonS3FullAccess\n permissions. For more information on IAM permissions for tracking server creation, see\n Set up IAM permissions for MLflow.

", + "smithy.api#required": {} + } + }, + "AutomaticModelRegistration": { + "target": "com.amazonaws.sagemaker#Boolean", + "traits": { + "smithy.api#documentation": "

Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to True. \n To disable automatic model registration, set this value to False. If not specified, AutomaticModelRegistration defaults to False.

" + } + }, + "WeeklyMaintenanceWindowStart": { + "target": "com.amazonaws.sagemaker#WeeklyMaintenanceWindowStart", + "traits": { + "smithy.api#documentation": "

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

" + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "

Tags consisting of key-value pairs used to manage metadata for the tracking server.
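A hedged sketch of creating a tracking server with these members follows. Optional members such as the size and MLflow version are omitted so the defaults apply; the Swift names are assumptions based on this model and the Soto naming conventions.

    import SotoSageMaker

    // Hypothetical sketch: create an MLflow tracking server backed by a general
    // purpose S3 bucket. `sagemaker` is a configured SageMaker client as above.
    let mlflowRequest = SageMaker.CreateMlflowTrackingServerRequest(
        artifactStoreUri: "s3://my-mlflow-artifacts",
        automaticModelRegistration: true,
        roleArn: "arn:aws:iam::111122223333:role/MlflowTrackingServerRole",
        trackingServerName: "my-tracking-server",
        weeklyMaintenanceWindowStart: "TUE:03:30"
    )
    let mlflowResponse = try await sagemaker.createMlflowTrackingServer(mlflowRequest)
    print(mlflowResponse.trackingServerArn ?? "no ARN returned")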

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#CreateMlflowTrackingServerResponse": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the tracking server.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#CreateModel": { "type": "operation", "input": { @@ -11584,6 +12016,18 @@ "traits": { "smithy.api#documentation": "

The URI of the source for the model package. If you want to clone a model package,\n set it to the model package Amazon Resource Name (ARN). If you want to register a model,\n set it to the model ARN.

" } + }, + "SecurityConfig": { + "target": "com.amazonaws.sagemaker#ModelPackageSecurityConfig", + "traits": { + "smithy.api#documentation": "

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + } + }, + "ModelCard": { + "target": "com.amazonaws.sagemaker#ModelPackageModelCard", + "traits": { + "smithy.api#documentation": "

The model card associated with the model package. Since ModelPackageModelCard is\n tied to a model package, it is a specific usage of a model card and its schema is\n simplified compared to the schema of ModelCard. The \n ModelPackageModelCard schema does not include model_package_details,\n and model_overview is composed of the model_creator and\n model_artifact properties. For more information about the model package model\n card schema, see Model\n package model card schema. For more information about\n the model card associated with the model package, see View\n the Details of a Model Version.
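The two new members can be exercised together when registering a model version, roughly as sketched below. This hunk does not show the members of ModelPackageSecurityConfig or ModelPackageModelCard, so the KMS key ID, content, and status members used here are assumptions, as are the Swift names.

    import SotoSageMaker

    // Hypothetical sketch: register a model version with an encryption key and an
    // attached model card. Member names for the two new shapes are assumptions.
    let packageInput = SageMaker.CreateModelPackageInput(
        modelCard: .init(
            modelCardContent: #"{"model_overview": {"model_creator": "data-science-team"}}"#,
            modelCardStatus: .draft
        ),
        modelPackageGroupName: "my-model-group",
        securityConfig: .init(kmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID")
    )
    let package = try await sagemaker.createModelPackage(packageInput)
    print(package.modelPackageArn)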

" + } } }, "traits": { @@ -12168,6 +12612,65 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#CreatePresignedMlflowTrackingServerUrl": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreatePresignedMlflowTrackingServerUrlRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#CreatePresignedMlflowTrackingServerUrlResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a presigned URL that you can use to connect to the MLflow UI attached to your\n tracking server. For more information, see Launch the MLflow UI using a presigned URL.

" + } + }, + "com.amazonaws.sagemaker#CreatePresignedMlflowTrackingServerUrlRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the tracking server to connect to your MLflow UI.

", + "smithy.api#required": {} + } + }, + "ExpiresInSeconds": { + "target": "com.amazonaws.sagemaker#ExpiresInSeconds", + "traits": { + "smithy.api#documentation": "

The duration in seconds that your presigned URL is valid. The presigned URL can be used\n only once.

" + } + }, + "SessionExpirationDurationInSeconds": { + "target": "com.amazonaws.sagemaker#SessionExpirationDurationInSeconds", + "traits": { + "smithy.api#documentation": "

The duration in seconds that your MLflow UI session is valid.
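A short sketch of requesting the presigned URL, with the same naming caveats as the sketches above:

    import SotoSageMaker

    // Hypothetical sketch: mint a single-use presigned URL for the MLflow UI.
    // `sagemaker` is a configured SageMaker client as in the earlier sketches.
    let urlRequest = SageMaker.CreatePresignedMlflowTrackingServerUrlRequest(
        expiresInSeconds: 300,                     // presigned URL validity (single use)
        sessionExpirationDurationInSeconds: 5400,  // MLflow UI session lifetime
        trackingServerName: "my-tracking-server"
    )
    let urlResponse = try await sagemaker.createPresignedMlflowTrackingServerUrl(urlRequest)
    print(urlResponse.authorizedUrl ?? "no URL returned")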

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#CreatePresignedMlflowTrackingServerUrlResponse": { + "type": "structure", + "members": { + "AuthorizedUrl": { + "target": "com.amazonaws.sagemaker#TrackingServerUrl", + "traits": { + "smithy.api#documentation": "

A presigned URL with an authorization token.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrl": { "type": "operation", "input": { @@ -12428,7 +12931,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a space used for real time collaboration in a domain.

" + "smithy.api#documentation": "

Creates a private space or a space used for real time collaboration in a domain.

" } }, "com.amazonaws.sagemaker#CreateSpaceRequest": { @@ -13286,6 +13789,12 @@ "smithy.api#documentation": "

Configures notification of workers regarding available or expiring work items.

" } }, + "WorkerAccessConfiguration": { + "target": "com.amazonaws.sagemaker#WorkerAccessConfiguration", + "traits": { + "smithy.api#documentation": "

Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL.

" + } + }, "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { @@ -13389,7 +13898,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 2 + "max": 10 } } }, @@ -13401,7 +13910,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 1 + "max": 5 } } }, @@ -15004,7 +15513,7 @@ } ], "traits": { - "smithy.api#documentation": "

Delete a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Delete a hub.

" } }, "com.amazonaws.sagemaker#DeleteHubContent": { @@ -15024,14 +15533,63 @@ } ], "traits": { - "smithy.api#documentation": "

Delete the contents of a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Delete the contents of a hub.

" + } + }, + "com.amazonaws.sagemaker#DeleteHubContentReference": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DeleteHubContentReferenceRequest" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Delete a hub content reference in order to remove a model from a private hub.

" + } + }, + "com.amazonaws.sagemaker#DeleteHubContentReferenceRequest": { + "type": "structure", + "members": { + "HubName": { + "target": "com.amazonaws.sagemaker#HubNameOrArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the hub to delete the hub content reference from.

", + "smithy.api#required": {} + } + }, + "HubContentType": { + "target": "com.amazonaws.sagemaker#HubContentType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The type of hub content to delete.

", + "smithy.api#required": {} + } + }, + "HubContentName": { + "target": "com.amazonaws.sagemaker#HubContentName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the hub content to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.sagemaker#DeleteHubContentRequest": { "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub that you want to delete content in.

", @@ -15071,7 +15629,7 @@ "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub to delete.

", @@ -15329,6 +15887,53 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#DeleteMlflowTrackingServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DeleteMlflowTrackingServerRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#DeleteMlflowTrackingServerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an MLflow Tracking Server. For more information, see Clean up MLflow resources.

" + } + }, + "com.amazonaws.sagemaker#DeleteMlflowTrackingServerRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the tracking server to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#DeleteMlflowTrackingServerResponse": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

A TrackingServerArn object, the ARN of the tracking server that is deleted if\n successfully found.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#DeleteModel": { "type": "operation", "input": { @@ -16037,7 +16642,7 @@ "target": "com.amazonaws.sagemaker#DeleteWorkforceResponse" }, "traits": { - "smithy.api#documentation": "

Use this operation to delete a workforce.

\n

If you want to create a new workforce in an Amazon Web Services Region where\n a workforce already exists, use this operation to delete the \n existing workforce and then use CreateWorkforce\n to create a new workforce.

\n \n

If a private workforce contains one or more work teams, you must use \n the DeleteWorkteam\n operation to delete all work teams before you delete the workforce.\n If you try to delete a workforce that contains one or more work teams,\n you will recieve a ResourceInUse error.

\n
" + "smithy.api#documentation": "

Use this operation to delete a workforce.

\n

If you want to create a new workforce in an Amazon Web Services Region where\n a workforce already exists, use this operation to delete the \n existing workforce and then use CreateWorkforce\n to create a new workforce.

\n \n

If a private workforce contains one or more work teams, you must use \n the DeleteWorkteam\n operation to delete all work teams before you delete the workforce.\n If you try to delete a workforce that contains one or more work teams,\n you will receive a ResourceInUse error.

\n
" } }, "com.amazonaws.sagemaker#DeleteWorkforceRequest": { @@ -17029,7 +17634,7 @@ "BestCandidate": { "target": "com.amazonaws.sagemaker#AutoMLCandidate", "traits": { - "smithy.api#documentation": "

The best model candidate selected by SageMaker Autopilot using both the best objective metric and\n lowest InferenceLatency for\n an experiment.

" + "smithy.api#documentation": "

The best model candidate selected by SageMaker Autopilot using both the best\n objective metric and lowest InferenceLatency for\n an experiment.

" } }, "AutoMLJobStatus": { @@ -17155,7 +17760,7 @@ "target": "com.amazonaws.sagemaker#RoleArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ARN of the IAM role that has read permission to the input data location and\n write permission to the output data location in Amazon S3.

", + "smithy.api#documentation": "

The ARN of the IAM role that has read permission to the input data\n location and write permission to the output data location in Amazon S3.

", "smithy.api#required": {} } }, @@ -17263,7 +17868,7 @@ "SecurityConfig": { "target": "com.amazonaws.sagemaker#AutoMLSecurityConfig", "traits": { - "smithy.api#documentation": "

Returns the security configuration for traffic encryption or Amazon VPC settings.

" + "smithy.api#documentation": "

Returns the security configuration for traffic encryption or Amazon VPC\n settings.

" } } }, @@ -17302,7 +17907,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information of an instance (also called a node\n interchangeably) of a SageMaker HyperPod cluster.

" + "smithy.api#documentation": "

Retrieves information of a node (also called an instance\n interchangeably) of a SageMaker HyperPod cluster.

" } }, "com.amazonaws.sagemaker#DescribeClusterNodeRequest": { @@ -17312,7 +17917,7 @@ "target": "com.amazonaws.sagemaker#ClusterNameOrArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which the instance is.

", + "smithy.api#documentation": "

The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which the node is.

", "smithy.api#required": {} } }, @@ -17320,7 +17925,7 @@ "target": "com.amazonaws.sagemaker#ClusterNodeId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the instance.

", + "smithy.api#documentation": "

The ID of the SageMaker HyperPod cluster node.

", "smithy.api#required": {} } } @@ -17336,7 +17941,7 @@ "target": "com.amazonaws.sagemaker#ClusterNodeDetails", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The details of the instance.

", + "smithy.api#documentation": "

The details of the SageMaker HyperPod cluster node.

", "smithy.api#required": {} } } @@ -19337,7 +19942,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describe a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Describes a hub.

" } }, "com.amazonaws.sagemaker#DescribeHubContent": { @@ -19354,14 +19959,14 @@ } ], "traits": { - "smithy.api#documentation": "

Describe the content of a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Describe the content of a hub.

" } }, "com.amazonaws.sagemaker#DescribeHubContentRequest": { "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub that contains the content to describe.

", @@ -19480,6 +20085,24 @@ "smithy.api#required": {} } }, + "SageMakerPublicHubContentArn": { + "target": "com.amazonaws.sagemaker#SageMakerPublicHubContentArn", + "traits": { + "smithy.api#documentation": "

The ARN of the public hub content.

" + } + }, + "ReferenceMinVersion": { + "target": "com.amazonaws.sagemaker#ReferenceMinVersion", + "traits": { + "smithy.api#documentation": "

The minimum version of the hub content.

" + } + }, + "SupportStatus": { + "target": "com.amazonaws.sagemaker#HubContentSupportStatus", + "traits": { + "smithy.api#documentation": "

The support status of the hub content.

" + } + }, "HubContentSearchKeywords": { "target": "com.amazonaws.sagemaker#HubContentSearchKeywordList", "traits": { @@ -19523,7 +20146,7 @@ "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub to describe.

", @@ -20910,6 +21533,131 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#DescribeMlflowTrackingServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DescribeMlflowTrackingServerRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#DescribeMlflowTrackingServerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about an MLflow Tracking Server.

" + } + }, + "com.amazonaws.sagemaker#DescribeMlflowTrackingServerRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the MLflow Tracking Server to describe.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#DescribeMlflowTrackingServerResponse": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the described tracking server.

" + } + }, + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#documentation": "

The name of the described tracking server.

" + } + }, + "ArtifactStoreUri": { + "target": "com.amazonaws.sagemaker#S3Uri", + "traits": { + "smithy.api#documentation": "

The S3 URI of the general purpose bucket used as the MLflow Tracking Server\n artifact store.

" + } + }, + "TrackingServerSize": { + "target": "com.amazonaws.sagemaker#TrackingServerSize", + "traits": { + "smithy.api#documentation": "

The size of the described tracking server.

" + } + }, + "MlflowVersion": { + "target": "com.amazonaws.sagemaker#MlflowVersion", + "traits": { + "smithy.api#documentation": "

The MLflow version used for the described tracking server.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.sagemaker#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for an IAM role in your account that the described MLflow Tracking Server\n uses to access the artifact store in Amazon S3.

" + } + }, + "TrackingServerStatus": { + "target": "com.amazonaws.sagemaker#TrackingServerStatus", + "traits": { + "smithy.api#documentation": "

The current creation status of the described MLflow Tracking Server.

" + } + }, + "IsActive": { + "target": "com.amazonaws.sagemaker#IsTrackingServerActive", + "traits": { + "smithy.api#documentation": "

Whether the described MLflow Tracking Server is currently active.

" + } + }, + "TrackingServerUrl": { + "target": "com.amazonaws.sagemaker#TrackingServerUrl", + "traits": { + "smithy.api#documentation": "

The URL to connect to the MLflow user interface for the described tracking server.

" + } + }, + "WeeklyMaintenanceWindowStart": { + "target": "com.amazonaws.sagemaker#WeeklyMaintenanceWindowStart", + "traits": { + "smithy.api#documentation": "

The day and time of the week when weekly maintenance occurs on the described tracking server.

" + } + }, + "AutomaticModelRegistration": { + "target": "com.amazonaws.sagemaker#Boolean", + "traits": { + "smithy.api#documentation": "

Whether automatic registration of new MLflow models to the SageMaker Model Registry is enabled.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the described MLflow Tracking Server was created.

" + } + }, + "CreatedBy": { + "target": "com.amazonaws.sagemaker#UserContext" + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the described MLflow Tracking Server was last modified.

" + } + }, + "LastModifiedBy": { + "target": "com.amazonaws.sagemaker#UserContext" + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#DescribeModel": { "type": "operation", "input": { @@ -21497,7 +22245,7 @@ "target": "com.amazonaws.sagemaker#DescribeModelPackageOutput" }, "traits": { - "smithy.api#documentation": "

Returns a description of the specified model package, which is used to create SageMaker\n models or list them on Amazon Web Services Marketplace.

\n

To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services\n Marketplace.

" + "smithy.api#documentation": "

Returns a description of the specified model package, which is used to create SageMaker\n models or list them on Amazon Web Services Marketplace.

\n \n

If you provided a KMS Key ID when you created your model package,\n you will see the KMS\n Decrypt API call in your CloudTrail logs when you use this API.

\n
\n

To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services\n Marketplace.

" } }, "com.amazonaws.sagemaker#DescribeModelPackageGroup": { @@ -21762,6 +22510,18 @@ "traits": { "smithy.api#documentation": "

The URI of the source for the model package.

" } + }, + "SecurityConfig": { + "target": "com.amazonaws.sagemaker#ModelPackageSecurityConfig", + "traits": { + "smithy.api#documentation": "

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + } + }, + "ModelCard": { + "target": "com.amazonaws.sagemaker#ModelPackageModelCard", + "traits": { + "smithy.api#documentation": "

The model card associated with the model package. Since ModelPackageModelCard is\n tied to a model package, it is a specific usage of a model card and its schema is\n simplified compared to the schema of ModelCard. The \n ModelPackageModelCard schema does not include model_package_details,\n and model_overview is composed of the model_creator and\n model_artifact properties. For more information about the model package model\n card schema, see Model\n package model card schema. For more information about\n the model card associated with the model package, see View\n the Details of a Model Version.

" + } } }, "traits": { @@ -24156,7 +24916,7 @@ "target": "com.amazonaws.sagemaker#DescribeWorkteamResponse" }, "traits": { - "smithy.api#documentation": "

Gets information about a specific work team. You can see information such as the\n create date, the last updated date, membership information, and the work team's Amazon\n Resource Name (ARN).

" + "smithy.api#documentation": "

Gets information about a specific work team. You can see information such as the\n creation date, the last updated date, membership information, and the work team's Amazon\n Resource Name (ARN).

" } }, "com.amazonaws.sagemaker#DescribeWorkteamRequest": { @@ -26029,6 +26789,23 @@ "com.amazonaws.sagemaker#EnableSessionTagChaining": { "type": "boolean" }, + "com.amazonaws.sagemaker#EnabledOrDisabled": { + "type": "enum", + "members": { + "Enabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Enabled" + } + }, + "Disabled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Disabled" + } + } + } + }, "com.amazonaws.sagemaker#Endpoint": { "type": "structure", "members": { @@ -28969,6 +29746,12 @@ "smithy.api#required": {} } }, + "SageMakerPublicHubContentArn": { + "target": "com.amazonaws.sagemaker#SageMakerPublicHubContentArn", + "traits": { + "smithy.api#documentation": "

The ARN of the public hub content.

" + } + }, "HubContentVersion": { "target": "com.amazonaws.sagemaker#HubContentVersion", "traits": { @@ -29005,6 +29788,12 @@ "smithy.api#documentation": "

A description of the hub content.

" } }, + "SupportStatus": { + "target": "com.amazonaws.sagemaker#HubContentSupportStatus", + "traits": { + "smithy.api#documentation": "

The support status of the hub content.

" + } + }, "HubContentSearchKeywords": { "target": "com.amazonaws.sagemaker#HubContentSearchKeywordList", "traits": { @@ -29026,6 +29815,12 @@ "smithy.api#documentation": "

The date and time that the hub content was created.

", "smithy.api#required": {} } + }, + "OriginalCreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the hub content was originally created, before any updates or revisions.

" + } } }, "traits": { @@ -29044,8 +29839,7 @@ "smithy.api#length": { "min": 0, "max": 65535 - }, - "smithy.api#pattern": ".*" + } } }, "com.amazonaws.sagemaker#HubContentName": { @@ -29128,6 +29922,23 @@ } } }, + "com.amazonaws.sagemaker#HubContentSupportStatus": { + "type": "enum", + "members": { + "SUPPORTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Supported" + } + }, + "DEPRECATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deprecated" + } + } + } + }, "com.amazonaws.sagemaker#HubContentType": { "type": "enum", "members": { @@ -29142,6 +29953,12 @@ "traits": { "smithy.api#enumValue": "Notebook" } + }, + "MODEL_REFERENCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ModelReference" + } } } }, @@ -29257,6 +30074,12 @@ "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" } }, + "com.amazonaws.sagemaker#HubNameOrArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub\\/)?[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + } + }, "com.amazonaws.sagemaker#HubS3StorageConfig": { "type": "structure", "members": { @@ -30759,6 +31582,26 @@ "smithy.api#documentation": "

The IAM Identity details associated with the user. These details are\n associated with model package groups, model packages and project entities only.

" } }, + "com.amazonaws.sagemaker#IamPolicyConstraints": { + "type": "structure", + "members": { + "SourceIp": { + "target": "com.amazonaws.sagemaker#EnabledOrDisabled", + "traits": { + "smithy.api#documentation": "

When SourceIp is Enabled, the worker's IP address at the time a task is rendered in the worker portal is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. Amazon S3 checks this IP address, and it must match for the Amazon S3 resource to be rendered in the worker portal.

" + } + }, + "VpcSourceIp": { + "target": "com.amazonaws.sagemaker#EnabledOrDisabled", + "traits": { + "smithy.api#documentation": "

When VpcSourceIp is Enabled, the worker's IP address at the time a task is rendered in the private worker portal inside the VPC is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. To render the task successfully, Amazon S3 checks that the presigned URL is being accessed over an Amazon S3 VPC Endpoint and that the worker's IP address matches the IP address in the IAM policy. To learn more about configuring a private worker portal, see Use Amazon VPC mode from a private worker portal.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Use this parameter to specify a supported global condition key that is added to the IAM policy.
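For example, a work team's worker access configuration might carry these constraints as in the following sketch (values are placeholders):

    {
      "S3Presign": {
        "IamPolicyConstraints": {
          "SourceIp": "Enabled",
          "VpcSourceIp": "Disabled"
        }
      }
    }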

" + } + }, "com.amazonaws.sagemaker#IdempotencyToken": { "type": "string", "traits": { @@ -31303,7 +32146,7 @@ } ], "traits": { - "smithy.api#documentation": "

Import hub content.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Import hub content.

" } }, "com.amazonaws.sagemaker#ImportHubContentRequest": { @@ -31340,7 +32183,7 @@ } }, "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub to import content into.

", @@ -32073,6 +32916,22 @@ } } }, + "com.amazonaws.sagemaker#InferenceHubAccessConfig": { + "type": "structure", + "members": { + "HubContentArn": { + "target": "com.amazonaws.sagemaker#HubContentArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the hub content for which deployment access is allowed.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration information specifying which hub contents have accessible deployment options.

" + } + }, "com.amazonaws.sagemaker#InferenceImage": { "type": "string", "traits": { @@ -32118,9 +32977,7 @@ "Metrics": { "target": "com.amazonaws.sagemaker#RecommendationMetrics", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The metrics used to decide what recommendation to make.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The metrics used to decide what recommendation to make.

" } }, "EndpointConfiguration": { @@ -33510,6 +34367,54 @@ "traits": { "smithy.api#enumValue": "ml.r6id.32xlarge" } + }, + "ML_G6_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.xlarge" + } + }, + "ML_G6_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.2xlarge" + } + }, + "ML_G6_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.4xlarge" + } + }, + "ML_G6_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.8xlarge" + } + }, + "ML_G6_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.12xlarge" + } + }, + "ML_G6_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.16xlarge" + } + }, + "ML_G6_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.24xlarge" + } + }, + "ML_G6_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6.48xlarge" + } } } }, @@ -33620,6 +34525,23 @@ "smithy.api#pattern": "^arn:aws[a-z\\-]*:iam::\\d{12}:rolealias/?[a-zA-Z_0-9+=,.@\\-_/]+$" } }, + "com.amazonaws.sagemaker#IsTrackingServerActive": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Inactive" + } + } + } + }, "com.amazonaws.sagemaker#ItemIdentifierAttributeName": { "type": "string", "traits": { @@ -35057,7 +35979,7 @@ "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

The total number of items to return in the response. If the total\n number of items available is more than the value specified, a NextToken\n is provided in the response. To resume pagination, provide the NextToken\n value in the as part of a subsequent call. The default value is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" } }, "SortOrder": { @@ -35560,14 +36482,14 @@ "target": "com.amazonaws.sagemaker#ClusterNameOrArn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which you want to retrieve the list of nodes.

", + "smithy.api#documentation": "

The string name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster in which you want to retrieve the\n list of nodes.

", "smithy.api#required": {} } }, "CreationTimeAfter": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

A filter that returns nodes in a SageMaker HyperPod cluster created after the specified time. Timestamps are\n formatted according to the ISO 8601 standard.

\n

Acceptable formats include:
  • YYYY-MM-DDThh:mm:ss.sssTZD (UTC), for example, 2014-10-01T20:30:00.000Z
  • YYYY-MM-DDThh:mm:ss.sssTZD (with offset), for example, 2014-10-01T12:30:00.000-08:00
  • YYYY-MM-DD, for example, 2014-10-01
  • Unix time in seconds, for example, 1412195400. This is also referred to as Unix Epoch time and represents the number of seconds since midnight, January 1, 1970 UTC.

For more information about the timestamp format, see Timestamp in the Amazon Web Services Command Line Interface User\n Guide.

" + "smithy.api#documentation": "

A filter that returns nodes in a SageMaker HyperPod cluster created after the specified time.\n Timestamps are formatted according to the ISO 8601 standard.

\n

Acceptable formats include:
  • YYYY-MM-DDThh:mm:ss.sssTZD (UTC), for example, 2014-10-01T20:30:00.000Z
  • YYYY-MM-DDThh:mm:ss.sssTZD (with offset), for example, 2014-10-01T12:30:00.000-08:00
  • YYYY-MM-DD, for example, 2014-10-01
  • Unix time in seconds, for example, 1412195400. This is also referred to as Unix Epoch time and represents the number of seconds since midnight, January 1, 1970 UTC.

For more information about the timestamp format, see Timestamp in the Amazon Web Services Command Line Interface User\n Guide.

" } }, "CreationTimeBefore": { @@ -35659,7 +36581,7 @@ "CreationTimeBefore": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "

Set an end time for the time range during which you want to list SageMaker HyperPod clusters. A\n filter that returns nodes in a SageMaker HyperPod cluster created before the specified time. The acceptable\n formats are the same as the timestamp formats for CreationTimeAfter. For more\n information about the timestamp format, see Timestamp in the Amazon Web Services Command Line Interface User\n Guide.

" + "smithy.api#documentation": "

Set an end time for the time range during which you want to list SageMaker HyperPod clusters. A\n filter that returns nodes in a SageMaker HyperPod cluster created before the specified time. The\n acceptable formats are the same as the timestamp formats for\n CreationTimeAfter. For more information about the timestamp format, see Timestamp in the Amazon Web Services Command Line Interface User\n Guide.

" } }, "MaxResults": { @@ -36382,7 +37304,7 @@ "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

The total number of items to return in the response. If the total number of items\n available is more than the value specified, a NextToken is provided in the\n response. To resume pagination, provide the NextToken value in the as part of a\n subsequent call. The default value is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" } } }, @@ -37170,14 +38092,14 @@ } ], "traits": { - "smithy.api#documentation": "

List hub content versions.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

List hub content versions.

" } }, "com.amazonaws.sagemaker#ListHubContentVersionsRequest": { "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub to list the content versions of.

", @@ -37289,14 +38211,14 @@ } ], "traits": { - "smithy.api#documentation": "

List the contents of a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

List the contents of a hub.

" } }, "com.amazonaws.sagemaker#ListHubContentsRequest": { "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub to list the contents of.

", @@ -37395,7 +38317,7 @@ "target": "com.amazonaws.sagemaker#ListHubsResponse" }, "traits": { - "smithy.api#documentation": "

List all existing hubs.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

List all existing hubs.

" } }, "com.amazonaws.sagemaker#ListHubsRequest": { @@ -38649,6 +39571,100 @@ } } }, + "com.amazonaws.sagemaker#ListMlflowTrackingServers": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#ListMlflowTrackingServersRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#ListMlflowTrackingServersResponse" + }, + "traits": { + "smithy.api#documentation": "

Lists all MLflow Tracking Servers.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "TrackingServerSummaries", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.sagemaker#ListMlflowTrackingServersRequest": { + "type": "structure", + "members": { + "CreatedAfter": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

Use the CreatedAfter filter to only list tracking servers created after a\n specific date and time. Listed tracking servers are shown with a date and time such as\n \"2024-03-16T01:46:56+00:00\". The CreatedAfter parameter takes in a\n Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + } + }, + "CreatedBefore": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

Use the CreatedBefore filter to only list tracking servers created before a\n specific date and time. Listed tracking servers are shown with a date and time such as\n \"2024-03-16T01:46:56+00:00\". The CreatedBefore parameter takes in\n a Unix timestamp. To convert a date and time into a Unix timestamp, see EpochConverter.

" + } + }, + "TrackingServerStatus": { + "target": "com.amazonaws.sagemaker#TrackingServerStatus", + "traits": { + "smithy.api#documentation": "

Filter for tracking servers with a specified creation status.

" + } + }, + "MlflowVersion": { + "target": "com.amazonaws.sagemaker#MlflowVersion", + "traits": { + "smithy.api#documentation": "

Filter for tracking servers using the specified MLflow version.

" + } + }, + "SortBy": { + "target": "com.amazonaws.sagemaker#SortTrackingServerBy", + "traits": { + "smithy.api#documentation": "

Filter for tracking servers, sorted by name, creation time, or creation status.

" + } + }, + "SortOrder": { + "target": "com.amazonaws.sagemaker#SortOrder", + "traits": { + "smithy.api#documentation": "

Change the order of the listed tracking servers. By default, tracking servers are listed in Descending order by creation time. \n To change the list order, you can specify SortOrder to be Ascending.

" + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.sagemaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of tracking servers to list.
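For example, a filtered, paginated request built only from the members above might look like the following sketch (all values are placeholders):

    {
      "TrackingServerStatus": "Created",
      "MlflowVersion": "2.13.2",
      "SortBy": "CreationTime",
      "SortOrder": "Descending",
      "MaxResults": 10
    }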

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#ListMlflowTrackingServersResponse": { + "type": "structure", + "members": { + "TrackingServerSummaries": { + "target": "com.amazonaws.sagemaker#TrackingServerSummaryList", + "traits": { + "smithy.api#documentation": "

A list of tracking servers according to chosen filters.

" + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#ListModelBiasJobDefinitions": { "type": "operation", "input": { @@ -39278,6 +40294,12 @@ "traits": { "smithy.api#documentation": "

The sort order for results. The default is Ascending.

" } + }, + "CrossAccountFilterOption": { + "target": "com.amazonaws.sagemaker#CrossAccountFilterOption", + "traits": { + "smithy.api#documentation": "

A filter that returns either model groups shared with you or model groups in\n\t your own account. When the value is CrossAccount, the results show\n\t the resources made discoverable to you from other accounts. When the value is\n SameAccount or null, the results show resources from your\n \t account. The default is SameAccount.

" + } } }, "traits": { @@ -40908,7 +41930,7 @@ "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

The total number of items to return in the response. If the total number of items\n available is more than the value specified, a NextToken is provided in the\n response. To resume pagination, provide the NextToken value in the as part\n of a subsequent call. The default value is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" } }, "SortOrder": { @@ -41846,7 +42868,7 @@ "MaxResults": { "target": "com.amazonaws.sagemaker#MaxResults", "traits": { - "smithy.api#documentation": "

The total number of items to return in the response. If the total\n number of items available is more than the value specified, a NextToken\n is provided in the response. To resume pagination, provide the NextToken\n value in the as part of a subsequent call. The default value is 10.

" + "smithy.api#documentation": "

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are\n more results available than the value specified, a NextToken\n is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" } }, "SortOrder": { @@ -42615,6 +43637,16 @@ "smithy.api#pattern": "^1|2$" } }, + "com.amazonaws.sagemaker#MlflowVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 16 + }, + "smithy.api#pattern": "^[0-9]*.[0-9]*.[0-9]*$" + } + }, "com.amazonaws.sagemaker#Model": { "type": "structure", "members": { @@ -42742,7 +43774,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about the location that is configured for storing model\n artifacts.

\n

Model artifacts are outputs that result from training a model. They typically consist\n of trained parameters, a model definition that describes how to compute inferences, and\n other metadata. A SageMaker container stores your trained model artifacts in the\n /opt/ml/model directory. After training has completed, by default, these artifacts\n are uploaded to your Amazon S3 bucket as compressed files.

" + "smithy.api#documentation": "

Provides information about the location that is configured for storing model\n artifacts.

\n

Model artifacts are outputs that result from training a model. They typically consist\n of trained parameters, a model definition that describes how to compute inferences, and\n other metadata. A SageMaker container stores your trained model artifacts in the\n /opt/ml/model directory. After training has completed, by default,\n these artifacts are uploaded to your Amazon S3 bucket as compressed files.

" } }, "com.amazonaws.sagemaker#ModelBiasAppSpecification": { @@ -44256,6 +45288,12 @@ "smithy.api#documentation": "

The URI of the source for the model package.

" } }, + "SecurityConfig": { + "target": "com.amazonaws.sagemaker#ModelPackageSecurityConfig" + }, + "ModelCard": { + "target": "com.amazonaws.sagemaker#ModelPackageModelCard" + }, "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { @@ -44578,6 +45616,42 @@ "target": "com.amazonaws.sagemaker#ModelPackageGroupSummary" } }, + "com.amazonaws.sagemaker#ModelPackageModelCard": { + "type": "structure", + "members": { + "ModelCardContent": { + "target": "com.amazonaws.sagemaker#ModelCardContent", + "traits": { + "smithy.api#documentation": "

The content of the model card. The content must follow the schema described\n in Model\n Package Model Card Schema.

" + } + }, + "ModelCardStatus": { + "target": "com.amazonaws.sagemaker#ModelCardStatus", + "traits": { + "smithy.api#documentation": "

The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.
  • Draft: The model card is a work in progress.
  • PendingReview: The model card is pending review.
  • Approved: The model card is approved.
  • Archived: The model card is archived. No more updates can be made to the model card content. If you try to update the model card content, you will receive the message Model Card is in Archived state.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

The model card associated with the model package. Since ModelPackageModelCard is\n tied to a model package, it is a specific usage of a model card and its schema is\n simplified compared to the schema of ModelCard. The \n ModelPackageModelCard schema does not include model_package_details,\n and model_overview is composed of the model_creator and\n model_artifact properties. For more information about the model package model\n card schema, see Model\n package model card schema. For more information about\n the model card associated with the model package, see View\n the Details of a Model Version.

" + } + }, + "com.amazonaws.sagemaker#ModelPackageSecurityConfig": { + "type": "structure", + "members": { + "KmsKeyId": { + "target": "com.amazonaws.sagemaker#KmsKeyId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The KMS Key ID (KMSKeyId) used for encryption of model package information.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An optional Key Management Service\n key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with\n highly sensitive data.
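For example, a model package request might carry this configuration as in the following sketch (the key ID and card content are placeholders, and the card content shape is only indicative of the simplified model package card schema):

    {
      "SecurityConfig": {
        "KmsKeyId": "1234abcd-12ab-34cd-56ef-1234567890ab"
      },
      "ModelCard": {
        "ModelCardContent": "{\"model_overview\": {\"model_creator\": \"example-team\"}}",
        "ModelCardStatus": "Draft"
      }
    }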

" + } + }, "com.amazonaws.sagemaker#ModelPackageSortBy": { "type": "enum", "members": { @@ -47279,6 +48353,18 @@ "smithy.api#documentation": "

The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

", "smithy.api#required": {} } + }, + "Scope": { + "target": "com.amazonaws.sagemaker#Scope", + "traits": { + "smithy.api#documentation": "

An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access.

" + } + }, + "AuthenticationRequestExtraParams": { + "target": "com.amazonaws.sagemaker#AuthenticationRequestExtraParams", + "traits": { + "smithy.api#documentation": "

A string to string map of identifiers specific to the custom identity provider (IdP) being used.

" + } } }, "traits": { @@ -47329,6 +48415,18 @@ "traits": { "smithy.api#documentation": "

The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce.

" } + }, + "Scope": { + "target": "com.amazonaws.sagemaker#Scope", + "traits": { + "smithy.api#documentation": "

An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access.

" + } + }, + "AuthenticationRequestExtraParams": { + "target": "com.amazonaws.sagemaker#AuthenticationRequestExtraParams", + "traits": { + "smithy.api#documentation": "

A string to string map of identifiers specific to the custom identity provider (IdP) being used.

" + } } }, "traits": { @@ -49908,6 +51006,12 @@ "traits": { "smithy.api#documentation": "

Settings that control how the endpoint routes incoming traffic to the instances that the\n endpoint hosts.

" } + }, + "InferenceAmiVersion": { + "target": "com.amazonaws.sagemaker#ProductionVariantInferenceAmiVersion", + "traits": { + "smithy.api#documentation": "

Specifies an option from a collection of preconfigured Amazon Machine Image (AMI)\n images. Each image is configured by Amazon Web Services with a set of software and driver\n versions. Amazon Web Services optimizes these configurations for different machine\n learning workloads.

\n

By selecting an AMI version, you can ensure that your inference environment is\n compatible with specific software requirements, such as CUDA driver versions, Linux\n kernel versions, or Amazon Web Services Neuron driver versions.
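For example, a production variant that pins this AMI option might look like the following sketch (the variant, model, instance type, and count are placeholders):

    {
      "VariantName": "AllTraffic",
      "ModelName": "my-model",
      "InstanceType": "ml.g5.xlarge",
      "InitialInstanceCount": 1,
      "InferenceAmiVersion": "al2-ami-sagemaker-inference-gpu-2"
    }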

" + } } }, "traits": { @@ -49986,6 +51090,17 @@ "smithy.api#documentation": "

Specifies configuration for a core dump from the model container when the process\n crashes.

" } }, + "com.amazonaws.sagemaker#ProductionVariantInferenceAmiVersion": { + "type": "enum", + "members": { + "AL2_GPU_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "al2-ami-sagemaker-inference-gpu-2" + } + } + } + }, "com.amazonaws.sagemaker#ProductionVariantInstanceType": { "type": "enum", "members": { @@ -53042,33 +54157,25 @@ "CostPerHour": { "target": "com.amazonaws.sagemaker#Float", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Defines the cost per hour for the instance.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Defines the cost per hour for the instance.

" } }, "CostPerInference": { "target": "com.amazonaws.sagemaker#Float", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Defines the cost per inference for the instance.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Defines the cost per inference for the instance.

" } }, "MaxInvocations": { "target": "com.amazonaws.sagemaker#Integer", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The expected maximum number of requests per minute for the instance.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The expected maximum number of requests per minute for the instance.

" } }, "ModelLatency": { "target": "com.amazonaws.sagemaker#Integer", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The expected model latency at maximum invocation per minute for the instance.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The expected model latency at maximum invocation per minute for the instance.

" } }, "CpuUtilization": { @@ -53321,6 +54428,16 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.sagemaker#ReferenceMinVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 5, + "max": 14 + }, + "smithy.api#pattern": "^\\d{1,4}.\\d{1,4}.\\d{1,4}$" + } + }, "com.amazonaws.sagemaker#RegisterDevices": { "type": "operation", "input": { @@ -54428,6 +55545,12 @@ "traits": { "smithy.api#documentation": "

Specifies the access configuration file for the ML model. You can explicitly accept the\n model end-user license agreement (EULA) within the ModelAccessConfig. You are\n responsible for reviewing and complying with any applicable license terms and making sure\n they are acceptable for your use case before downloading or using a model.

" } + }, + "HubAccessConfig": { + "target": "com.amazonaws.sagemaker#InferenceHubAccessConfig", + "traits": { + "smithy.api#documentation": "

Configuration information for hub access.

" + } } }, "traits": { @@ -54471,6 +55594,20 @@ "smithy.api#pattern": "^(https|s3)://([^/]+)/?(.*)$" } }, + "com.amazonaws.sagemaker#S3Presign": { + "type": "structure", + "members": { + "IamPolicyConstraints": { + "target": "com.amazonaws.sagemaker#IamPolicyConstraints", + "traits": { + "smithy.api#documentation": "

Use this parameter to specify the allowed request source. Possible sources are either SourceIp or VpcSourceIp.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This object defines the access restrictions to Amazon S3 resources that are included in custom worker task templates using the Liquid filter, grant_read_access.

\n

To learn more about how custom templates are created, see Create custom worker task templates.

" + } + }, "com.amazonaws.sagemaker#S3StorageConfig": { "type": "structure", "members": { @@ -54594,6 +55731,9 @@ { "target": "com.amazonaws.sagemaker#CreateHub" }, + { + "target": "com.amazonaws.sagemaker#CreateHubContentReference" + }, { "target": "com.amazonaws.sagemaker#CreateHumanTaskUi" }, @@ -54618,6 +55758,9 @@ { "target": "com.amazonaws.sagemaker#CreateLabelingJob" }, + { + "target": "com.amazonaws.sagemaker#CreateMlflowTrackingServer" + }, { "target": "com.amazonaws.sagemaker#CreateModel" }, @@ -54657,6 +55800,9 @@ { "target": "com.amazonaws.sagemaker#CreatePresignedDomainUrl" }, + { + "target": "com.amazonaws.sagemaker#CreatePresignedMlflowTrackingServerUrl" + }, { "target": "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrl" }, @@ -54759,6 +55905,9 @@ { "target": "com.amazonaws.sagemaker#DeleteHubContent" }, + { + "target": "com.amazonaws.sagemaker#DeleteHubContentReference" + }, { "target": "com.amazonaws.sagemaker#DeleteHumanTaskUi" }, @@ -54777,6 +55926,9 @@ { "target": "com.amazonaws.sagemaker#DeleteInferenceExperiment" }, + { + "target": "com.amazonaws.sagemaker#DeleteMlflowTrackingServer" + }, { "target": "com.amazonaws.sagemaker#DeleteModel" }, @@ -54948,6 +56100,9 @@ { "target": "com.amazonaws.sagemaker#DescribeLineageGroup" }, + { + "target": "com.amazonaws.sagemaker#DescribeMlflowTrackingServer" + }, { "target": "com.amazonaws.sagemaker#DescribeModel" }, @@ -55173,6 +56328,9 @@ { "target": "com.amazonaws.sagemaker#ListLineageGroups" }, + { + "target": "com.amazonaws.sagemaker#ListMlflowTrackingServers" + }, { "target": "com.amazonaws.sagemaker#ListModelBiasJobDefinitions" }, @@ -55311,6 +56469,9 @@ { "target": "com.amazonaws.sagemaker#StartInferenceExperiment" }, + { + "target": "com.amazonaws.sagemaker#StartMlflowTrackingServer" + }, { "target": "com.amazonaws.sagemaker#StartMonitoringSchedule" }, @@ -55344,6 +56505,9 @@ { "target": "com.amazonaws.sagemaker#StopLabelingJob" }, + { + "target": "com.amazonaws.sagemaker#StopMlflowTrackingServer" + }, { "target": "com.amazonaws.sagemaker#StopMonitoringSchedule" }, @@ -55425,6 +56589,9 @@ { "target": "com.amazonaws.sagemaker#UpdateInferenceExperiment" }, + { + "target": "com.amazonaws.sagemaker#UpdateMlflowTrackingServer" + }, { "target": "com.amazonaws.sagemaker#UpdateModelCard" }, @@ -56525,6 +57692,16 @@ "target": "com.amazonaws.sagemaker#SageMakerImageVersionAlias" } }, + "com.amazonaws.sagemaker#SageMakerPublicHubContentArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 255 + }, + "smithy.api#pattern": "^arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:aws:hub-content\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}$" + } + }, "com.amazonaws.sagemaker#SagemakerServicecatalogStatus": { "type": "enum", "members": { @@ -56687,6 +57864,16 @@ } } }, + "com.amazonaws.sagemaker#Scope": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^[!#-\\[\\]-~]+( [!#-\\[\\]-~]+)*$" + } + }, "com.amazonaws.sagemaker#Search": { "type": "operation", "input": { @@ -57424,12 +58611,12 @@ "EnableSessionTagChaining": { "target": "com.amazonaws.sagemaker#EnableSessionTagChaining", "traits": { - "smithy.api#documentation": "

Set to True to allow SageMaker to extract session tags from a \n training job creation role and reuse these tags when assuming the training \n job execution role.

" + "smithy.api#documentation": "

Set to True to allow SageMaker to extract session tags from a training job\n creation role and reuse these tags when assuming the training job execution role.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about attribute-based access control (ABAC) for a training job.\n The session chaining configuration uses Amazon Security Token Service (STS) for your\n training job to request temporary, limited-privilege credentials to tenants. For more\n information, see Attribute-based access control (ABAC) for multi-tenancy\n training.

" + "smithy.api#documentation": "

Contains information about attribute-based access control (ABAC) for a training job.\n The session chaining configuration uses Amazon Security Token Service (STS) for your training job to\n request temporary, limited-privilege credentials to tenants. For more information, see\n Attribute-based access control (ABAC) for multi-tenancy training.

" } }, "com.amazonaws.sagemaker#SessionExpirationDurationInSeconds": { @@ -57810,6 +58997,29 @@ } } }, + "com.amazonaws.sagemaker#SortTrackingServerBy": { + "type": "enum", + "members": { + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Name" + } + }, + "CREATION_TIME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CreationTime" + } + }, + "STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Status" + } + } + } + }, "com.amazonaws.sagemaker#SortTrialComponentsBy": { "type": "enum", "members": { @@ -57913,7 +59123,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of IP address ranges (CIDRs). Used to create an allow\n list of IP addresses for a private workforce. Workers will only be able to login to their worker portal from an \n IP address within this range. By default, a workforce isn't restricted to specific IP addresses.

" + "smithy.api#documentation": "

A list of IP address ranges (CIDRs). Used to create an allow\n list of IP addresses for a private workforce. Workers will only be able to log in to their worker portal from an\n IP address within this range. By default, a workforce isn't restricted to specific IP addresses.

" } }, "com.amazonaws.sagemaker#SourceType": { @@ -58436,6 +59646,56 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#StartMlflowTrackingServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#StartMlflowTrackingServerRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#StartMlflowTrackingServerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ConflictException" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Programmatically start an MLflow Tracking Server.

" + } + }, + "com.amazonaws.sagemaker#StartMlflowTrackingServerRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the tracking server to start.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#StartMlflowTrackingServerResponse": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the started tracking server.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#StartMonitoringSchedule": { "type": "operation", "input": { @@ -59020,6 +60280,56 @@ "smithy.api#input": {} } }, + "com.amazonaws.sagemaker#StopMlflowTrackingServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#StopMlflowTrackingServerRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#StopMlflowTrackingServerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ConflictException" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Programmatically stop an MLflow Tracking Server.

" + } + }, + "com.amazonaws.sagemaker#StopMlflowTrackingServerRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the tracking server to stop.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#StopMlflowTrackingServerResponse": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the stopped tracking server.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#StopMonitoringSchedule": { "type": "operation", "input": { @@ -59570,7 +60880,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a work team of a vendor that does the a labelling job.

" + "smithy.api#documentation": "

Describes a work team of a vendor that does the labelling job.

" } }, "com.amazonaws.sagemaker#SubscribedWorkteams": { @@ -59654,7 +60964,7 @@ "FeatureSpecificationS3Uri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "

A URL to the Amazon S3 data source containing selected features from the input data source to\n run an Autopilot job V2. You can input FeatureAttributeNames (optional) in JSON\n format as shown below:

\n

\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

\n

You can also specify the data type of the feature (optional) in the format shown\n below:

\n

\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }\n

\n \n

These column keys may not include the target column.

\n
\n

In ensembling mode, Autopilot only supports the following data types: numeric,\n categorical, text, and datetime. In HPO mode,\n Autopilot can support numeric, categorical, text,\n datetime, and sequence.

\n

If only FeatureDataTypes is provided, the column keys (col1,\n col2,..) should be a subset of the column names in the input data.

\n

If both FeatureDataTypes and FeatureAttributeNames are\n provided, then the column keys should be a subset of the column names provided in\n FeatureAttributeNames.

\n

The key name FeatureAttributeNames is fixed. The values listed in\n [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings\n containing unique values that are a subset of the column names in the input data. The list\n of columns provided must not include the target column.

" + "smithy.api#documentation": "

A URL to the Amazon S3 data source containing selected features from the input\n data source to run an Autopilot job V2. You can input FeatureAttributeNames\n (optional) in JSON format as shown below:

\n

\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

\n

You can also specify the data type of the feature (optional) in the format shown\n below:

\n

\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }\n

\n \n

These column keys may not include the target column.

\n
\n

In ensembling mode, Autopilot only supports the following data types: numeric,\n categorical, text, and datetime. In HPO mode,\n Autopilot can support numeric, categorical, text,\n datetime, and sequence.

\n

If only FeatureDataTypes is provided, the column keys (col1,\n col2,..) should be a subset of the column names in the input data.

\n

If both FeatureDataTypes and FeatureAttributeNames are\n provided, then the column keys should be a subset of the column names provided in\n FeatureAttributeNames.

\n

The key name FeatureAttributeNames is fixed. The values listed in\n [\"col1\", \"col2\", ...] are case sensitive and should be a list of strings\n containing unique values that are a subset of the column names in the input data. The list\n of columns provided must not include the target column.

" } }, "Mode": { @@ -60573,7 +61883,7 @@ "FeatureSpecificationS3Uri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "

A URL to the Amazon S3 data source containing additional selected features that complement\n the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig.\n When not provided, the AutoML job V2 includes all the columns from the original dataset\n that are not already declared in TimeSeriesConfig. If provided, the AutoML job\n V2 only considers these additional columns as a complement to the ones declared in\n TimeSeriesConfig.

\n

You can input FeatureAttributeNames (optional) in JSON format as shown\n below:

\n

\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

\n

You can also specify the data type of the feature (optional) in the format shown\n below:

\n

\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }\n

\n

Autopilot supports the following data types: numeric, categorical,\n text, and datetime.

\n \n

These column keys must not include any column set in\n TimeSeriesConfig.

\n
" + "smithy.api#documentation": "

A URL to the Amazon S3 data source containing additional selected features that\n complement the target, itemID, timestamp, and grouped columns set in\n TimeSeriesConfig. When not provided, the AutoML job V2 includes all the\n columns from the original dataset that are not already declared in\n TimeSeriesConfig. If provided, the AutoML job V2 only considers these\n additional columns as a complement to the ones declared in\n TimeSeriesConfig.

\n

You can input FeatureAttributeNames (optional) in JSON format as shown\n below:

\n

\n { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }.

\n

You can also specify the data type of the feature (optional) in the format shown\n below:

\n

\n { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } }\n

\n

Autopilot supports the following data types: numeric, categorical,\n text, and datetime.

\n \n

These column keys must not include any column set in\n TimeSeriesConfig.

\n
" } }, "CompletionCriteria": { @@ -60620,6 +61930,9 @@ "traits": { "smithy.api#documentation": "

The collection of holiday featurization attributes used to incorporate national holiday\n information into your forecasting model.

" } + }, + "CandidateGenerationConfig": { + "target": "com.amazonaws.sagemaker#CandidateGenerationConfig" } }, "traits": { @@ -60678,6 +61991,227 @@ } } }, + "com.amazonaws.sagemaker#TrackingServerArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-tracking-server/" + } + }, + "com.amazonaws.sagemaker#TrackingServerName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}$" + } + }, + "com.amazonaws.sagemaker#TrackingServerSize": { + "type": "enum", + "members": { + "S": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Small" + } + }, + "M": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Medium" + } + }, + "L": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Large" + } + } + } + }, + "com.amazonaws.sagemaker#TrackingServerStatus": { + "type": "enum", + "members": { + "CREATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Creating" + } + }, + "CREATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Created" + } + }, + "CREATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CreateFailed" + } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Updating" + } + }, + "UPDATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Updated" + } + }, + "UPDATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UpdateFailed" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeleteFailed" + } + }, + "STOPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Stopping" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Stopped" + } + }, + "STOP_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "StopFailed" + } + }, + "STARTING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Starting" + } + }, + "STARTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Started" + } + }, + "START_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "StartFailed" + } + }, + "MAINTENANCE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MaintenanceInProgress" + } + }, + "MAINTENANCE_COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MaintenanceComplete" + } + }, + "MAINTENANCE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MaintenanceFailed" + } + } + } + }, + "com.amazonaws.sagemaker#TrackingServerSummary": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

The ARN of a listed tracking server.

" + } + }, + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#documentation": "

The name of a listed tracking server.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of a listed tracking server.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The last modified time of a listed tracking server.

" + } + }, + "TrackingServerStatus": { + "target": "com.amazonaws.sagemaker#TrackingServerStatus", + "traits": { + "smithy.api#documentation": "

The creation status of a listed tracking server.

" + } + }, + "IsActive": { + "target": "com.amazonaws.sagemaker#IsTrackingServerActive", + "traits": { + "smithy.api#documentation": "

The activity status of a listed tracking server.

" + } + }, + "MlflowVersion": { + "target": "com.amazonaws.sagemaker#MlflowVersion", + "traits": { + "smithy.api#documentation": "

The MLflow version used for a listed tracking server.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of a listed tracking server.

" + } + }, + "com.amazonaws.sagemaker#TrackingServerSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#TrackingServerSummary" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.sagemaker#TrackingServerUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, "com.amazonaws.sagemaker#TrafficDurationInSeconds": { "type": "integer", "traits": { @@ -64932,14 +66466,14 @@ } ], "traits": { - "smithy.api#documentation": "

Update a hub.

\n \n

Hub APIs are only callable through SageMaker Studio.

\n
" + "smithy.api#documentation": "

Update a hub.

" } }, "com.amazonaws.sagemaker#UpdateHubRequest": { "type": "structure", "members": { "HubName": { - "target": "com.amazonaws.sagemaker#HubName", + "target": "com.amazonaws.sagemaker#HubNameOrArn", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "

The name of the hub to update.

", @@ -65375,6 +66909,83 @@ "smithy.api#output": {} } }, + "com.amazonaws.sagemaker#UpdateMlflowTrackingServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#UpdateMlflowTrackingServerRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#UpdateMlflowTrackingServerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ConflictException" + }, + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Updates properties of an existing MLflow Tracking Server.

" + } + }, + "com.amazonaws.sagemaker#UpdateMlflowTrackingServerRequest": { + "type": "structure", + "members": { + "TrackingServerName": { + "target": "com.amazonaws.sagemaker#TrackingServerName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the MLflow Tracking Server to update.

", + "smithy.api#required": {} + } + }, + "ArtifactStoreUri": { + "target": "com.amazonaws.sagemaker#S3Uri", + "traits": { + "smithy.api#documentation": "

The new S3 URI for the general purpose bucket to use as the artifact store for the MLflow\n Tracking Server.

" + } + }, + "TrackingServerSize": { + "target": "com.amazonaws.sagemaker#TrackingServerSize", + "traits": { + "smithy.api#documentation": "

The new size for the MLflow Tracking Server.

" + } + }, + "AutomaticModelRegistration": { + "target": "com.amazonaws.sagemaker#Boolean", + "traits": { + "smithy.api#documentation": "

Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. \n To enable automatic model registration, set this value to True. \n To disable automatic model registration, set this value to False. \n If not specified, AutomaticModelRegistration defaults to False.\n

" + } + }, + "WeeklyMaintenanceWindowStart": { + "target": "com.amazonaws.sagemaker#WeeklyMaintenanceWindowStart", + "traits": { + "smithy.api#documentation": "

The new weekly maintenance window start day and time to update. The maintenance window day and time should be \n in Coordinated Universal Time (UTC) 24-hour standard time. For example: TUE:03:30.
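For example, an update request using these members might look like the following sketch (names and values are placeholders):

    {
      "TrackingServerName": "my-tracking-server",
      "TrackingServerSize": "Medium",
      "AutomaticModelRegistration": true,
      "WeeklyMaintenanceWindowStart": "TUE:03:30"
    }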

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemaker#UpdateMlflowTrackingServerResponse": { + "type": "structure", + "members": { + "TrackingServerArn": { + "target": "com.amazonaws.sagemaker#TrackingServerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the updated MLflow Tracking Server.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemaker#UpdateModelCard": { "type": "operation", "input": { @@ -65511,6 +67122,12 @@ "traits": { "smithy.api#documentation": "

The URI of the source for the model package.

" } + }, + "ModelCard": { + "target": "com.amazonaws.sagemaker#ModelPackageModelCard", + "traits": { + "smithy.api#documentation": "

The model card associated with the model package. Since ModelPackageModelCard is\n tied to a model package, it is a specific usage of a model card and its schema is\n simplified compared to the schema of ModelCard. The \n ModelPackageModelCard schema does not include model_package_details,\n and model_overview is composed of the model_creator and\n model_artifact properties. For more information about the model package model\n card schema, see Model\n package model card schema. For more information about\n the model card associated with the model package, see View\n the Details of a Model Version.

" + } } }, "traits": { @@ -66560,6 +68177,12 @@ "traits": { "smithy.api#documentation": "

Configures SNS topic notifications for available or expiring work items

" } + }, + "WorkerAccessConfiguration": { + "target": "com.amazonaws.sagemaker#WorkerAccessConfiguration", + "traits": { + "smithy.api#documentation": "

Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL.

" + } } }, "traits": { @@ -67203,7 +68826,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 10 + "max": 20 } } }, @@ -67285,6 +68908,30 @@ "smithy.api#documentation": "

Status and billing information about the warm pool.

" } }, + "com.amazonaws.sagemaker#WeeklyMaintenanceWindowStart": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 9 + }, + "smithy.api#pattern": "^(Mon|Tue|Wed|Thu|Fri|Sat|Sun):([01]\\d|2[0-3]):([0-5]\\d)$" + } + }, + "com.amazonaws.sagemaker#WorkerAccessConfiguration": { + "type": "structure", + "members": { + "S3Presign": { + "target": "com.amazonaws.sagemaker#S3Presign", + "traits": { + "smithy.api#documentation": "

Defines any Amazon S3 resource constraints.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL.

" + } + }, "com.amazonaws.sagemaker#Workforce": { "type": "structure", "members": { @@ -67484,7 +69131,7 @@ "SecurityGroupIds": { "target": "com.amazonaws.sagemaker#WorkforceSecurityGroupIds", "traits": { - "smithy.api#documentation": "

The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.

" + "smithy.api#documentation": "

The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.

" } }, "Subnets": { @@ -67652,6 +69299,12 @@ "traits": { "smithy.api#documentation": "

Configures SNS notifications of available or expiring work items for work\n teams.

" } + }, + "WorkerAccessConfiguration": { + "target": "com.amazonaws.sagemaker#WorkerAccessConfiguration", + "traits": { + "smithy.api#documentation": "

Describes any access constraints that have been defined for Amazon S3 resources.

" + } } }, "traits": { diff --git a/models/secrets-manager.json b/models/secrets-manager.json index 33564bef70..7a89a0443d 100644 --- a/models/secrets-manager.json +++ b/models/secrets-manager.json @@ -360,7 +360,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new secret. A secret can be a password, a set of \n credentials such as a user name and password, an OAuth token, or other secret information \n that you store in an encrypted form in Secrets Manager. The secret also \n includes the connection information to access a database or other service, which Secrets Manager \n doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the\n important information needed to manage the secret.

\n

For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services.\n\n

\n

For information about creating a secret in the console, see Create a secret.

\n

To create a secret, you can provide the secret value to be encrypted in either the\n SecretString parameter or the SecretBinary parameter, but not both. \n If you include SecretString or SecretBinary\n then Secrets Manager creates an initial secret version and automatically attaches the staging\n label AWSCURRENT to it.

\n

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret,\n you must make sure the JSON you store in the SecretString matches the JSON structure of\n a database secret.

\n

If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key \n aws/secretsmanager. If this key \n doesn't already exist in your account, then Secrets Manager creates it for you automatically. All\n users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. \n Creating aws/secretsmanager can result in a one-time significant delay in returning the \n result.

\n

If the secret is in a different Amazon Web Services account from the credentials calling the API, then \n you can't use aws/secretsmanager to encrypt the secret, and you must create \n and use a customer managed KMS key.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:CreateSecret. If you \n include tags in the secret, you also need secretsmanager:TagResource.\n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

\n

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

", + "smithy.api#documentation": "

Creates a new secret. A secret can be a password, a set of \n credentials such as a user name and password, an OAuth token, or other secret information \n that you store in an encrypted form in Secrets Manager. The secret also \n includes the connection information to access a database or other service, which Secrets Manager \n doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the\n important information needed to manage the secret.

\n

For secrets that use managed rotation, you need to create the secret through the managing service. For more information, see Secrets Manager secrets managed by other Amazon Web Services services.\n\n

\n

For information about creating a secret in the console, see Create a secret.

\n

To create a secret, you can provide the secret value to be encrypted in either the\n SecretString parameter or the SecretBinary parameter, but not both. \n If you include SecretString or SecretBinary\n then Secrets Manager creates an initial secret version and automatically attaches the staging\n label AWSCURRENT to it.

\n

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret,\n you must make sure the JSON you store in the SecretString matches the JSON structure of\n a database secret.

\n

If you don't specify a KMS encryption key, Secrets Manager uses the Amazon Web Services managed key \n aws/secretsmanager. If this key \n doesn't already exist in your account, then Secrets Manager creates it for you automatically. All\n users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. \n Creating aws/secretsmanager can result in a one-time significant delay in returning the \n result.

\n

If the secret is in a different Amazon Web Services account from the credentials calling the API, then \n you can't use aws/secretsmanager to encrypt the secret, and you must create \n and use a customer managed KMS key.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:CreateSecret. If you \n include tags in the secret, you also need secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions.\n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

\n

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

", "smithy.api#examples": [ { "title": "To create a basic secret", @@ -412,13 +412,13 @@ "SecretBinary": { "target": "com.amazonaws.secretsmanager#SecretBinaryType", "traits": { - "smithy.api#documentation": "

The binary data to encrypt and store in the new version of\n the secret. We recommend that you store your binary data in a file and then pass the\n contents of the file as a parameter.

\n

Either SecretString or SecretBinary must have a value, but not\n both.

\n

This parameter is not available in the Secrets Manager console.

" + "smithy.api#documentation": "

The binary data to encrypt and store in the new version of\n the secret. We recommend that you store your binary data in a file and then pass the\n contents of the file as a parameter.

\n

Either SecretString or SecretBinary must have a value, but not\n both.

\n

This parameter is not available in the Secrets Manager console.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } }, "SecretString": { "target": "com.amazonaws.secretsmanager#SecretStringType", "traits": { - "smithy.api#documentation": "

The text data to encrypt and store in this new version of\n the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

\n

Either SecretString or SecretBinary must have a value, but not\n both.

\n

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected\n secret text in only the SecretString parameter. The Secrets Manager console stores the\n information as a JSON structure of key/value pairs that a Lambda rotation function can parse.

" + "smithy.api#documentation": "

The text data to encrypt and store in this new version of\n the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

\n

Either SecretString or SecretBinary must have a value, but not\n both.

\n

If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected\n secret text in only the SecretString parameter. The Secrets Manager console stores the\n information as a JSON structure of key/value pairs that a Lambda rotation function can parse.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
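The recommended key/value JSON structure for SecretString can be produced with plain Foundation APIs. A minimal sketch follows; the field names (username, password) are arbitrary examples, not names mandated by the service.

```swift
import Foundation

// Illustrative sketch: build the recommended JSON key/value structure for SecretString.
// The keys shown here are invented examples, not required names.
struct DatabaseCredentials: Codable {
    let username: String
    let password: String
}

func makeSecretString(username: String, password: String) throws -> String {
    let credentials = DatabaseCredentials(username: username, password: password)
    let data = try JSONEncoder().encode(credentials)
    // A secret version carries either SecretString or SecretBinary, never both.
    return String(decoding: data, as: UTF8.self)
}
```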

" } }, "Tags": { @@ -673,7 +673,22 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager\n only returns fields that have a value in the response.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:DescribeSecret. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

" + "smithy.api#documentation": "

Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager\n only returns fields that have a value in the response.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:DescribeSecret. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

", + "smithy.test#smokeTests": [ + { + "id": "DescribeSecretFailure", + "params": { + "SecretId": "fake-secret-id" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.secretsmanager#DescribeSecretRequest": { @@ -722,7 +737,7 @@ "target": "com.amazonaws.secretsmanager#RotationEnabledType", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

Specifies whether automatic rotation is turned on for this secret.

\n

To turn on rotation, use RotateSecret. To turn off\n rotation, use CancelRotateSecret.

" + "smithy.api#documentation": "

Specifies whether automatic rotation is turned on for this secret. If the secret has never been configured for rotation, Secrets Manager returns null.

\n

To turn on rotation, use RotateSecret. To turn off\n rotation, use CancelRotateSecret.

" } }, "RotationLambdaARN": { @@ -1227,7 +1242,7 @@ "SecretId": { "target": "com.amazonaws.secretsmanager#SecretIdType", "traits": { - "smithy.api#documentation": "

The ARN or name of the secret to retrieve.

\n

For an ARN, we recommend that you specify a complete ARN rather \n than a partial ARN. See Finding a secret from a partial ARN.

", + "smithy.api#documentation": "

The ARN or name of the secret to retrieve. To retrieve a secret from another account, you must use an ARN.

\n

For an ARN, we recommend that you specify a complete ARN rather \n than a partial ARN. See Finding a secret from a partial ARN.

", "smithy.api#required": {} } }, @@ -1272,13 +1287,13 @@ "SecretBinary": { "target": "com.amazonaws.secretsmanager#SecretBinaryType", "traits": { - "smithy.api#documentation": "

The decrypted secret value, if the secret value was originally provided as\n binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded.

\n

If the secret was created by using the Secrets Manager console, or if the secret value was \n originally provided as a string, then this field is omitted. The secret value appears in \n SecretString instead.

" + "smithy.api#documentation": "

The decrypted secret value, if the secret value was originally provided as\n binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded.

\n

If the secret was created by using the Secrets Manager console, or if the secret value was \n originally provided as a string, then this field is omitted. The secret value appears in \n SecretString instead.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
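Because the documentation above notes that SecretBinary is Base64-encoded when retrieved through the HTTP API or the CLI, a caller handling the raw response needs one decode step. A minimal Foundation sketch, with an illustrative variable name:

```swift
import Foundation

// Illustrative sketch: decode a Base64-encoded SecretBinary value taken from a raw
// GetSecretValue response. `base64Payload` stands in for the field as received.
func decodeSecretBinary(_ base64Payload: String) -> Data? {
    Data(base64Encoded: base64Payload)
}

// Example: decodeSecretBinary("aGVsbG8=") yields the bytes of "hello".
```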

" } }, "SecretString": { "target": "com.amazonaws.secretsmanager#SecretStringType", "traits": { - "smithy.api#documentation": "

The decrypted secret value, if the secret value was originally provided as a string or \n through the Secrets Manager console.

\n

If this secret was created by using the console, then Secrets Manager stores the information as a\n JSON structure of key/value pairs.

" + "smithy.api#documentation": "

The decrypted secret value, if the secret value was originally provided as a string or \n through the Secrets Manager console.

\n

If this secret was created by using the console, then Secrets Manager stores the information as a\n JSON structure of key/value pairs.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } }, "VersionStages": { @@ -1579,7 +1594,20 @@ "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListSecretsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.secretsmanager#ListSecretsRequest": { @@ -1873,7 +1901,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new version with a new encrypted secret value and attaches it to the secret. The \n version can contain a new SecretString value or a new SecretBinary value.

\n

We recommend you avoid calling PutSecretValue at a sustained rate of more than \n once every 10 minutes. When you update the secret value, Secrets Manager creates a new version \n of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not \n remove versions created less than 24 hours ago. If you call PutSecretValue more \n than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach \n the quota for secret versions.

\n

You can specify the staging labels to attach to the new version in VersionStages. \n If you don't include VersionStages, then Secrets Manager automatically\n moves the staging label AWSCURRENT to this version. If this operation creates \n the first version for the secret, then Secrets Manager\n automatically attaches the staging label AWSCURRENT to it. \n If this operation moves the staging label AWSCURRENT from another version to this\n version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to\n the version that AWSCURRENT was removed from.

\n

This operation is idempotent. If you call this operation with a ClientRequestToken \n that matches an existing version's VersionId, and you specify the\n same secret data, the operation succeeds but does nothing. However, if the secret data is\n different, then the operation fails because you can't modify an existing version; you can\n only create new ones.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:PutSecretValue. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.

", + "smithy.api#documentation": "

Creates a new version with a new encrypted secret value and attaches it to the secret. The \n version can contain a new SecretString value or a new SecretBinary value.

\n

We recommend you avoid calling PutSecretValue at a sustained rate of more than \n once every 10 minutes. When you update the secret value, Secrets Manager creates a new version \n of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not \n remove versions created less than 24 hours ago. If you call PutSecretValue more \n than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach \n the quota for secret versions.

\n

You can specify the staging labels to attach to the new version in VersionStages. \n If you don't include VersionStages, then Secrets Manager automatically\n moves the staging label AWSCURRENT to this version. If this operation creates \n the first version for the secret, then Secrets Manager\n automatically attaches the staging label AWSCURRENT to it. \n If this operation moves the staging label AWSCURRENT from another version to this\n version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to\n the version that AWSCURRENT was removed from.

\n

This operation is idempotent. If you call this operation with a ClientRequestToken \n that matches an existing version's VersionId, and you specify the\n same secret data, the operation succeeds but does nothing. However, if the secret data is\n different, then the operation fails because you can't modify an existing version; you can\n only create new ones.

\n

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

\n

\n Required permissions: \n secretsmanager:PutSecretValue. \n For more information, see \n IAM policy actions for Secrets Manager and Authentication \n and access control in Secrets Manager.
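The idempotency behaviour described above hinges on the ClientRequestToken. A minimal sketch of generating one token per logical write, so that a retried PutSecretValue with the same payload is treated as the same version; the PendingSecretUpdate type is an invented illustration, not part of the SDK.

```swift
import Foundation

// Illustrative sketch: one ClientRequestToken per logical secret update.
// Reusing the token with identical secret data makes a retried PutSecretValue a no-op,
// while reusing it with different data is rejected, per the behaviour described above.
struct PendingSecretUpdate {
    let clientRequestToken: String
    let secretString: String
}

func preparePut(secretString: String) -> PendingSecretUpdate {
    PendingSecretUpdate(clientRequestToken: UUID().uuidString, secretString: secretString)
}
```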

", "smithy.api#examples": [ { "title": "To store a secret value in a new version of a secret", @@ -1915,13 +1943,13 @@ "SecretBinary": { "target": "com.amazonaws.secretsmanager#SecretBinaryType", "traits": { - "smithy.api#documentation": "

The binary data to encrypt and store in the new version of\n the secret. To use this parameter in the command-line tools, we recommend that you store your\n binary data in a file and then pass the\n contents of the file as a parameter.

\n

You must include SecretBinary or SecretString, but not both.

\n

You can't access this value from the Secrets Manager console.

" + "smithy.api#documentation": "

The binary data to encrypt and store in the new version of\n the secret. To use this parameter in the command-line tools, we recommend that you store your\n binary data in a file and then pass the\n contents of the file as a parameter.

\n

You must include SecretBinary or SecretString, but not both.

\n

You can't access this value from the Secrets Manager console.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } }, "SecretString": { "target": "com.amazonaws.secretsmanager#SecretStringType", "traits": { - "smithy.api#documentation": "

The text to encrypt and store in the new version of the secret.

\n

You must include SecretBinary or SecretString, but not both.

\n

We recommend you create the secret string as JSON key/value pairs, as shown in the example.

" + "smithy.api#documentation": "

The text to encrypt and store in the new version of the secret.

\n

You must include SecretBinary or SecretString, but not both.

\n

We recommend you create the secret string as JSON key/value pairs, as shown in the example.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } }, "VersionStages": { @@ -1929,6 +1957,12 @@ "traits": { "smithy.api#documentation": "

A list of staging labels to attach to this version of the\n secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process.

\n

If you specify a staging\n label that's already associated with a different version of the same secret, then Secrets Manager \n removes the label from the other version and attaches it to this version. \n If you specify \n AWSCURRENT, and it is already attached to another version, then Secrets Manager also \n moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

\n

If you don't include VersionStages, then Secrets Manager automatically\n moves the staging label AWSCURRENT to this version.

" } + }, + "RotationToken": { + "target": "com.amazonaws.secretsmanager#RotationTokenType", + "traits": { + "smithy.api#documentation": "

A unique identifier that indicates the source of the request. For cross-account rotation (when you rotate a secret in one account by using a Lambda rotation function in another account) and the Lambda rotation function assumes an IAM role to call Secrets Manager, Secrets Manager validates the identity with the rotation token. For more information, see How rotation works.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" + } } }, "traits": { @@ -2430,7 +2464,7 @@ "target": "com.amazonaws.secretsmanager#BooleanType", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. \n The rotation schedule is defined in RotateSecretRequest$RotationRules.

\n

For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the \n \n testSecret \n step of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it.

\n

By default, Secrets Manager rotates the secret immediately.

" + "smithy.api#documentation": "

Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. \n The rotation schedule is defined in RotateSecretRequest$RotationRules.

\n

For secrets that use a Lambda rotation function to rotate, if you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the \n \n testSecret \n step of the Lambda rotation function. The test creates an AWSPENDING version of the secret and then removes it.

\n

By default, Secrets Manager rotates the secret immediately.

" } } }, @@ -2505,6 +2539,17 @@ "smithy.api#documentation": "

A structure that defines the rotation configuration for the secret.

" } }, + "com.amazonaws.secretsmanager#RotationTokenType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\-]+$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.secretsmanager#ScheduleExpressionType": { "type": "string", "traits": { @@ -3231,13 +3276,13 @@ "SecretBinary": { "target": "com.amazonaws.secretsmanager#SecretBinaryType", "traits": { - "smithy.api#documentation": "

The binary data to encrypt and store in the new\n version of the secret. We recommend that you\n store your binary data in a file and then pass\n the contents of the file as a parameter.

\n

Either SecretBinary or\n SecretString must have a value, but not both.

\n

You can't access this parameter in the Secrets Manager console.

" + "smithy.api#documentation": "

The binary data to encrypt and store in the new\n version of the secret. We recommend that you\n store your binary data in a file and then pass\n the contents of the file as a parameter.

\n

Either SecretBinary or\n SecretString must have a value, but not both.

\n

You can't access this parameter in the Secrets Manager console.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } }, "SecretString": { "target": "com.amazonaws.secretsmanager#SecretStringType", "traits": { - "smithy.api#documentation": "

The text data to encrypt and store in the new\n version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

\n

Either SecretBinary or SecretString must have\n a value, but not both.

" + "smithy.api#documentation": "

The text data to encrypt and store in the new\n version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value.

\n

Either SecretBinary or SecretString must have\n a value, but not both.

\n

Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.

" } } }, @@ -3445,7 +3490,7 @@ "SecretId": { "target": "com.amazonaws.secretsmanager#SecretIdType", "traits": { - "smithy.api#documentation": "

This field is reserved for internal use.

" + "smithy.api#documentation": "

The ARN or name of the secret with the resource-based policy you want to validate.

" } }, "ResourcePolicy": { diff --git a/models/securityhub.json b/models/securityhub.json index 7a5383ff17..e85e0d7628 100644 --- a/models/securityhub.json +++ b/models/securityhub.json @@ -307,7 +307,7 @@ "PortName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The port name of the local connection.

" + "smithy.api#documentation": "

The port name of the local connection.

\n

Length Constraints: 128.

" } } }, @@ -365,7 +365,7 @@ "PortName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The port name of the remote connection.

" + "smithy.api#documentation": "

The port name of the remote connection.

\n

Length Constraints: 128.

" } } }, @@ -1494,13 +1494,13 @@ "Api": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the API method that was issued.

" + "smithy.api#documentation": "

The name of the API method that was issued.

\n

Length Constraints: 128.

" } }, "ServiceName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the Amazon Web Services service that the API method belongs to.

" + "smithy.api#documentation": "

The name of the Amazon Web Services service that the API method belongs to.

\n

Length Constraints: 128.

" } }, "CallerType": { @@ -1550,7 +1550,7 @@ "Domain": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the DNS domain that issued the API call.

" + "smithy.api#documentation": "

The name of the DNS domain that issued the API call.

\n

Length Constraints: 128.

" } } }, @@ -18209,7 +18209,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The schema version that a finding is formatted for.

", + "smithy.api#documentation": "

The schema version that a finding is formatted for. The value is 2018-10-08.

", "smithy.api#required": {} } }, @@ -18217,7 +18217,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The security findings provider-specific identifier for a finding.

", + "smithy.api#documentation": "

The security findings provider-specific identifier for a finding.

\n

Length Constraints: Minimum length of 1. Maximum length of 512.

", "smithy.api#required": {} } }, @@ -18225,33 +18225,33 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ARN generated by Security Hub that uniquely identifies a product that generates findings.\n This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for\n a custom integration.

", + "smithy.api#documentation": "

The ARN generated by Security Hub that uniquely identifies a product that generates findings.\n This can be the ARN for a third-party product that is integrated with Security Hub, or the ARN for\n a custom integration.

\n

Length Constraints: Minimum length of 12. Maximum length of 2048.

", "smithy.api#required": {} } }, "ProductName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the product that generated the finding.

\n

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

\n

When you use the Security Hub console or API to filter findings by product name, you use this attribute.

" + "smithy.api#documentation": "

The name of the product that generated the finding.

\n

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

\n

When you use the Security Hub console or API to filter findings by product name, you use this attribute.

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } }, "CompanyName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the company for the product that generated the finding.

\n

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

\n

When you use the Security Hub console or API to filter findings by company name, you use this attribute.

" + "smithy.api#documentation": "

The name of the company for the product that generated the finding.

\n

Security Hub populates this attribute automatically for each finding. You cannot update this attribute with BatchImportFindings or BatchUpdateFindings. The exception to this is a custom integration.

\n

When you use the Security Hub console or API to filter findings by company name, you use this attribute.

\n

Length Constraints: Minimum length of 1. Maximum length of 128.\n

" } }, "Region": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Region from which the finding was generated.

\n

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings.

" + "smithy.api#documentation": "

The Region from which the finding was generated.

\n

Security Hub populates this attribute automatically for each finding. You cannot update it using BatchImportFindings or BatchUpdateFindings.

\n

Length Constraints: Minimum length of 1. Maximum length of 16.\n

" } }, "GeneratorId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier for the solution-specific component (a discrete unit of logic) that\n generated a finding. In various security findings providers' solutions, this generator can\n be called a rule, a check, a detector, a plugin, etc.

", + "smithy.api#documentation": "

The identifier for the solution-specific component (a discrete unit of logic) that\n generated a finding. In various security findings providers' solutions, this generator can\n be called a rule, a check, a detector, a plugin, or something else.

\n

Length Constraints: Minimum length of 1. Maximum length of 512.

", "smithy.api#required": {} } }, @@ -18259,14 +18259,14 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Web Services account ID that a finding is generated in.

", + "smithy.api#documentation": "

The Amazon Web Services account ID that a finding is generated in.

\n

Length Constraints: 12.

", "smithy.api#required": {} } }, "Types": { "target": "com.amazonaws.securityhub#TypeList", "traits": { - "smithy.api#documentation": "

One or more finding types in the format of namespace/category/classifier\n that classify a finding.

\n

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual\n Behaviors | Sensitive Data Identifications

" + "smithy.api#documentation": "

One or more finding types in the format of namespace/category/classifier\n that classify a finding.

\n

Valid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual\n Behaviors | Sensitive Data Identifications

\n

Array Members: Maximum number of 50 items.

" } }, "FirstObservedAt": { @@ -18319,7 +18319,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A finding's title.

\n \n

In this release, Title is a required property.

\n
", + "smithy.api#documentation": "

A finding's title. Title is a required property.

\n

Length Constraints: Minimum length of 1. Maximum length of 256.

", "smithy.api#required": {} } }, @@ -18327,7 +18327,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A finding's description.

\n \n

In this release, Description is a required property.

\n
", + "smithy.api#documentation": "

A finding's description. Description is a required property.

\n

Length Constraints: Minimum length of 1. Maximum length of 1024.

", "smithy.api#required": {} } }, @@ -18352,13 +18352,13 @@ "UserDefinedFields": { "target": "com.amazonaws.securityhub#FieldMap", "traits": { - "smithy.api#documentation": "

A list of name/value string pairs associated with the finding. These are custom,\n user-defined fields added to a finding.

" + "smithy.api#documentation": "

A list of name/value string pairs associated with the finding. These are custom,\n user-defined fields added to a finding.

\n

Can contain up to 50 key-value pairs. For each key-value pair, the key can contain up to 128 characters, and the value can contain up to 1024 characters.

" } }, "Malware": { "target": "com.amazonaws.securityhub#MalwareList", "traits": { - "smithy.api#documentation": "

A list of malware related to a finding.

" + "smithy.api#documentation": "

A list of malware related to a finding.

\n

Array Members: Maximum number of 5 items.

" } }, "Network": { @@ -18382,20 +18382,20 @@ "Threats": { "target": "com.amazonaws.securityhub#ThreatList", "traits": { - "smithy.api#documentation": "

Details about the threat detected in a security finding and the file paths that were affected by the threat.\n

" + "smithy.api#documentation": "

Details about the threat detected in a security finding and the file paths that were affected by the threat.\n

\n

Array Members: Minimum number of 1 item. Maximum number of 32 items.

" } }, "ThreatIntelIndicators": { "target": "com.amazonaws.securityhub#ThreatIntelIndicatorList", "traits": { - "smithy.api#documentation": "

Threat intelligence details related to a finding.

" + "smithy.api#documentation": "

Threat intelligence details related to a finding.

\n

Array Members: Minimum number of 1 item. Maximum number of 5 items.

" } }, "Resources": { "target": "com.amazonaws.securityhub#ResourceList", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A set of resource data types that describe the resources that the finding refers\n to.

", + "smithy.api#documentation": "

A set of resource data types that describe the resources that the finding refers\n to.

\n

Array Members: Minimum number of 1 item. Maximum number of 32 items.

", "smithy.api#required": {} } }, @@ -18432,7 +18432,7 @@ "RelatedFindings": { "target": "com.amazonaws.securityhub#RelatedFindingList", "traits": { - "smithy.api#documentation": "

A list of related findings.

" + "smithy.api#documentation": "

A list of related findings.

\n

Array Members: Minimum number of 1 item. Maximum number of 10 items.

" } }, "Note": { @@ -18480,13 +18480,13 @@ "ProcessedAt": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

A imestamp that indicates when Security Hub received a finding and begins to process it.

This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

  • YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)
  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)
  • YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)
  • YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)
  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
" + "smithy.api#documentation": "

A timestamp that indicates when Security Hub received a finding and begins to process it.

This field accepts only the specified formats. Timestamps can end with Z or (\"+\" / \"-\") time-hour [\":\" time-minute]. The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:

  • YYYY-MM-DDTHH:MM:SSZ (for example, 2019-01-31T23:00:00Z)
  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ (for example, 2019-01-31T23:00:00.123456789Z)
  • YYYY-MM-DDTHH:MM:SS+HH:MM (for example, 2024-01-04T15:25:10+17:59)
  • YYYY-MM-DDTHH:MM:SS-HHMM (for example, 2024-01-04T15:25:10-1759)
  • YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM (for example, 2024-01-04T15:25:10.123456789+17:59)
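As a quick illustration of producing the first accepted format above, Foundation's ISO8601DateFormatter emits the YYYY-MM-DDTHH:MM:SSZ form directly; this is a sketch only, and the other accepted formats would need a custom DateFormatter.

```swift
import Foundation

// Illustrative sketch: emit a timestamp in the first accepted format above,
// YYYY-MM-DDTHH:MM:SSZ, for example "2019-01-31T23:00:00Z".
let formatter = ISO8601DateFormatter()
formatter.formatOptions = [.withInternetDateTime]   // second precision, trailing Z, UTC
let processedAt = formatter.string(from: Date())
```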
" } }, "AwsAccountName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the Amazon Web Services account from which a finding was generated.\n

" + "smithy.api#documentation": "

The name of the Amazon Web Services account from which a finding was generated.\n

\n

Length Constraints: Minimum length of 1. Maximum length of 50.\n

" } } }, @@ -22180,13 +22180,13 @@ "Status": { "target": "com.amazonaws.securityhub#ComplianceStatus", "traits": { - "smithy.api#documentation": "

The result of a standards check.

The valid values for Status are as follows.

  • PASSED - Standards check passed for all evaluated resources.
  • WARNING - Some information is missing or this check is not supported for your configuration.
  • FAILED - Standards check failed for at least one evaluated resource.
  • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the finding.
" + "smithy.api#documentation": "

The result of a standards check.

The valid values for Status are as follows.

  • PASSED - Standards check passed for all evaluated resources.
  • WARNING - Some information is missing or this check is not supported for your configuration.
  • FAILED - Standards check failed for at least one evaluated resource.
  • NOT_AVAILABLE - Check could not be performed due to a service outage, API error, or because the result of the Config evaluation was NOT_APPLICABLE. If the Config evaluation result was NOT_APPLICABLE for a Security Hub control, Security Hub automatically archives the finding after 3 days.
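For readers mapping these values into client code, a minimal illustrative enum follows; this is not the generated Soto type, which has its own representation of the same values.

```swift
// Illustrative only; the generated SDK defines its own enum for these values.
enum ComplianceStatus: String {
    case passed = "PASSED"
    case warning = "WARNING"
    case failed = "FAILED"
    case notAvailable = "NOT_AVAILABLE"
}
```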
" } }, "RelatedRequirements": { "target": "com.amazonaws.securityhub#RelatedRequirementsList", "traits": { - "smithy.api#documentation": "

For a control, the industry or regulatory framework requirements that are related to the\n control. The check for that control is aligned with these requirements.

" + "smithy.api#documentation": "

For a control, the industry or regulatory framework requirements that are related to the\n control. The check for that control is aligned with these requirements.

\n

Array Members: Maximum number of 32 items.

" } }, "StatusReasons": { @@ -25042,13 +25042,13 @@ "Domain": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The DNS domain that is associated with the DNS request.

" + "smithy.api#documentation": "

The DNS domain that is associated with the DNS request.

\n

Length Constraints: 128.

" } }, "Protocol": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The protocol that was used for the DNS request.

" + "smithy.api#documentation": "

The protocol that was used for the DNS request.

\n

Length Constraints: Minimum length of 1. Maximum length of 64.

" } }, "Blocked": { @@ -25391,25 +25391,25 @@ "FilePath": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Path to the infected or suspicious file on the resource it was detected on.\n\t\t

" + "smithy.api#documentation": "

Path to the infected or suspicious file on the resource it was detected on.\n\t\t

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } }, "FileName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the infected or suspicious file corresponding to the hash.\n\t\t

" + "smithy.api#documentation": "

The name of the infected or suspicious file corresponding to the hash.\n\t\t

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } }, "ResourceId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource on which the threat was detected.\n\t\t

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource on which the threat was detected.\n\t\t

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } }, "Hash": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The hash value for the infected or suspicious file.\n\t\t

" + "smithy.api#documentation": "

The hash value for the infected or suspicious file.\n\t\t

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } } }, @@ -25588,7 +25588,7 @@ } }, "traits": { - "smithy.api#documentation": "

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update values for confidence, criticality, related findings, severity, and types.

" + "smithy.api#documentation": "

In a BatchImportFindings request, finding providers use FindingProviderFields to provide and update values for the following fields:

  • Confidence
  • Criticality
  • RelatedFindings
  • Severity
  • Types

The preceding fields are nested under the FindingProviderFields object, but also have analogues of the same name \n as top-level ASFF fields. When a new finding is sent to Security Hub by a finding provider, Security Hub populates the \n FindingProviderFields object automatically, if it is empty, based on the corresponding top-level fields.

\n

Finding providers can update FindingProviderFields only by using the BatchImportFindings \n operation. Finding providers can't update\n this object with the \n BatchUpdateFindings\n operation. Customers can update the top-level fields by using the BatchUpdateFindings operation. Customers can't \n update FindingProviderFields.

\n

For information about how Security Hub handles updates from BatchImportFindings to\n FindingProviderFields and to the corresponding top-level\n attributes, see Using FindingProviderFields\n in the Security Hub User Guide.
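To make the nesting concrete, a rough sketch of the FindingProviderFields portion of an ASFF finding as it might be assembled before a BatchImportFindings call; every literal value below, including the product ARN, is invented for illustration.

```swift
// Illustrative only: the FindingProviderFields block of an ASFF finding, assembled
// as a JSON-compatible dictionary. All values below are invented example data.
let findingProviderFields: [String: Any] = [
    "Confidence": 42,
    "Criticality": 99,
    "RelatedFindings": [
        ["ProductArn": "arn:aws:securityhub:us-west-2::product/example/example", // hypothetical ARN
         "Id": "example-finding-id"]                                             // hypothetical ID
    ],
    "Severity": ["Label": "MEDIUM", "Original": "MEDIUM"],
    "Types": ["Software and Configuration Checks/Vulnerabilities/CVE"]
]
```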

\n

" } }, "com.amazonaws.securityhub#FindingProviderSeverity": { @@ -25603,12 +25603,12 @@ "Original": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The finding provider's original value for the severity.

" + "smithy.api#documentation": "

The finding provider's original value for the severity.

\n

Length Constraints: Minimum length of 1. Maximum length of 64.

" } } }, "traits": { - "smithy.api#documentation": "

The severity assigned to the finding by the finding provider.

" + "smithy.api#documentation": "

The severity assigned to a finding by the finding provider. This object may include one or more of the following attributes:

  • Label
  • Normalized
  • Original
  • Product

If a \n BatchImportFindings\n request for a \n new finding only provides Label or only provides Normalized, Security Hub \n automatically populates the value of the other field.

\n

The Normalized and Product attributes are included in the FindingProviderSeverity \n structure to preserve the historical information associated with the finding, even if the top-level \n Severity object is later modified using the \n BatchUpdateFindings\n operation.

\n

If the top-level Finding.Severity object is present, but Finding.FindingProviderFields isn't present, \n Security Hub creates the FindingProviderFields.Severity object and copies the entire Finding.Severity object into it. \n This ensures that the original, provider-supplied details are retained within the FindingProviderFields.Severity \n object, even if the top-level Severity object is overwritten.\n

" } }, "com.amazonaws.securityhub#FirewallPolicyDetails": { @@ -25739,7 +25739,7 @@ "Labels": { "target": "com.amazonaws.securityhub#TypeList", "traits": { - "smithy.api#documentation": "

\n An array of tags used to identify the detector associated with the finding.\n

" + "smithy.api#documentation": "

\n An array of tags used to identify the detector associated with the finding.\n

\n

Array Members: Minimum number of 0 items. Maximum number of 10 items.

" } } }, @@ -28734,7 +28734,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the malware that was observed.

", + "smithy.api#documentation": "

The name of the malware that was observed.

\n

Length Constraints: Minimum of 1. Maximum of 64.

", "smithy.api#required": {} } }, @@ -28747,7 +28747,7 @@ "Path": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The file system path of the malware that was observed.

" + "smithy.api#documentation": "

The file system path of the malware that was observed.

\n

Length Constraints: Minimum of 1. Maximum of 512.

" } }, "State": { @@ -29026,7 +29026,7 @@ "Protocol": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The protocol of network-related information about a finding.

" + "smithy.api#documentation": "

The protocol of network-related information about a finding.

\n

Length Constraints: Minimum of 1. Maximum of 16.

" } }, "OpenPortRange": { @@ -29056,7 +29056,7 @@ "SourceDomain": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The source domain of network-related information about a finding.

" + "smithy.api#documentation": "

The source domain of network-related information about a finding.

\n

Length Constraints: Minimum of 1. Maximum of 128.

" } }, "SourceMac": { @@ -29086,7 +29086,7 @@ "DestinationDomain": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The destination domain of network-related information about a finding.

" + "smithy.api#documentation": "

The destination domain of network-related information about a finding.

\n

Length Constraints: Minimum of 1. Maximum of 128.

" } } }, @@ -29124,7 +29124,7 @@ "Protocol": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The protocol used to make the network connection request.

" + "smithy.api#documentation": "

The protocol used to make the network connection request.

\n

Length Constraints: Minimum length of 1. Maximum length of 64.

" } }, "Blocked": { @@ -29161,7 +29161,7 @@ "Protocol": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The protocol used for the component.

" + "smithy.api#documentation": "

The protocol used for the component.

\n

Length Constraints: Minimum of 1. Maximum of 16.

" } }, "Destination": { @@ -29187,13 +29187,13 @@ "ComponentId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The identifier of a component in the network path.

" + "smithy.api#documentation": "

The identifier of a component in the network path.

\n

Length Constraints: Minimum of 1. Maximum of 32.

" } }, "ComponentType": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The type of component.

" + "smithy.api#documentation": "

The type of component.

\n

Length Constraints: Minimum of 1. Maximum of 32.

" } }, "Egress": { @@ -29261,7 +29261,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The text of a note.

", + "smithy.api#documentation": "

The text of a note.

\n

Length Constraints: Minimum of 1. Maximum of 512.

", "smithy.api#required": {} } }, @@ -29657,44 +29657,44 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the compliance standard that was used to determine the patch\n compliance status.

", + "smithy.api#documentation": "

The identifier of the compliance standard that was used to determine the patch\n compliance status.

\n

Length Constraints: Minimum length of 1. Maximum length of 256.

", "smithy.api#required": {} } }, "InstalledCount": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

The number of patches from the compliance standard that were installed\n successfully.

" + "smithy.api#documentation": "

The number of patches from the compliance standard that were installed\n successfully.

\n

The value can be an integer from 0 to 100000.

" } }, "MissingCount": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

The number of patches that are part of the compliance standard but are not installed.\n The count includes patches that failed to install.

" + "smithy.api#documentation": "

The number of patches that are part of the compliance standard but are not installed.\n The count includes patches that failed to install.

\n

The value can be an integer from 0 to 100000.

" } }, "FailedCount": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

The number of patches from the compliance standard that failed to install.

" + "smithy.api#documentation": "

The number of patches from the compliance standard that failed to install.

\n

The value can be an integer from 0 to 100000.

" } }, "InstalledOtherCount": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

The number of installed patches that are not part of the compliance standard.

" + "smithy.api#documentation": "

The number of installed patches that are not part of the compliance standard.

\n

The value can be an integer from 0 to 100000.

" } }, "InstalledRejectedCount": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

The number of patches that are installed but are also on a list of patches that the\n customer rejected.

" + "smithy.api#documentation": "

The number of patches that are installed but are also on a list of patches that the\n customer rejected.

\n

The value can be an integer from 0 to 100000.

" } }, "InstalledPendingReboot": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

The number of patches that were applied, but that require the instance to be rebooted in\n order to be marked as installed.

" + "smithy.api#documentation": "

The number of patches that were applied, but that require the instance to be rebooted in\n order to be marked as installed.

\n

The value can be an integer from 0 to 100000.

" } }, "OperationStartTime": { @@ -29712,13 +29712,13 @@ "RebootOption": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The reboot option specified for the instance.

" + "smithy.api#documentation": "

The reboot option specified for the instance.

\n

Length Constraints: Minimum length of 1. Maximum length of 256.

" } }, "Operation": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The type of patch operation performed. For Patch Manager, the values are\n SCAN and INSTALL.

" + "smithy.api#documentation": "

The type of patch operation performed. For Patch Manager, the values are\n SCAN and INSTALL.

\n

Length Constraints: Minimum length of 1. Maximum length of 256.

" } } }, @@ -29844,13 +29844,13 @@ "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the process.

" + "smithy.api#documentation": "

The name of the process.

\n

Length Constraints: Minimum length of 1. Maximum length of 64.

" } }, "Path": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The path to the process executable.

" + "smithy.api#documentation": "

The path to the process executable.

\n

Length Constraints: Minimum length of 1. Maximum length of 512.

" } }, "Pid": { @@ -30025,7 +30025,7 @@ "Text": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

Describes the recommended steps to take to remediate an issue identified in a finding.

" + "smithy.api#documentation": "

Describes the recommended steps to take to remediate an issue identified in a finding.

\n

Length Constraints: Minimum length of 1. Maximum length of 512.

" } }, "Url": { @@ -30156,7 +30156,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of the resource that details are provided for. If possible, set\n Type to one of the supported resource types. For example, if the resource\n is an EC2 instance, then set Type to AwsEc2Instance.

\n

If the resource does not match any of the provided types, then set Type to\n Other.

", + "smithy.api#documentation": "

The type of the resource that details are provided for. If possible, set\n Type to one of the supported resource types. For example, if the resource\n is an EC2 instance, then set Type to AwsEc2Instance.

\n

If the resource does not match any of the provided types, then set Type to\n Other.

\n

Length Constraints: Minimum length of 1. Maximum length of 256.

", "smithy.api#required": {} } }, @@ -30177,7 +30177,7 @@ "Region": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The canonical Amazon Web Services external Region name where this resource is located.

" + "smithy.api#documentation": "

The canonical Amazon Web Services external Region name where this resource is located.

\n

Length Constraints: Minimum length of 1. Maximum length of 16.

" } }, "ResourceRole": { @@ -30189,7 +30189,7 @@ "Tags": { "target": "com.amazonaws.securityhub#FieldMap", "traits": { - "smithy.api#documentation": "

A list of Amazon Web Services tags associated with a resource at the time the finding was\n processed.

" + "smithy.api#documentation": "

A list of Amazon Web Services tags associated with a resource at the time the finding was\n processed. Tags must follow Amazon Web Services tag naming limits and requirements.

" } }, "DataClassification": { @@ -33129,13 +33129,13 @@ "Normalized": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

Deprecated. The normalized severity of a finding.\n Instead of providing Normalized, provide Label.

If you provide Label and do not provide Normalized, then\n Normalized is set automatically as follows.

  • INFORMATIONAL - 0
  • LOW - 1
  • MEDIUM - 40
  • HIGH - 70
  • CRITICAL - 90

" + "smithy.api#documentation": "

Deprecated. The normalized severity of a finding.\n Instead of providing Normalized, provide Label.

The value of Normalized can be an integer between 0 and 100.

If you provide Label and do not provide Normalized, then\n Normalized is set automatically as follows.

  • INFORMATIONAL - 0
  • LOW - 1
  • MEDIUM - 40
  • HIGH - 70
  • CRITICAL - 90

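Since the Label-to-Normalized mapping above is fixed, a consumer that still needs the deprecated Normalized value can derive it from Label. A minimal Swift sketch of that documented mapping (the SeverityLabel enum here is illustrative, not a generated Soto type):

```swift
// Illustrative only (not a generated Soto type): the documented mapping from
// Label to the auto-assigned Normalized score when Normalized is omitted.
enum SeverityLabel: String {
    case informational = "INFORMATIONAL"
    case low = "LOW"
    case medium = "MEDIUM"
    case high = "HIGH"
    case critical = "CRITICAL"

    /// Normalized value (0-100) that Security Hub assigns for this label.
    var normalized: Int {
        switch self {
        case .informational: return 0
        case .low: return 1
        case .medium: return 40
        case .high: return 70
        case .critical: return 90
        }
    }
}
```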
" } }, "Original": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The native severity from the finding product that generated the finding.

" + "smithy.api#documentation": "

The native severity from the finding product that generated the finding.

\n

Length Constraints: Minimum length of 1. Maximum length of 64.

" } } }, @@ -34485,6 +34485,12 @@ "traits": { "smithy.api#enumValue": "ORGANIZATIONAL_UNIT" } + }, + "ROOT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ROOT" + } } } }, @@ -34494,13 +34500,13 @@ "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The name of the threat.\n\t\t

" + "smithy.api#documentation": "

The name of the threat.\n\t\t

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } }, "Severity": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The severity of the threat. \n\t\t

" + "smithy.api#documentation": "

The severity of the threat. \n\t\t

\n

Length Constraints: Minimum length of 1. Maximum length of 128.

" } }, "ItemCount": { @@ -34512,7 +34518,7 @@ "FilePaths": { "target": "com.amazonaws.securityhub#FilePathList", "traits": { - "smithy.api#documentation": "

Provides information about the file paths that were affected by the threat.\n\t\t

" + "smithy.api#documentation": "

Provides information about the file paths that were affected by the threat.\n\t\t

\n

Array Members: Minimum number of 1 item. Maximum number of 5 items.

" } } }, @@ -34532,7 +34538,7 @@ "Value": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The value of a threat intelligence indicator.

" + "smithy.api#documentation": "

The value of a threat intelligence indicator.

\n

Length Constraints: Minimum length of 1. Maximum length of 512.

" } }, "Category": { @@ -34550,7 +34556,7 @@ "Source": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The source of the threat intelligence indicator.

" + "smithy.api#documentation": "

The source of the threat intelligence indicator.

\n

Length Constraints: Minimum length of 1. Maximum length of 64.

" } }, "SourceUrl": { diff --git a/models/securitylake.json b/models/securitylake.json index 65c98b004b..91aa6e7dc0 100644 --- a/models/securitylake.json +++ b/models/securitylake.json @@ -59,7 +59,7 @@ "min": 1, "max": 1011 }, - "smithy.api#pattern": "^arn:aws:securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" } }, "com.amazonaws.securitylake#AwsAccountId": { @@ -857,7 +857,7 @@ "subscriberIdentity": { "target": "com.amazonaws.securitylake#AwsIdentity", "traits": { - "smithy.api#documentation": "

The AWS identity used to access your data.

", + "smithy.api#documentation": "

The Amazon Web Services identity used to access your data.

", "smithy.api#required": {} } }, @@ -985,7 +985,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "^[\\\\\\w\\-_:/.]*$" + "smithy.api#pattern": "^[\\w\\-\\_\\:\\.]*$" } }, "com.amazonaws.securitylake#CustomLogSourceProvider": { @@ -2977,7 +2977,7 @@ "com.amazonaws.securitylake#Region": { "type": "string", "traits": { - "smithy.api#pattern": "^(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$" + "smithy.api#pattern": "^(us(-gov)?|af|ap|ca|eu|me|sa)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+$" } }, "com.amazonaws.securitylake#RegionList": { @@ -3981,7 +3981,7 @@ "subscriberIdentity": { "target": "com.amazonaws.securitylake#AwsIdentity", "traits": { - "smithy.api#documentation": "

The AWS identity used to access your data.

", + "smithy.api#documentation": "

The Amazon Web Services identity used to access your data.

", "smithy.api#required": {} } }, @@ -4038,7 +4038,7 @@ "resourceShareArn": { "target": "com.amazonaws.securitylake#ResourceShareArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) which uniquely defines the AWS RAM resource share. Before\n accepting the RAM resource share invitation, you can view details related to the RAM\n resource share.

\n

This field is available only for Lake Formation subscribers created after March 8, 2023.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that uniquely defines the Amazon Web Services RAM resource share. Before\n accepting the RAM resource share invitation, you can view details related to the RAM\n resource share.

\n

This field is available only for Lake Formation subscribers created after March 8, 2023.

" } }, "resourceShareName": { diff --git a/models/sesv2.json b/models/sesv2.json index 95bdbe5fa2..1820531dca 100644 --- a/models/sesv2.json +++ b/models/sesv2.json @@ -995,7 +995,7 @@ } ], "traits": { - "smithy.api#documentation": "

Create an event destination. Events include message sends,\n deliveries, opens, clicks, bounces, and complaints. Event\n destinations are places that you can send information about these events\n to. For example, you can send event data to Amazon SNS to receive notifications when you\n receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term\n storage.

\n

A single configuration set can include more than one event destination.

", + "smithy.api#documentation": "

Create an event destination. Events include message sends,\n deliveries, opens, clicks, bounces, and complaints. Event\n destinations are places that you can send information about these events\n to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event\n to the specified target.

\n

A single configuration set can include more than one event destination.

", "smithy.api#http": { "method": "POST", "uri": "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations", @@ -2149,7 +2149,7 @@ } ], "traits": { - "smithy.api#documentation": "

Delete an event destination.

\n

\n Events include message sends, deliveries, opens, clicks, bounces,\n and complaints. Event destinations are places that you can send\n information about these events to. For example, you can send event data to Amazon SNS to\n receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to\n stream data to Amazon S3 for long-term storage.

", + "smithy.api#documentation": "

Delete an event destination.

\n

\n Events include message sends, deliveries, opens, clicks, bounces,\n and complaints. Event destinations are places that you can send\n information about these events to. For example, you can send event data to Amazon EventBridge and\n associate a rule to send the event to the specified target.

", "smithy.api#http": { "method": "DELETE", "uri": "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}", @@ -3483,6 +3483,21 @@ "target": "com.amazonaws.sesv2#Esp" } }, + "com.amazonaws.sesv2#EventBridgeDestination": { + "type": "structure", + "members": { + "EventBusArn": { + "target": "com.amazonaws.sesv2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to\n send notifications when certain email events occur.
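For Soto users, the new EventBridgeDestination surfaces as a generated Swift struct on the SESv2 client. The sketch below wires a configuration set to the default event bus; the member labels, Soto 6-style client setup, and resource names are assumptions based on Soto's usual codegen conventions, not the verified generated API:

```swift
import SotoSESv2

// Sketch (Soto 6-style client setup): route SESv2 email events to the default
// EventBridge bus via the new EventBridgeDestination shape. Member labels and
// resource names are assumptions; verify against the generated SESv2_shapes.swift.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let ses = SESv2(client: client, region: .useast1)

let destination = SESv2.EventDestinationDefinition(
    enabled: true,
    eventBridgeDestination: .init(
        eventBusArn: "arn:aws:events:us-east-1:123456789012:event-bus/default" // hypothetical account
    ),
    matchingEventTypes: [.send, .delivery, .bounce, .complaint]
)

_ = try await ses.createConfigurationSetEventDestination(
    .init(
        configurationSetName: "my-config-set",          // hypothetical configuration set
        eventDestination: destination,
        eventDestinationName: "eventbridge-destination" // hypothetical destination name
    )
)
```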

" + } + }, "com.amazonaws.sesv2#EventDestination": { "type": "structure", "members": { @@ -3522,7 +3537,13 @@ "SnsDestination": { "target": "com.amazonaws.sesv2#SnsDestination", "traits": { - "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notification when certain email events occur.

" + "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notifications when certain email events occur.

" + } + }, + "EventBridgeDestination": { + "target": "com.amazonaws.sesv2#EventBridgeDestination", + "traits": { + "smithy.api#documentation": "

An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to\n send notifications when certain email events occur.

" } }, "PinpointDestination": { @@ -3567,7 +3588,13 @@ "SnsDestination": { "target": "com.amazonaws.sesv2#SnsDestination", "traits": { - "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notification when certain email events occur.

" + "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notifications when certain email events occur.

" + } + }, + "EventBridgeDestination": { + "target": "com.amazonaws.sesv2#EventBridgeDestination", + "traits": { + "smithy.api#documentation": "

An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to\n send notifications when certain email events occur.

" } }, "PinpointDestination": { @@ -4122,7 +4149,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieve a list of event destinations that are associated with a configuration\n set.

\n

\n Events include message sends, deliveries, opens, clicks, bounces,\n and complaints. Event destinations are places that you can send\n information about these events to. For example, you can send event data to Amazon SNS to\n receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to\n stream data to Amazon S3 for long-term storage.

", + "smithy.api#documentation": "

Retrieve a list of event destinations that are associated with a configuration\n set.

\n

\n Events include message sends, deliveries, opens, clicks, bounces,\n and complaints. Event destinations are places that you can send\n information about these events to. For example, you can send event data to Amazon EventBridge and\n associate a rule to send the event to the specified target.

", "smithy.api#http": { "method": "GET", "uri": "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations", @@ -11369,7 +11396,7 @@ } }, "traits": { - "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notification when certain email events occur.

" + "smithy.api#documentation": "

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to\n send notifications when certain email events occur.

" } }, "com.amazonaws.sesv2#Subject": { @@ -12021,7 +12048,7 @@ } ], "traits": { - "smithy.api#documentation": "

Update the configuration of an event destination for a configuration set.

\n

\n Events include message sends, deliveries, opens, clicks, bounces,\n and complaints. Event destinations are places that you can send\n information about these events to. For example, you can send event data to Amazon SNS to\n receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to\n stream data to Amazon S3 for long-term storage.

", + "smithy.api#documentation": "

Update the configuration of an event destination for a configuration set.

\n

\n Events include message sends, deliveries, opens, clicks, bounces,\n and complaints. Event destinations are places that you can send\n information about these events to. For example, you can send event data to Amazon EventBridge and\n associate a rule to send the event to the specified target.

", "smithy.api#http": { "method": "PUT", "uri": "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}", diff --git a/models/shield.json b/models/shield.json index bb13c64f11..6dbdbd8b0e 100644 --- a/models/shield.json +++ b/models/shield.json @@ -201,7 +201,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -244,7 +243,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -257,7 +257,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -271,7 +270,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -396,7 +394,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -431,7 +428,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -442,14 +438,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -463,14 +461,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -479,11 +475,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -494,14 +490,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -515,7 +513,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -535,7 +532,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -546,14 +542,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -564,9 +562,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -2979,7 +2979,20 @@ "outputToken": "NextToken", "items": "AttackSummaries", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "ListAttacksSuccess", + "params": {}, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.shield#ListAttacksRequest": { diff --git a/models/snowball.json b/models/snowball.json index a4ead5bb07..72c739a476 100644 --- a/models/snowball.json +++ b/models/snowball.json @@ -2500,7 +2500,20 @@ "outputToken": "NextToken", "items": "Addresses", "pageSize": "MaxResults" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeAddressesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.snowball#DescribeAddressesRequest": { diff --git a/models/sns.json b/models/sns.json index c77285f4bf..b523df57d0 100644 --- a/models/sns.json +++ b/models/sns.json @@ -1571,7 +1571,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a platform application object for one of the supported push notification\n services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile\n apps may register. You must specify PlatformPrincipal and\n PlatformCredential attributes when using the\n CreatePlatformApplication action.

\n

\n PlatformPrincipal and PlatformCredential are received from\n the notification service.

\n
    \n
  • \n

    For ADM, PlatformPrincipal is client id\n and PlatformCredential is client secret.

    \n
  • \n
  • \n

    For Baidu, PlatformPrincipal is API key\n and PlatformCredential is secret key.

    \n
  • \n
  • \n

    For APNS and APNS_SANDBOX using certificate\n credentials, PlatformPrincipal is SSL certificate and\n PlatformCredential is private key.

    \n
  • \n
  • \n

    For APNS and APNS_SANDBOX using token credentials,\n PlatformPrincipal is signing key ID and\n PlatformCredential is signing key.

    \n
  • \n
  • \n

    For GCM (Firebase Cloud Messaging) using key credentials, there is no\n PlatformPrincipal. The PlatformCredential is\n API key.

    \n
  • \n
  • \n

    For GCM (Firebase Cloud Messaging) using token credentials, there is no\n PlatformPrincipal. The PlatformCredential is a\n JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in\n string format and special characters must be ignored. To format the file\n correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq\n @json <<< cat service.json`.

    \n
  • \n
  • \n

    For MPNS, PlatformPrincipal is TLS\n certificate and PlatformCredential is private\n key.

    \n
  • \n
  • \n

    For WNS, PlatformPrincipal is Package Security\n Identifier and PlatformCredential is secret\n key.

    \n
  • \n
\n

You can use the returned PlatformApplicationArn as an attribute for the\n CreatePlatformEndpoint action.

" + "smithy.api#documentation": "

Creates a platform application object for one of the supported push notification\n services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile\n apps may register. You must specify PlatformPrincipal and\n PlatformCredential attributes when using the\n CreatePlatformApplication action.

\n

\n PlatformPrincipal and PlatformCredential are received from\n the notification service.

\n
    \n
  • \n

    For ADM, PlatformPrincipal is client id\n and PlatformCredential is client secret.

    \n
  • \n
  • \n

    For APNS and APNS_SANDBOX using certificate\n credentials, PlatformPrincipal is SSL certificate and\n PlatformCredential is private key.

    \n
  • \n
  • \n

    For APNS and APNS_SANDBOX using token credentials,\n PlatformPrincipal is signing key ID and\n PlatformCredential is signing key.

    \n
  • \n
  • \n

    For Baidu, PlatformPrincipal is API key\n and PlatformCredential is secret key.

    \n
  • \n
  • \n

    For GCM (Firebase Cloud Messaging) using key credentials, there is no\n PlatformPrincipal. The PlatformCredential is\n API key.

    \n
  • \n
  • \n

    For GCM (Firebase Cloud Messaging) using token credentials, there is no\n PlatformPrincipal. The PlatformCredential is a\n JSON formatted private key file. When using the Amazon Web Services CLI, the file must be in\n string format and special characters must be ignored. To format the file\n correctly, Amazon SNS recommends using the following command: SERVICE_JSON=`jq\n @json <<< cat service.json`.

    \n
  • \n
  • \n

    For MPNS, PlatformPrincipal is TLS\n certificate and PlatformCredential is private\n key.

    \n
  • \n
  • \n

    For WNS, PlatformPrincipal is Package Security\n Identifier and PlatformCredential is secret\n key.

    \n
  • \n
\n

You can use the returned PlatformApplicationArn as an attribute for the\n CreatePlatformEndpoint action.
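A hedged Soto sketch of the GCM token-credential case from the list above (no PlatformPrincipal, PlatformCredential is the JSON private key file); the file name, application name, and member labels are assumptions based on Soto's usual conventions:

```swift
import Foundation
import SotoSNS

// Sketch (Soto 6-style client setup): create a platform application for
// GCM (Firebase Cloud Messaging) using token credentials, where
// PlatformCredential is the JSON key file and no PlatformPrincipal is supplied.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let sns = SNS(client: client, region: .useast1)

let serviceJson = try String(contentsOfFile: "service.json", encoding: .utf8) // hypothetical key file

let response = try await sns.createPlatformApplication(
    .init(
        attributes: ["PlatformCredential": serviceJson], // no PlatformPrincipal for token credentials
        name: "my-fcm-app",                              // hypothetical application name
        platform: "GCM"
    )
)
// The returned PlatformApplicationArn feeds CreatePlatformEndpoint.
print(response.platformApplicationArn ?? "")
```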

" } }, "com.amazonaws.sns#CreatePlatformApplicationInput": { @@ -1794,7 +1794,7 @@ "Attributes": { "target": "com.amazonaws.sns#TopicAttributesMap", "traits": { - "smithy.api#documentation": "

A map of attributes with their corresponding values.

\n

The following lists the names, descriptions, and values of the special request\n parameters that the CreateTopic action uses:

\n
    \n
  • \n

    \n DeliveryPolicy – The policy that defines how Amazon SNS retries\n failed deliveries to HTTP/S endpoints.

    \n
  • \n
  • \n

    \n DisplayName – The display name to use for a topic with SMS\n subscriptions.

    \n
  • \n
  • \n

    \n FifoTopic – Set to true to create a FIFO topic.

    \n
  • \n
  • \n

    \n Policy – The policy that defines who can access your\n topic. By default, only the topic owner can publish or subscribe to the\n topic.

    \n
  • \n
  • \n

    \n SignatureVersion – The signature version corresponds to\n the hashing algorithm used while creating the signature of the notifications,\n subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS.\n By default, SignatureVersion is set to 1.

    \n
  • \n
  • \n

    \n TracingConfig – Tracing mode of an Amazon SNS topic. By default\n TracingConfig is set to PassThrough, and the topic\n passes through the tracing header it receives from an Amazon SNS publisher to its\n subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data\n to topic owner account if the sampled flag in the tracing header is true. This\n is only supported on standard topics.

    \n
  • \n
\n

The following attribute applies only to server-side\n encryption:

\n
    \n
  • \n

    \n KmsMasterKeyId – The ID of an Amazon Web Services managed customer master\n key (CMK) for Amazon SNS or a custom CMK. For more information, see Key\n Terms. For more examples, see KeyId in the Key Management Service API Reference.

    \n
  • \n
\n

The following attributes apply only to FIFO topics:

\n
    \n
  • \n

    \n ArchivePolicy – Adds or updates an inline policy document\n to archive messages stored in the specified Amazon SNS topic.

    \n
  • \n
  • \n

    \n BeginningArchiveTime – The earliest starting point at\n which a message in the topic’s archive can be replayed from. This point in time\n is based on the configured message retention period set by the topic’s message\n archiving policy.

    \n
  • \n
  • \n

    \n ContentBasedDeduplication – Enables content-based deduplication for\n FIFO topics.

    \n
      \n
    • \n

      By default, ContentBasedDeduplication is set to false.\n If you create a FIFO topic and this attribute is false, you must\n specify a value for the MessageDeduplicationId parameter for the\n Publish action.

      \n
    • \n
    • \n

      When you set ContentBasedDeduplication to true, \n Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using \n the body of the message (but not the attributes of the message).

      \n

      (Optional) To override the generated value, you can specify a value\n for the MessageDeduplicationId parameter for the Publish\n action.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

A map of attributes with their corresponding values.

\n

The following lists names, descriptions, and values of the special request\n parameters that the CreateTopic action uses:

\n
    \n
  • \n

    \n DeliveryPolicy – The policy that defines how Amazon SNS retries\n failed deliveries to HTTP/S endpoints.

    \n
  • \n
  • \n

    \n DisplayName – The display name to use for a topic with SMS\n subscriptions.

    \n
  • \n
  • \n

    \n FifoTopic – Set to true to create a FIFO topic.

    \n
  • \n
  • \n

    \n Policy – The policy that defines who can access your\n topic. By default, only the topic owner can publish or subscribe to the\n topic.

    \n
  • \n
  • \n

    \n SignatureVersion – The signature version corresponds to\n the hashing algorithm used while creating the signature of the notifications,\n subscription confirmations, or unsubscribe confirmation messages sent by Amazon SNS.\n By default, SignatureVersion is set to 1.

    \n
  • \n
  • \n

    \n TracingConfig – Tracing mode of an Amazon SNS topic. By default\n TracingConfig is set to PassThrough, and the topic\n passes through the tracing header it receives from an Amazon SNS publisher to its\n subscriptions. If set to Active, Amazon SNS will vend X-Ray segment data\n to topic owner account if the sampled flag in the tracing header is true. This\n is only supported on standard topics.

    \n
  • \n
\n

The following attribute applies only to server-side\n encryption:

\n
    \n
  • \n

    \n KmsMasterKeyId – The ID of an Amazon Web Services managed customer master\n key (CMK) for Amazon SNS or a custom CMK. For more information, see Key\n Terms. For more examples, see KeyId in the Key Management Service API Reference.

    \n
  • \n
\n

The following attributes apply only to FIFO topics:

\n
    \n
  • \n

    \n ArchivePolicy – Adds or updates an inline policy document\n to archive messages stored in the specified Amazon SNS topic.

    \n
  • \n
  • \n

    \n BeginningArchiveTime – The earliest starting point at\n which a message in the topic’s archive can be replayed from. This point in time\n is based on the configured message retention period set by the topic’s message\n archiving policy.

    \n
  • \n
  • \n

    \n ContentBasedDeduplication – Enables content-based deduplication for\n FIFO topics.

    \n
      \n
    • \n

      By default, ContentBasedDeduplication is set to false.\n If you create a FIFO topic and this attribute is false, you must\n specify a value for the MessageDeduplicationId parameter for the\n Publish action.

      \n
    • \n
    • \n

      When you set ContentBasedDeduplication to true, \n Amazon SNS uses a SHA-256 hash to generate the MessageDeduplicationId using \n the body of the message (but not the attributes of the message).

      \n

      (Optional) To override the generated value, you can specify a value\n for the MessageDeduplicationId parameter for the Publish\n action.

      \n
    • \n
    \n
  • \n
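A hedged Soto sketch of creating a FIFO topic with the ContentBasedDeduplication attribute described above; the member labels and topic name are assumptions, not the verified generated API:

```swift
import SotoSNS

// Sketch (Soto 6-style client setup): create a FIFO topic with content-based
// deduplication using the attribute names listed above.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let sns = SNS(client: client, region: .useast1)

let response = try await sns.createTopic(
    .init(
        attributes: [
            "FifoTopic": "true",
            "ContentBasedDeduplication": "true"
        ],
        name: "orders.fifo" // hypothetical name; FIFO topic names end in .fifo
    )
)
print(response.topicArn ?? "")
```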
" } }, "Tags": { @@ -2420,7 +2420,7 @@ "Attributes": { "target": "com.amazonaws.sns#SubscriptionAttributesMap", "traits": { - "smithy.api#documentation": "

A map of the subscription's attributes. Attributes in this map include the\n following:

\n
    \n
  • \n

    \n ConfirmationWasAuthenticatedtrue if the\n subscription confirmation request was authenticated.

    \n
  • \n
  • \n

    \n DeliveryPolicy – The JSON serialization of the\n subscription's delivery policy.

    \n
  • \n
  • \n

    \n EffectiveDeliveryPolicy – The JSON serialization of the\n effective delivery policy that takes into account the topic delivery policy and\n account system defaults.

    \n
  • \n
  • \n

    \n FilterPolicy – The filter policy JSON that is assigned to\n the subscription. For more information, see Amazon SNS Message\n Filtering in the Amazon SNS Developer Guide.

    \n
  • \n
  • \n

    \n FilterPolicyScope – This attribute lets you choose the\n filtering scope by using one of the following string value types:

    \n
      \n
    • \n

      \n MessageAttributes (default) – The filter is\n applied on the message attributes.

      \n
    • \n
    • \n

      \n MessageBody – The filter is applied on the message\n body.

      \n
    • \n
    \n
  • \n
  • \n

    \n Owner – The Amazon Web Services account ID of the subscription's\n owner.

    \n
  • \n
  • \n

    \n PendingConfirmationtrue if the subscription\n hasn't been confirmed. To confirm a pending subscription, call the\n ConfirmSubscription action with a confirmation token.

    \n
  • \n
  • \n

    \n RawMessageDeliverytrue if raw message\n delivery is enabled for the subscription. Raw messages are free of JSON\n formatting and can be sent to HTTP/S and Amazon SQS endpoints.

    \n
  • \n
  • \n

    \n RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. \n Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable)\n or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held \n in the dead-letter queue for further analysis or reprocessing.

    \n
  • \n
  • \n

    \n SubscriptionArn – The subscription's ARN.

    \n
  • \n
  • \n

    \n TopicArn – The topic ARN that the subscription is associated\n with.

    \n
  • \n
\n

The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:

\n
    \n
  • \n

    \n SubscriptionRoleArn – The ARN of the IAM role that has the following:

    \n
      \n
    • \n

      Permission to write to the Kinesis Data Firehose delivery stream

      \n
    • \n
    • \n

      Amazon SNS listed as a trusted entity

      \n
    • \n
    \n

    Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. \n For more information, see Fanout \n to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.

    \n
  • \n
" + "smithy.api#documentation": "

A map of the subscription's attributes. Attributes in this map include the\n following:

\n
    \n
  • \n

    \n ConfirmationWasAuthenticatedtrue if the\n subscription confirmation request was authenticated.

    \n
  • \n
  • \n

    \n DeliveryPolicy – The JSON serialization of the\n subscription's delivery policy.

    \n
  • \n
  • \n

    \n EffectiveDeliveryPolicy – The JSON serialization of the\n effective delivery policy that takes into account the topic delivery policy and\n account system defaults.

    \n
  • \n
  • \n

    \n FilterPolicy – The filter policy JSON that is assigned to\n the subscription. For more information, see Amazon SNS Message\n Filtering in the Amazon SNS Developer Guide.

    \n
  • \n
  • \n

    \n FilterPolicyScope – This attribute lets you choose the\n filtering scope by using one of the following string value types:

    \n
      \n
    • \n

      \n MessageAttributes (default) – The filter is\n applied on the message attributes.

      \n
    • \n
    • \n

      \n MessageBody – The filter is applied on the message\n body.

      \n
    • \n
    \n
  • \n
  • \n

    \n Owner – The Amazon Web Services account ID of the subscription's\n owner.

    \n
  • \n
  • \n

    \n PendingConfirmationtrue if the subscription\n hasn't been confirmed. To confirm a pending subscription, call the\n ConfirmSubscription action with a confirmation token.

    \n
  • \n
  • \n

    \n RawMessageDeliverytrue if raw message\n delivery is enabled for the subscription. Raw messages are free of JSON\n formatting and can be sent to HTTP/S and Amazon SQS endpoints.

    \n
  • \n
  • \n

    \n RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. \n Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable)\n or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held \n in the dead-letter queue for further analysis or reprocessing.

    \n
  • \n
  • \n

    \n SubscriptionArn – The subscription's ARN.

    \n
  • \n
  • \n

    \n TopicArn – The topic ARN that the subscription is associated\n with.

    \n
  • \n
\n

The following attribute applies only to Amazon Data Firehose delivery stream subscriptions:

\n
    \n
  • \n

    \n SubscriptionRoleArn – The ARN of the IAM role that has the following:

    \n
      \n
    • \n

      Permission to write to the Firehose delivery stream

      \n
    • \n
    • \n

      Amazon SNS listed as a trusted entity

      \n
    • \n
    \n

    Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. \n For more information, see Fanout \n to Firehose delivery streams in the Amazon SNS Developer Guide.

    \n
  • \n
" } } }, @@ -4037,7 +4037,7 @@ "Subject": { "target": "com.amazonaws.sns#subject", "traits": { - "smithy.api#documentation": "

Optional parameter to be used as the \"Subject\" line when the message is delivered to\n email endpoints. This field will also be included, if present, in the standard JSON\n messages delivered to other endpoints.

\n

Constraints: Subjects must be ASCII text that begins with a letter, number, or\n punctuation mark; must not include line breaks or control characters; and must be less\n than 100 characters long.

" + "smithy.api#documentation": "

Optional parameter to be used as the \"Subject\" line when the message is delivered to\n email endpoints. This field will also be included, if present, in the standard JSON\n messages delivered to other endpoints.

\n

Constraints: Subjects must be UTF-8 text with no line breaks or control characters,\n and less than 100 characters long.

" } }, "MessageStructure": { @@ -4490,7 +4490,7 @@ "AttributeName": { "target": "com.amazonaws.sns#attributeName", "traits": { - "smithy.api#documentation": "

A map of attributes with their corresponding values.

\n

The following lists the names, descriptions, and values of the special request\n parameters that this action uses:

\n
    \n
  • \n

    \n DeliveryPolicy – The policy that defines how Amazon SNS retries\n failed deliveries to HTTP/S endpoints.

    \n
  • \n
  • \n

    \n FilterPolicy – The simple JSON object that lets your\n subscriber receive only a subset of messages, rather than receiving every\n message published to the topic.

    \n
  • \n
  • \n

    \n FilterPolicyScope – This attribute lets you choose the\n filtering scope by using one of the following string value types:

    \n
      \n
    • \n

      \n MessageAttributes (default) – The filter is\n applied on the message attributes.

      \n
    • \n
    • \n

      \n MessageBody – The filter is applied on the message\n body.

      \n
    • \n
    \n
  • \n
  • \n

    \n RawMessageDelivery – When set to true,\n enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the\n need for the endpoints to process JSON formatting, which is otherwise created\n for Amazon SNS metadata.

    \n
  • \n
  • \n

    \n RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. \n Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable)\n or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held \n in the dead-letter queue for further analysis or reprocessing.

    \n
  • \n
\n

The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:

\n
    \n
  • \n

    \n SubscriptionRoleArn – The ARN of the IAM role that has the following:

    \n
      \n
    • \n

      Permission to write to the Kinesis Data Firehose delivery stream

      \n
    • \n
    • \n

      Amazon SNS listed as a trusted entity

      \n
    • \n
    \n

    Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. \n For more information, see Fanout \n to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.

    \n
  • \n
", + "smithy.api#documentation": "

A map of attributes with their corresponding values.

\n

The following lists the names, descriptions, and values of the special request\n parameters that this action uses:

\n
    \n
  • \n

    \n DeliveryPolicy – The policy that defines how Amazon SNS retries\n failed deliveries to HTTP/S endpoints.

    \n
  • \n
  • \n

    \n FilterPolicy – The simple JSON object that lets your\n subscriber receive only a subset of messages, rather than receiving every\n message published to the topic.

    \n
  • \n
  • \n

    \n FilterPolicyScope – This attribute lets you choose the\n filtering scope by using one of the following string value types:

    \n
      \n
    • \n

      \n MessageAttributes (default) – The filter is\n applied on the message attributes.

      \n
    • \n
    • \n

      \n MessageBody – The filter is applied on the message\n body.

      \n
    • \n
    \n
  • \n
  • \n

    \n RawMessageDelivery – When set to true,\n enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the\n need for the endpoints to process JSON formatting, which is otherwise created\n for Amazon SNS metadata.

    \n
  • \n
  • \n

    \n RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. \n Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable)\n or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held \n in the dead-letter queue for further analysis or reprocessing.

    \n
  • \n
\n

The following attribute applies only to Amazon Data Firehose delivery stream subscriptions:

\n
    \n
  • \n

    \n SubscriptionRoleArn – The ARN of the IAM role that has the following:

    \n
      \n
    • \n

      Permission to write to the Firehose delivery stream

      \n
    • \n
    • \n

      Amazon SNS listed as a trusted entity

      \n
    • \n
    \n

    Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. \n For more information, see Fanout \n to Firehose delivery streams in the Amazon SNS Developer Guide.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -4648,7 +4648,7 @@ "Attributes": { "target": "com.amazonaws.sns#SubscriptionAttributesMap", "traits": { - "smithy.api#documentation": "

A map of attributes with their corresponding values.

\n

The following lists the names, descriptions, and values of the special request\n parameters that the Subscribe action uses:

\n
    \n
  • \n

    \n DeliveryPolicy – The policy that defines how Amazon SNS retries\n failed deliveries to HTTP/S endpoints.

    \n
  • \n
  • \n

    \n FilterPolicy – The simple JSON object that lets your\n subscriber receive only a subset of messages, rather than receiving every\n message published to the topic.

    \n
  • \n
  • \n

    \n FilterPolicyScope – This attribute lets you choose the\n filtering scope by using one of the following string value types:

    \n
      \n
    • \n

      \n MessageAttributes (default) – The filter is\n applied on the message attributes.

      \n
    • \n
    • \n

      \n MessageBody – The filter is applied on the message\n body.

      \n
    • \n
    \n
  • \n
  • \n

    \n RawMessageDelivery – When set to true,\n enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the\n need for the endpoints to process JSON formatting, which is otherwise created\n for Amazon SNS metadata.

    \n
  • \n
  • \n

    \n RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. \n Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable)\n or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held \n in the dead-letter queue for further analysis or reprocessing.

    \n
  • \n
\n

The following attribute applies only to Amazon Kinesis Data Firehose delivery stream subscriptions:

\n
    \n
  • \n

    \n SubscriptionRoleArn – The ARN of the IAM role that has the following:

    \n
      \n
    • \n

      Permission to write to the Kinesis Data Firehose delivery stream

      \n
    • \n
    • \n

      Amazon SNS listed as a trusted entity

      \n
    • \n
    \n

    Specifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. \n For more information, see Fanout \n to Kinesis Data Firehose delivery streams in the Amazon SNS Developer Guide.

    \n
  • \n
\n

The following attributes apply only to FIFO topics:

\n
    \n
  • \n

    \n ReplayPolicy – Adds or updates an inline policy document\n for a subscription to replay messages stored in the specified Amazon SNS\n topic.

    \n
  • \n
  • \n

    \n ReplayStatus – Retrieves the status of the subscription\n message replay, which can be one of the following:

    \n
      \n
    • \n

      \n Completed – The replay has successfully\n redelivered all messages, and is now delivering newly published\n messages. If an ending point was specified in the\n ReplayPolicy then the subscription will no longer\n receive newly published messages.

      \n
    • \n
    • \n

      \n In progress – The replay is currently replaying\n the selected messages.

      \n
    • \n
    • \n

      \n Failed – The replay was unable to complete.

      \n
    • \n
    • \n

      \n Pending – The default state while the replay\n initiates.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

A map of attributes with their corresponding values.

\n

The following lists the names, descriptions, and values of the special request\n parameters that the Subscribe action uses:

\n
    \n
  • \n

    \n DeliveryPolicy – The policy that defines how Amazon SNS retries\n failed deliveries to HTTP/S endpoints.

    \n
  • \n
  • \n

    \n FilterPolicy – The simple JSON object that lets your\n subscriber receive only a subset of messages, rather than receiving every\n message published to the topic.

    \n
  • \n
  • \n

    \n FilterPolicyScope – This attribute lets you choose the\n filtering scope by using one of the following string value types:

    \n
      \n
    • \n

      \n MessageAttributes (default) – The filter is\n applied on the message attributes.

      \n
    • \n
    • \n

      \n MessageBody – The filter is applied on the message\n body.

      \n
    • \n
    \n
  • \n
  • \n

    \n RawMessageDelivery – When set to true,\n enables raw message delivery to Amazon SQS or HTTP/S endpoints. This eliminates the\n need for the endpoints to process JSON formatting, which is otherwise created\n for Amazon SNS metadata.

    \n
  • \n
  • \n

    \n RedrivePolicy – When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. \n Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable)\n or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held \n in the dead-letter queue for further analysis or reprocessing.

    \n
  • \n
\n

The following attribute applies only to Amazon Data Firehose delivery stream subscriptions:

\n
    \n
  • \n

    \n SubscriptionRoleArn – The ARN of the IAM role that has the following:

    \n
      \n
    • \n

      Permission to write to the Firehose delivery stream

      \n
    • \n
    • \n

      Amazon SNS listed as a trusted entity

      \n
    • \n
    \n

    Specifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. \n For more information, see Fanout \n to Firehose delivery streams in the Amazon SNS Developer Guide.

    \n
  • \n
\n

The following attributes apply only to FIFO topics:

\n
    \n
  • \n

    \n ReplayPolicy – Adds or updates an inline policy document\n for a subscription to replay messages stored in the specified Amazon SNS\n topic.

    \n
  • \n
  • \n

    \n ReplayStatus – Retrieves the status of the subscription\n message replay, which can be one of the following:

    \n
      \n
    • \n

      \n Completed – The replay has successfully\n redelivered all messages, and is now delivering newly published\n messages. If an ending point was specified in the\n ReplayPolicy then the subscription will no longer\n receive newly published messages.

      \n
    • \n
    • \n

      \n In progress – The replay is currently replaying\n the selected messages.

      \n
    • \n
    • \n

      \n Failed – The replay was unable to complete.

      \n
    • \n
    • \n

      \n Pending – The default state while the replay\n initiates.

      \n
    • \n
    \n
  • \n
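A hedged Soto sketch of subscribing a Firehose delivery stream with the SubscriptionRoleArn attribute described above; all ARNs are placeholders and the member labels are assumptions based on Soto's usual codegen conventions:

```swift
import SotoSNS

// Sketch (Soto 6-style client setup): subscribe a Firehose delivery stream and
// supply the SubscriptionRoleArn attribute. ARNs are placeholders.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let sns = SNS(client: client, region: .useast1)

let response = try await sns.subscribe(
    .init(
        attributes: [
            "SubscriptionRoleArn": "arn:aws:iam::123456789012:role/sns-firehose-role",
            "RawMessageDelivery": "true"
        ],
        endpoint: "arn:aws:firehose:us-east-1:123456789012:deliverystream/my-stream",
        `protocol`: "firehose",
        returnSubscriptionArn: true,
        topicArn: "arn:aws:sns:us-east-1:123456789012:my-topic"
    )
)
print(response.subscriptionArn ?? "")
```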
" } }, "ReturnSubscriptionArn": { diff --git a/models/sqs.json b/models/sqs.json index 1982004f6e..9a96b4f9d0 100644 --- a/models/sqs.json +++ b/models/sqs.json @@ -70,7 +70,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds a permission to a queue for a specific principal. This allows sharing\n access to the queue.

\n

When you create a queue, you have full control access rights for the queue. Only you,\n the owner of the queue, can grant or deny permissions to the queue. For more information\n about these permissions, see Allow Developers to Write Messages to a Shared Queue in the\n Amazon SQS Developer Guide.

\n \n
    \n
  • \n

    \n AddPermission generates a policy for you. You can use\n \n SetQueueAttributes\n to upload your\n policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy\n Language in the Amazon SQS Developer\n Guide.

    \n
  • \n
  • \n

    An Amazon SQS policy can have a maximum of seven actions per\n statement.

    \n
  • \n
  • \n

    To remove the ability to change queue permissions, you must deny\n permission to the AddPermission, RemovePermission,\n and SetQueueAttributes actions in your IAM policy.

    \n
  • \n
  • \n

    Amazon SQS AddPermission does not support adding a\n non-account principal.

    \n
  • \n
\n
\n \n

Cross-account permissions don't apply to this action. For more information, see\n Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

\n
" + "smithy.api#documentation": "

Adds a permission to a queue for a specific principal. This allows sharing\n access to the queue.

\n

When you create a queue, you have full control access rights for the queue. Only you,\n the owner of the queue, can grant or deny permissions to the queue. For more information\n about these permissions, see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS\n Developer Guide.

\n \n
    \n
  • \n

    \n AddPermission generates a policy for you. You can use\n \n SetQueueAttributes\n to upload your\n policy. For more information, see Using Custom Policies with the Amazon SQS Access Policy Language in\n the Amazon SQS Developer Guide.

    \n
  • \n
  • \n

    An Amazon SQS policy can have a maximum of seven actions per statement.

    \n
  • \n
  • \n

    To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

    \n
  • \n
  • \n

    Amazon SQS AddPermission does not support adding a non-account\n principal.

    \n
  • \n
\n
\n \n

Cross-account permissions don't apply to this action. For more information, \nsee Grant \ncross-account permissions to a role and a username in the Amazon SQS Developer Guide.

\n
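A hedged Soto sketch of the AddPermission call described above, granting another account SendMessage access; the account ID, label, and queue URL are placeholders and member labels are assumptions based on Soto's usual conventions:

```swift
import SotoSQS

// Sketch (Soto 6-style client setup): grant another account permission to send
// messages to a queue. Account ID, label, and queue URL are placeholders.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let sqs = SQS(client: client, region: .useast1)

_ = try await sqs.addPermission(
    .init(
        actions: ["SendMessage"],        // at most seven actions per statement
        awsAccountIds: ["111122223333"], // hypothetical account ID
        label: "AllowPartnerSendMessage",
        queueUrl: "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"
    )
)
```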
" } }, "com.amazonaws.sqs#AddPermissionRequest": { @@ -93,7 +93,7 @@ "AWSAccountIds": { "target": "com.amazonaws.sqs#AWSAccountIdList", "traits": { - "smithy.api#documentation": "

The Amazon Web\n Services account numbers of the principals who are to receive\n permission. For information about locating the Amazon Web Services\n account identification, see Your Amazon Web\n Services Identifiers in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

The Amazon Web Services account numbers of the principals who are to receive\n permission. For information about locating the Amazon Web Services account identification, see Your Amazon Web Services Identifiers in the Amazon SQS Developer\n Guide.

", "smithy.api#required": {}, "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "AWSAccountId" @@ -201,7 +201,7 @@ }, "aws.protocols#awsJson1_0": {}, "aws.protocols#awsQueryCompatible": {}, - "smithy.api#documentation": "

Welcome to the Amazon SQS API Reference.

\n

Amazon SQS is a reliable, highly-scalable hosted queue for storing messages as they\n travel between applications or microservices. Amazon SQS moves data between distributed\n application components and helps you decouple these components.

\n

For information on the permissions you need to use this API, see Identity and access management in the Amazon SQS Developer\n Guide.\n

\n

You can use Amazon Web Services\n SDKs to access Amazon SQS using your favorite programming language. The SDKs\n perform tasks such as the following automatically:

\n
    \n
  • \n

    Cryptographically sign your service requests

    \n
  • \n
  • \n

    Retry requests

    \n
  • \n
  • \n

    Handle error responses

    \n
  • \n
\n

\n Additional information\n

\n ", + "smithy.api#documentation": "

Welcome to the Amazon SQS API Reference.

\n

Amazon SQS is a reliable, highly-scalable hosted queue for storing messages as they travel\n between applications or microservices. Amazon SQS moves data between distributed application\n components and helps you decouple these components.

\n

For information on the permissions you need to use this API, see Identity and access management in the Amazon SQS Developer\n Guide.\n

\n

You can use Amazon Web Services SDKs to access\n Amazon SQS using your favorite programming language. The SDKs perform tasks such as the\n following automatically:

\n
    \n
  • \n

    Cryptographically sign your service requests

    \n
  • \n
  • \n

    Retry requests

    \n
  • \n
  • \n

    Handle error responses

    \n
  • \n
\n

\n Additional information\n

\n ", "smithy.api#title": "Amazon Simple Queue Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1338,7 +1338,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels a specified message movement task. A message movement can only be cancelled\n when the current status is RUNNING. Cancelling a message movement task does not revert\n the messages that have already been moved. It can only stop the messages that have not\n been moved yet.

  • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source\n queue is the dead-letter queue (DLQ), while the destination queue can be the\n original source queue (from which the messages were driven to the\n dead-letter-queue), or a custom destination queue.
  • Currently, only standard queues are supported.
  • Only one active message movement task is supported per queue at any given\n time.

" + "smithy.api#documentation": "

Cancels a specified message movement task. A message movement can only be cancelled\n when the current status is RUNNING. Cancelling a message movement task does not revert\n the messages that have already been moved. It can only stop the messages that have not\n been moved yet.

  • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source\n queue is the dead-letter queue (DLQ), while the destination queue can be the\n original source queue (from which the messages were driven to the\n dead-letter-queue), or a custom destination queue.
  • Only one active message movement task is supported per queue at any given\n time.

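A hedged Soto sketch of cancelling an in-flight message movement task; the task handle is a placeholder returned by StartMessageMoveTask and the member labels are assumptions based on Soto's usual conventions:

```swift
import SotoSQS

// Sketch (Soto 6-style client setup): cancel an in-flight message movement
// (DLQ redrive) task. The task handle is a placeholder.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let sqs = SQS(client: client, region: .useast1)

let response = try await sqs.cancelMessageMoveTask(
    .init(taskHandle: "AQEB-example-task-handle")
)
// Messages already moved before the cancellation took effect.
print(response.approximateNumberOfMessagesMoved ?? 0)
```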
" } }, "com.amazonaws.sqs#CancelMessageMoveTaskRequest": { @@ -1403,7 +1403,7 @@ } ], "traits": { - "smithy.api#documentation": "

Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.
For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error.
An Amazon SQS message has three basic states:
  1. Sent to a queue by a producer.
  2. Received from the queue by a consumer.
  3. Deleted from the queue.
A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages.
Limits that apply to in flight messages are unrelated to the unlimited number of stored messages.
For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.
For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.
If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.
Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.
" + "smithy.api#documentation": "
Changes the visibility timeout of a specified message in a queue to a new value. The default visibility timeout for a message is 30 seconds. The minimum is 0 seconds. The maximum is 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide.
For example, if the default timeout for a queue is 60 seconds, 15 seconds have elapsed since you received the message, and you send a ChangeMessageVisibility call with VisibilityTimeout set to 10 seconds, the 10 seconds begin to count from the time that you make the ChangeMessageVisibility call. Thus, any attempt to change the visibility timeout or to delete that message 10 seconds after you initially change the visibility timeout (a total of 25 seconds) might result in an error.
An Amazon SQS message has three basic states:
  1. Sent to a queue by a producer.
  2. Received from the queue by a consumer.
  3. Deleted from the queue.
A message is considered to be stored after it is sent to a queue by a producer, but not yet received from the queue by a consumer (that is, between states 1 and 2). There is no limit to the number of stored messages. A message is considered to be in flight after it is received from a queue by a consumer, but not yet deleted from the queue (that is, between states 2 and 3). There is a limit to the number of in flight messages.
Limits that apply to in flight messages are unrelated to the unlimited number of stored messages.
For most standard queues (depending on queue traffic and message backlog), there can be a maximum of approximately 120,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages. To request a limit increase, file a support request.
For FIFO queues, there can be a maximum of 20,000 in flight messages (received from a queue by a consumer, but not yet deleted from the queue). If you reach this limit, Amazon SQS returns no error messages.
If you attempt to set the VisibilityTimeout to a value greater than the maximum time left, Amazon SQS returns an error. Amazon SQS doesn't automatically recalculate and increase the timeout to the maximum remaining time.
Unlike with a queue, when you change the visibility timeout for a specific message the timeout value is applied immediately but isn't saved in memory for that message. If you don't delete a message after it is received, the visibility timeout for the message reverts to the original timeout value (not to the value you set using the ChangeMessageVisibility action) the next time the message is received.
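A minimal Soto sketch of the call described above (not taken from this patch). It assumes an existing AWSClient, a known queueUrl, and a message previously returned by ReceiveMessage; member names follow the Soto convention of alphabetically ordered labels.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    // Give ourselves 10 more seconds to finish processing before the message
    // becomes visible to other consumers again.
    try await sqs.changeMessageVisibility(
        .init(
            queueUrl: queueUrl,                     // e.g. resolved via getQueueUrl
            receiptHandle: message.receiptHandle!,  // handle from the ReceiveMessage response
            visibilityTimeout: 10
        )
    )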
" } }, "com.amazonaws.sqs#ChangeMessageVisibilityBatch": { @@ -1444,7 +1444,7 @@ } ], "traits": { - "smithy.api#documentation": "

Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.
Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.
" + "smithy.api#documentation": "
Changes the visibility timeout of multiple messages. This is a batch version of ChangeMessageVisibility. The result of the action on each message is reported individually in the response. You can send up to 10 ChangeMessageVisibility requests with each ChangeMessageVisibilityBatch action.
Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.
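A hedged Soto sketch of the batch variant, including the per-entry failure check the documentation recommends. It assumes client, queueUrl, and a receivedMessages array from an earlier ReceiveMessage call; exact generated signatures should be verified against SQS_shapes.swift.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    // Extend the visibility timeout of several received messages in one request.
    let entries = receivedMessages.enumerated().map { index, message in
        SQS.ChangeMessageVisibilityBatchRequestEntry(
            id: "msg-\(index)",                      // must be unique within the request
            receiptHandle: message.receiptHandle!,
            visibilityTimeout: 60
        )
    }
    let result = try await sqs.changeMessageVisibilityBatch(
        .init(entries: entries, queueUrl: queueUrl)
    )
    // The call can return HTTP 200 with a mix of successes and failures.
    for failure in result.failed {
        print("entry \(failure.id) failed: \(failure.code)")
    }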
" } }, "com.amazonaws.sqs#ChangeMessageVisibilityBatchRequest": { @@ -1478,7 +1478,7 @@ "Id": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An identifier for this particular receipt handle used to communicate the result.
The Ids of a batch request need to be unique within a request.
This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).
", + "smithy.api#documentation": "
An identifier for this particular receipt handle used to communicate the result.
The Ids of a batch request need to be unique within a request.
This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).
", "smithy.api#required": {} } }, @@ -1618,7 +1618,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:
  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide.
  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.
  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.
After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.
To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:
  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.
  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.
Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.
" + "smithy.api#documentation": "
Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following in mind:
  • If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see Moving From a Standard Queue to a FIFO Queue in the Amazon SQS Developer Guide.
  • If you don't provide a value for an attribute, the queue is created with the default value for the attribute.
  • If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.
After you create a queue, you must wait at least one second after the queue is created to be able to use the queue.
To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only the QueueName parameter. Be aware of existing queue names:
  • If you provide the name of an existing queue along with the exact names and values of all the queue's attributes, CreateQueue returns the queue URL for the existing queue.
  • If the queue name, attribute names, or attribute values don't match an existing queue, CreateQueue returns an error.
Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.
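A rough Soto sketch of creating a FIFO queue (not part of this patch). It assumes client is an existing AWSClient and that the generated attribute map is keyed by a QueueAttributeName enum with lowerCamelCase cases; if the generated type uses plain strings, substitute the attribute names from the documentation above.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    // FIFO queue names must end in ".fifo" and FifoQueue must be set at creation time.
    let response = try await sqs.createQueue(
        .init(
            attributes: [
                .fifoQueue: "true",
                .contentBasedDeduplication: "true"
            ],
            queueName: "orders.fifo"
        )
    )
    print(response.queueUrl ?? "no queue URL returned")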
" } }, "com.amazonaws.sqs#CreateQueueRequest": { @@ -1634,7 +1634,7 @@ "Attributes": { "target": "com.amazonaws.sqs#QueueAttributeMap", "traits": { - "smithy.api#documentation": "

A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:
  • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.
  • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).
  • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.
  • Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the IAM User Guide.
  • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.
  • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.
The following attributes apply only to dead-letter queues:
  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:
    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.
    • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.
  • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:
    • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:
      • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.
      • denyAll – No source queues can specify this queue as the dead-letter queue.
      • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.
    • sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.
The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.
The following attributes apply only to server-side-encryption:
  • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the Amazon Web Services managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the Key Management Service API Reference.
  • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?
  • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).
The following attributes apply only to FIFO (first-in-first-out) queues:
  • FifoQueue – Designates a queue as FIFO. Valid values are true and false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO queue logic in the Amazon SQS Developer Guide.
  • ContentBasedDeduplication – Enables content-based deduplication. Valid values are true and false. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following:
    • Every message must have a unique MessageDeduplicationId.
      • You may provide a MessageDeduplicationId explicitly.
      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.
The following attributes apply only to high throughput for FIFO queues:
  • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.
  • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.
To enable high throughput for FIFO queues, do the following:
  • Set DeduplicationScope to messageGroup.
  • Set FifoThroughputLimit to perMessageGroupId.
If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.
For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.
", + "smithy.api#documentation": "
A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:
  • DelaySeconds – The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). Default: 0.
  • MaximumMessageSize – The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).
  • MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.
  • Policy – The queue's policy. A valid Amazon Web Services policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the IAM User Guide.
  • ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for which a ReceiveMessage action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.
  • VisibilityTimeout – The visibility timeout for the queue, in seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.
The following attributes apply only to dead-letter queues:
  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:
    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.
    • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.
  • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:
    • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:
      • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.
      • denyAll – No source queues can specify this queue as the dead-letter queue.
      • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.
    • sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.
The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.
The following attributes apply only to server-side-encryption:
  • KmsMasterKeyId – The ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the Amazon Web Services managed CMK for Amazon SQS is always alias/aws/sqs, the alias of a custom CMK can, for example, be alias/MyAlias. For more examples, see KeyId in the Key Management Service API Reference.
  • KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default: 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which might incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work?
  • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).
The following attributes apply only to FIFO (first-in-first-out) queues:
  • FifoQueue – Designates a queue as FIFO. Valid values are true and false. If you don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO queue logic in the Amazon SQS Developer Guide.
  • ContentBasedDeduplication – Enables content-based deduplication. Valid values are true and false. For more information, see Exactly-once processing in the Amazon SQS Developer Guide. Note the following:
    • Every message must have a unique MessageDeduplicationId.
      • You may provide a MessageDeduplicationId explicitly.
      • If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
      • If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
      • If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
    • When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
    • If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId, the two messages are treated as duplicates and only one copy of the message is delivered.
The following attributes apply only to high throughput for FIFO queues:
  • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.
  • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.
To enable high throughput for FIFO queues, do the following:
  • Set DeduplicationScope to messageGroup.
  • Set FifoThroughputLimit to perMessageGroupId.
If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.
For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.
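A hedged Soto sketch of passing a RedrivePolicy through the attribute map described above; the ARN, account ID, and queue name are placeholders, and the enum-keyed attribute map is an assumption about the generated Swift shapes.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    // RedrivePolicy is itself a JSON string embedded as an attribute value.
    let redrivePolicy = #"{"deadLetterTargetArn":"arn:aws:sqs:us-east-1:111122223333:my-dlq","maxReceiveCount":"10"}"#

    _ = try await sqs.createQueue(
        .init(
            attributes: [
                .redrivePolicy: redrivePolicy,
                .visibilityTimeout: "60",
                .messageRetentionPeriod: "1209600"   // 14 days
            ],
            queueName: "my-queue"
        )
    )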

", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Attribute" } @@ -1642,7 +1642,7 @@ "tags": { "target": "com.amazonaws.sqs#TagMap", "traits": { - "smithy.api#documentation": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.
When you use queue tags, keep the following guidelines in mind:
  • Adding more than 50 tags to a queue isn't recommended.
  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.
  • Tags are case-sensitive.
  • A new tag with a key identical to that of an existing tag overwrites the existing tag.
For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.
To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.
Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.
", + "smithy.api#documentation": "
Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging Your Amazon SQS Queues in the Amazon SQS Developer Guide.
When you use queue tags, keep the following guidelines in mind:
  • Adding more than 50 tags to a queue isn't recommended.
  • Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.
  • Tags are case-sensitive.
  • A new tag with a key identical to that of an existing tag overwrites the existing tag.
For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer Guide.
To be able to tag a queue on creation, you must have the sqs:CreateQueue and sqs:TagQueue permissions.
Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.
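A brief Soto sketch of tagging a queue at creation time (requires the sqs:CreateQueue and sqs:TagQueue permissions noted above); queue name and tag values are illustrative only.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    _ = try await sqs.createQueue(
        .init(
            queueName: "billing-reports",
            tags: ["team": "billing", "env": "prod"]   // tag keys are case-sensitive
        )
    )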
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Tag" } @@ -1700,7 +1700,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.
The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message will not be deleted).
For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.
" + "smithy.api#documentation": "
Deletes the specified message from the specified queue. To select the message to delete, use the ReceiptHandle of the message (not the MessageId which you receive when you send the message). Amazon SQS can delete a message from a queue even if a visibility timeout setting causes the message to be locked by another consumer. Amazon SQS automatically deletes messages left in a queue longer than the retention period configured for the queue.
The ReceiptHandle is associated with a specific instance of receiving a message. If you receive a message more than once, the ReceiptHandle is different each time you receive a message. When you use the DeleteMessage action, you must provide the most recently received ReceiptHandle for the message (otherwise, the request succeeds, but the message will not be deleted).
For standard queues, it is possible to receive a message even after you delete it. This might happen on rare occasions if one of the servers which stores a copy of the message is unavailable when you send the request to delete the message. The copy remains on the server and might be returned to you during a subsequent receive request. You should ensure that your application is idempotent, so that receiving a message more than once does not cause issues.
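A minimal receive-then-delete Soto sketch illustrating the receipt-handle flow above; client and queueUrl are assumed to exist, and the generated member names should be checked against SQS_shapes.swift.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    let received = try await sqs.receiveMessage(
        .init(maxNumberOfMessages: 1, queueUrl: queueUrl, waitTimeSeconds: 10)
    )
    for message in received.messages ?? [] {
        // ... process message.body ...
        // Delete with the most recently received handle; an old handle can
        // leave the message in the queue even though the request succeeds.
        if let handle = message.receiptHandle {
            try await sqs.deleteMessage(.init(queueUrl: queueUrl, receiptHandle: handle))
        }
    }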
" } }, "com.amazonaws.sqs#DeleteMessageBatch": { @@ -1741,7 +1741,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.
Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.
" + "smithy.api#documentation": "
Deletes up to ten messages from the specified queue. This is a batch version of DeleteMessage. The result of the action on each message is reported individually in the response.
Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.
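A hedged Soto sketch of the batch delete, including the failure check; receivedMessages is assumed to come from an earlier ReceiveMessage call.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    let entries = receivedMessages.enumerated().map { index, message in
        SQS.DeleteMessageBatchRequestEntry(id: "del-\(index)", receiptHandle: message.receiptHandle!)
    }
    let result = try await sqs.deleteMessageBatch(.init(entries: entries, queueUrl: queueUrl))
    // HTTP 200 does not mean every entry succeeded.
    if !result.failed.isEmpty {
        print("\(result.failed.count) deletions failed")
    }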
" } }, "com.amazonaws.sqs#DeleteMessageBatchRequest": { @@ -1775,7 +1775,7 @@ "Id": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

The identifier for this particular receipt handle. This is used to communicate the result.
The Ids of a batch request need to be unique within a request.
This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).
", + "smithy.api#documentation": "
The identifier for this particular receipt handle. This is used to communicate the result.
The Ids of a batch request need to be unique within a request.
This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).
", "smithy.api#required": {} } }, @@ -1894,7 +1894,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the queue specified by the QueueUrl, regardless of the queue's contents.
Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.
When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.
When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.
The delete operation uses the HTTP GET verb.
" + "smithy.api#documentation": "
Deletes the queue specified by the QueueUrl, regardless of the queue's contents.
Be careful with the DeleteQueue action: When you delete a queue, any messages in the queue are no longer available.
When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a SendMessage request might succeed, but after 60 seconds the queue and the message you sent no longer exist.
When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.
The delete operation uses the HTTP GET verb.
" } }, "com.amazonaws.sqs#DeleteQueueRequest": { @@ -1962,7 +1962,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets attributes for the specified queue.
To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.
" + "smithy.api#documentation": "
Gets attributes for the specified queue.
To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.
" } }, "com.amazonaws.sqs#GetQueueAttributesRequest": { @@ -1978,7 +1978,7 @@ "AttributeNames": { "target": "com.amazonaws.sqs#AttributeNameList", "traits": { - "smithy.api#documentation": "

A list of attributes for which to retrieve information.
The AttributeNames parameter is optional, but if you don't specify values for this parameter, the request returns empty results.
In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
The following attributes are supported:
The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessages metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.
  • All – Returns all values.
  • ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue.
  • ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.
  • ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.
  • CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time).
  • DelaySeconds – Returns the default delay on the queue in seconds.
  • LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time).
  • MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.
  • MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.
  • Policy – Returns the policy of the queue.
  • QueueArn – Returns the Amazon resource name (ARN) of the queue.
  • ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.
  • VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.
The following attributes apply only to dead-letter queues:
  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:
    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.
    • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.
  • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:
    • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:
      • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.
      • denyAll – No source queues can specify this queue as the dead-letter queue.
      • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.
    • sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.
The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.
The following attributes apply only to server-side-encryption:
  • KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.
  • KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see How Does the Data Key Reuse Period Work?.
  • SqsManagedSseEnabled – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).
The following attributes apply only to FIFO (first-in-first-out) queues:
  • FifoQueue – Returns information about whether the queue is FIFO. For more information, see FIFO queue logic in the Amazon SQS Developer Guide. To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.
  • ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-once processing in the Amazon SQS Developer Guide.
The following attributes apply only to high throughput for FIFO queues:
  • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.
  • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.
To enable high throughput for FIFO queues, do the following:
  • Set DeduplicationScope to messageGroup.
  • Set FifoThroughputLimit to perMessageGroupId.
If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.
For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.
", + "smithy.api#documentation": "
A list of attributes for which to retrieve information.
The AttributeNames parameter is optional, but if you don't specify values for this parameter, the request returns empty results.
In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
The following attributes are supported:
The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, and ApproximateNumberOfMessages metrics may not achieve consistency until at least 1 minute after the producers stop sending messages. This period is required for the queue metadata to reach eventual consistency.
  • All – Returns all values.
  • ApproximateNumberOfMessages – Returns the approximate number of messages available for retrieval from the queue.
  • ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages in the queue that are delayed and not available for reading immediately. This can happen when the queue is configured as a delay queue or when a message has been sent with a delay parameter.
  • ApproximateNumberOfMessagesNotVisible – Returns the approximate number of messages that are in flight. Messages are considered to be in flight if they have been sent to a client but have not yet been deleted or have not yet reached the end of their visibility window.
  • CreatedTimestamp – Returns the time when the queue was created in seconds (epoch time).
  • DelaySeconds – Returns the default delay on the queue in seconds.
  • LastModifiedTimestamp – Returns the time when the queue was last changed in seconds (epoch time).
  • MaximumMessageSize – Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.
  • MessageRetentionPeriod – Returns the length of time, in seconds, for which Amazon SQS retains a message. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes and will impact existing messages in the queue potentially causing them to be expired and deleted if the MessageRetentionPeriod is reduced below the age of existing messages.
  • Policy – Returns the policy of the queue.
  • QueueArn – Returns the Amazon resource name (ARN) of the queue.
  • ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.
  • VisibilityTimeout – Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide.
The following attributes apply only to dead-letter queues:
  • RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:
    • deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.
    • maxReceiveCount – The number of times a message is delivered to the source queue before being moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon SQS moves the message to the dead-letter-queue.
  • RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:
    • redrivePermission – The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:
      • allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can specify this queue as the dead-letter queue.
      • denyAll – No source queues can specify this queue as the dead-letter queue.
      • byQueue – Only queues specified by the sourceQueueArns parameter can specify this queue as the dead-letter queue.
    • sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter to allowAll.
The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.
The following attributes apply only to server-side-encryption:
  • KmsMasterKeyId – Returns the ID of an Amazon Web Services managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms.
  • KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling KMS again. For more information, see How Does the Data Key Reuse Period Work?.
  • SqsManagedSseEnabled – Returns information about whether the queue is using SSE-SQS encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).
The following attributes apply only to FIFO (first-in-first-out) queues:
  • FifoQueue – Returns information about whether the queue is FIFO. For more information, see FIFO queue logic in the Amazon SQS Developer Guide. To determine whether a queue is FIFO, you can check whether QueueName ends with the .fifo suffix.
  • ContentBasedDeduplication – Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-once processing in the Amazon SQS Developer Guide.
The following attributes apply only to high throughput for FIFO queues:
  • DeduplicationScope – Specifies whether message deduplication occurs at the message group or queue level. Valid values are messageGroup and queue.
  • FifoThroughputLimit – Specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.
To enable high throughput for FIFO queues, do the following:
  • Set DeduplicationScope to messageGroup.
  • Set FifoThroughputLimit to perMessageGroupId.
If you set these attributes to anything other than the values shown for enabling high throughput, normal throughput is in effect and deduplication occurs as specified.
For information on throughput quotas, see Quotas related to messages in the Amazon SQS Developer Guide.
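A hedged Soto sketch of requesting a few specific attributes rather than All; the lowerCamelCase enum cases for attribute names are an assumption about the generated Swift code.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    let attrs = try await sqs.getQueueAttributes(
        .init(
            attributeNames: [.approximateNumberOfMessages, .queueArn, .redrivePolicy],
            queueUrl: queueUrl
        )
    )
    // Attribute values come back as strings keyed by attribute name.
    print(attrs.attributes?[.approximateNumberOfMessages] ?? "0")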

", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "AttributeName" } @@ -2032,7 +2032,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the URL of an existing Amazon SQS queue.
To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.
" + "smithy.api#documentation": "
Returns the URL of an existing Amazon SQS queue.
To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see AddPermission or see Allow Developers to Write Messages to a Shared Queue in the Amazon SQS Developer Guide.
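A short Soto sketch of resolving a queue URL, including a queue owned by another account that has granted access; the queue name and account ID are placeholders.

    import SotoSQS

    let sqs = SQS(client: client, region: .useast1)

    let response = try await sqs.getQueueUrl(
        .init(queueName: "shared-queue", queueOwnerAWSAccountId: "111122223333")
    )
    print(response.queueUrl ?? "no queue URL returned")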

" } }, "com.amazonaws.sqs#GetQueueUrlRequest": { @@ -2048,7 +2048,7 @@ "QueueOwnerAWSAccountId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID of the account that created the queue.
" + "smithy.api#documentation": "
The Amazon Web Services account ID of the account that created the queue.

" } } }, @@ -2068,7 +2068,7 @@ } }, "traits": { - "smithy.api#documentation": "

For more information, see Interpreting Responses in the Amazon SQS Developer Guide.
", + "smithy.api#documentation": "
For more information, see Interpreting Responses in the Amazon SQS Developer Guide.

", "smithy.api#output": {} } }, @@ -2402,7 +2402,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the most recent message movement tasks (up to 10) under a specific source queue.
  • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.
  • Currently, only standard queues are supported.
  • Only one active message movement task is supported per queue at any given time.
" + "smithy.api#documentation": "
Gets the most recent message movement tasks (up to 10) under a specific source queue.
  • This action is currently limited to supporting message redrive from dead-letter queues (DLQs) only. In this context, the source queue is the dead-letter queue (DLQ), while the destination queue can be the original source queue (from which the messages were driven to the dead-letter-queue), or a custom destination queue.
  • Only one active message movement task is supported per queue at any given time.
" } }, "com.amazonaws.sqs#ListMessageMoveTasksRequest": { @@ -2486,7 +2486,7 @@ "ApproximateNumberOfMessagesToMove": { "target": "com.amazonaws.sqs#NullableLong", "traits": { - "smithy.api#documentation": "

The number of messages to be moved from the source queue. This number is obtained at\n the time of starting the message movement task.

" + "smithy.api#documentation": "

The number of messages to be moved from the source queue. This number is obtained at\n the time of starting the message movement task and is only included after the message\n movement task is selected to start.

" } }, "FailureReason": { @@ -2539,7 +2539,7 @@ } ], "traits": { - "smithy.api#documentation": "

List all cost allocation tags added to the specified Amazon SQS queue. For an\n overview, see Tagging\n Your Amazon SQS Queues in the Amazon SQS Developer\n Guide.

\n \n

Cross-account permissions don't apply to this action. For more information, see\n Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

\n
" + "smithy.api#documentation": "

List all cost allocation tags added to the specified Amazon SQS queue.\n For an overview, see Tagging \nYour Amazon SQS Queues in the Amazon SQS Developer Guide.

\n \n

Cross-account permissions don't apply to this action. For more information, \nsee Grant \ncross-account permissions to a role and a username in the Amazon SQS Developer Guide.

\n
" } }, "com.amazonaws.sqs#ListQueueTagsRequest": { @@ -2596,7 +2596,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of your queues in the current region. The response includes a maximum\n of 1,000 results. If you specify a value for the optional QueueNamePrefix\n parameter, only queues with a name that begins with the specified value are\n returned.

\n

The listQueues methods supports pagination. Set parameter\n MaxResults in the request to specify the maximum number of results to\n be returned in the response. If you do not set MaxResults, the response\n includes a maximum of 1,000 results. If you set MaxResults and there are\n additional results to display, the response includes a value for NextToken.\n Use NextToken as a parameter in your next request to\n listQueues to receive the next page of results.

\n \n

Cross-account permissions don't apply to this action. For more information, see\n Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

\n
", + "smithy.api#documentation": "

Returns a list of your queues in the current region. The response includes a maximum\n of 1,000 results. If you specify a value for the optional QueueNamePrefix\n parameter, only queues with a name that begins with the specified value are\n returned.

\n

The listQueues method supports pagination. Set the MaxResults parameter in the request to specify the maximum number of results to be returned in the response. If you do not set MaxResults, the response includes a maximum of 1,000 results. If you set MaxResults and there are additional results to display, the response includes a value for NextToken. Use NextToken as a parameter in your next request to listQueues to receive the next page of results.

\n \n

Cross-account permissions don't apply to this action. For more information, \nsee Grant \ncross-account permissions to a role and a username in the Amazon SQS Developer Guide.

\n
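
The MaxResults/NextToken flow described above can be driven by a simple loop. The sketch below assumes a configured Soto `sqs` client and an illustrative page size; Soto also generates paginator helpers for paginated operations, but the manual loop mirrors the description here.

    import SotoSQS

    // Page through all queue URLs in the current region (sketch).
    func allQueueUrls(sqs: SQS) async throws -> [String] {
        var urls: [String] = []
        var nextToken: String? = nil
        repeat {
            let page = try await sqs.listQueues(.init(
                maxResults: 100,      // illustrative page size
                nextToken: nextToken
            ))
            urls.append(contentsOf: page.queueUrls ?? [])
            nextToken = page.nextToken
        } while nextToken != nil
        return urls
    }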
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2699,13 +2699,13 @@ "MD5OfMessageAttributes": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this\n attribute to verify that Amazon SQS received the message correctly. Amazon SQS\n URL-decodes the message before creating the MD5 digest. For information about MD5, see\n RFC1321.

" + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" } }, "MessageAttributes": { "target": "com.amazonaws.sqs#MessageBodyAttributeMap", "traits": { - "smithy.api#documentation": "

Each message attribute consists of a Name, Type, and\n Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

Each message attribute consists of a Name, Type, \nand Value. For more information, see \nAmazon SQS \nmessage attributes in the Amazon SQS Developer Guide.

", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "MessageAttribute" } @@ -2758,7 +2758,7 @@ "DataType": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

Amazon SQS supports the following logical data types: String,\n Number, and Binary. For the Number data type,\n you must use StringValue.

\n

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

Amazon SQS supports the following logical data types: String,\n Number, and Binary. For the Number data type,\n you must use StringValue.

\n

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer\n Guide.

", "smithy.api#required": {} } } @@ -2816,6 +2816,12 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.sqs#MessageSystemAttributeList": { + "type": "list", + "member": { + "target": "com.amazonaws.sqs#MessageSystemAttributeName" + } + }, "com.amazonaws.sqs#MessageSystemAttributeMap": { "type": "map", "key": { @@ -2834,6 +2840,12 @@ "com.amazonaws.sqs#MessageSystemAttributeName": { "type": "enum", "members": { + "All": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "All" + } + }, "SenderId": { "target": "smithy.api#Unit", "traits": { @@ -2935,7 +2947,7 @@ "DataType": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

Amazon SQS supports the following logical data types: String,\n Number, and Binary. For the Number data type,\n you must use StringValue.

\n

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

Amazon SQS supports the following logical data types: String,\n Number, and Binary. For the Number data type,\n you must use StringValue.

\n

You can also append custom labels. For more information, see Amazon SQS Message Attributes in the Amazon SQS Developer\n Guide.

", "smithy.api#required": {} } } @@ -3230,7 +3242,7 @@ "code": "QueueAlreadyExists", "httpResponseCode": 400 }, - "smithy.api#documentation": "

A queue with this name already exists. Amazon SQS returns this error only if the\n request includes attributes whose values differ from those of the existing queue.

", + "smithy.api#documentation": "

A queue with this name already exists. Amazon SQS returns this error only if the request\n includes attributes whose values differ from those of the existing queue.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -3308,7 +3320,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves one or more messages (up to 10), from the specified queue. Using the\n WaitTimeSeconds parameter enables long-poll support. For more\n information, see Amazon\n SQS Long Polling in the Amazon SQS Developer Guide.

\n

Short poll is the default behavior where a weighted random set of machines is sampled\n on a ReceiveMessage call. Thus, only the messages on the sampled machines\n are returned. If the number of messages in the queue is small (fewer than 1,000), you\n most likely get fewer messages than you requested per ReceiveMessage call.\n If the number of messages in the queue is extremely small, you might not receive any\n messages in a particular ReceiveMessage response. If this happens, repeat\n the request.

\n

For each message returned, the response includes the following:

\n
    \n
  • \n

    The message body.

    \n
  • \n
  • \n

    An MD5 digest of the message body. For information about MD5, see RFC1321.

    \n
  • \n
  • \n

    The MessageId you received when you sent the message to the\n queue.

    \n
  • \n
  • \n

    The receipt handle.

    \n
  • \n
  • \n

    The message attributes.

    \n
  • \n
  • \n

    An MD5 digest of the message attributes.

    \n
  • \n
\n

The receipt handle is the identifier you must provide when deleting the message. For\n more information, see Queue and Message Identifiers in the Amazon SQS Developer\n Guide.

\n

You can provide the VisibilityTimeout parameter in your request. The\n parameter is applied to the messages that Amazon SQS returns in the response. If you\n don't include the parameter, the overall visibility timeout for the queue is used for\n the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer\n Guide.

\n

A message that isn't deleted or a message whose visibility isn't extended before the\n visibility timeout expires counts as a failed receive. Depending on the configuration of\n the queue, the message might be sent to the dead-letter queue.

\n \n

In the future, new attributes might be added. If you write code that calls this\n action, we recommend that you structure your code so that it can handle new\n attributes gracefully.

\n
" + "smithy.api#documentation": "

Retrieves one or more messages (up to 10), from the specified queue. Using the\n WaitTimeSeconds parameter enables long-poll support. For more\n information, see Amazon SQS\n Long Polling in the Amazon SQS Developer Guide.

\n

Short poll is the default behavior where a weighted random set of machines is sampled\n on a ReceiveMessage call. Thus, only the messages on the sampled machines\n are returned. If the number of messages in the queue is small (fewer than 1,000), you\n most likely get fewer messages than you requested per ReceiveMessage call.\n If the number of messages in the queue is extremely small, you might not receive any\n messages in a particular ReceiveMessage response. If this happens, repeat\n the request.

\n

For each message returned, the response includes the following:

  • The message body.

  • An MD5 digest of the message body. For information about MD5, see RFC1321.

  • The MessageId you received when you sent the message to the queue.

  • The receipt handle.

  • The message attributes.

  • An MD5 digest of the message attributes.

The receipt handle is the identifier you must provide when deleting the message. For\n more information, see Queue and Message Identifiers in the Amazon SQS Developer\n Guide.

\n

You can provide the VisibilityTimeout parameter in your request. The\n parameter is applied to the messages that Amazon SQS returns in the response. If you don't\n include the parameter, the overall visibility timeout for the queue is used for the\n returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer\n Guide.

\n

A message that isn't deleted or a message whose visibility isn't extended before the\n visibility timeout expires counts as a failed receive. Depending on the configuration of\n the queue, the message might be sent to the dead-letter queue.

\n \n

In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

\n
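
Putting the pieces above together (long polling via WaitTimeSeconds, then deleting with the receipt handle), a minimal Soto sketch might look like this; the client setup, queue URL, and wait time are assumptions.

    import SotoSQS

    // Long-poll for up to 10 messages, then delete each one using the
    // receipt handle returned with it (assumes a configured `sqs` client).
    func drainOnce(sqs: SQS, queueUrl: String) async throws {
        let response = try await sqs.receiveMessage(.init(
            maxNumberOfMessages: 10,
            queueUrl: queueUrl,
            waitTimeSeconds: 20   // enables long polling
        ))
        for message in response.messages ?? [] {
            print(message.messageId ?? "", message.body ?? "")
            if let receiptHandle = message.receiptHandle {
                try await sqs.deleteMessage(.init(queueUrl: queueUrl, receiptHandle: receiptHandle))
            }
        }
    }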
" } }, "com.amazonaws.sqs#ReceiveMessageRequest": { @@ -3324,7 +3336,18 @@ "AttributeNames": { "target": "com.amazonaws.sqs#AttributeNameList", "traits": { - "smithy.api#documentation": "

A list of attributes that need to be returned along with each message. These\n attributes include:

\n
    \n
  • \n

    \n All – Returns all values.

    \n
  • \n
  • \n

    \n ApproximateFirstReceiveTimestamp – Returns the time the message was\n first received from the queue (epoch time in\n milliseconds).

    \n
  • \n
  • \n

    \n ApproximateReceiveCount – Returns the number of times a message has\n been received across all queues but not deleted.

    \n
  • \n
  • \n

    \n AWSTraceHeader – Returns the X-Ray trace header\n string.

    \n
  • \n
  • \n

    \n SenderId\n

    \n
      \n
    • \n

      For a user, returns the user ID, for example\n ABCDEFGHI1JKLMNOPQ23R.

      \n
    • \n
    • \n

      For an IAM role, returns the IAM role ID, for example\n ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

      \n
    • \n
    \n
  • \n
  • \n

    \n SentTimestamp – Returns the time the message was sent to the queue\n (epoch time in\n milliseconds).

    \n
  • \n
  • \n

    \n SqsManagedSseEnabled – Enables server-side queue encryption using\n SQS owned encryption keys. Only one server-side encryption option is supported\n per queue (for example, SSE-KMS or SSE-SQS).

    \n
  • \n
  • \n

    \n MessageDeduplicationId – Returns the value provided by the\n producer that calls the \n SendMessage\n \n action.

    \n
  • \n
  • \n

    \n MessageGroupId – Returns the value provided by the producer that\n calls the \n SendMessage\n action. Messages with the\n same MessageGroupId are returned in sequence.

    \n
  • \n
  • \n

    \n SequenceNumber – Returns the value provided by Amazon SQS.

    \n
  • \n
", + "smithy.api#deprecated": { + "message": "AttributeNames has been replaced by MessageSystemAttributeNames" + }, + "smithy.api#documentation": "\n

This parameter has been deprecated but will be supported for backward\n compatibility. To provide attribute names, you are encouraged to use\n MessageSystemAttributeNames.

\n
\n

A list of attributes that need to be returned along with each message. These\n attributes include:

\n
    \n
  • \n

    \n All – Returns all values.

    \n
  • \n
  • \n

    \n ApproximateFirstReceiveTimestamp – Returns the time the\n message was first received from the queue (epoch time in\n milliseconds).

    \n
  • \n
  • \n

    \n ApproximateReceiveCount – Returns the number of times a\n message has been received across all queues but not deleted.

    \n
  • \n
  • \n

    \n AWSTraceHeader – Returns the X-Ray trace\n header string.

    \n
  • \n
  • \n

    \n SenderId\n

    \n
      \n
    • \n

      For a user, returns the user ID, for example\n ABCDEFGHI1JKLMNOPQ23R.

      \n
    • \n
    • \n

      For an IAM role, returns the IAM role ID, for example\n ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

      \n
    • \n
    \n
  • \n
  • \n

    \n SentTimestamp – Returns the time the message was sent to the\n queue (epoch time in\n milliseconds).

    \n
  • \n
  • \n

    \n SqsManagedSseEnabled – Enables server-side queue encryption\n using SQS owned encryption keys. Only one server-side encryption option is\n supported per queue (for example, SSE-KMS or SSE-SQS).

    \n
  • \n
  • \n

    \n MessageDeduplicationId – Returns the value provided by the\n producer that calls the \n SendMessage\n \n action.

    \n
  • \n
  • \n

    \n MessageGroupId – Returns the value provided by the\n producer that calls the \n SendMessage\n action.\n Messages with the same MessageGroupId are returned in\n sequence.

    \n
  • \n
  • \n

    \n SequenceNumber – Returns the value provided by\n Amazon SQS.

    \n
  • \n
", + "smithy.api#xmlFlattened": {}, + "smithy.api#xmlName": "AttributeName" + } + }, + "MessageSystemAttributeNames": { + "target": "com.amazonaws.sqs#MessageSystemAttributeList", + "traits": { + "smithy.api#documentation": "

A list of attributes that need to be returned along with each message. These\n attributes include:

  • All – Returns all values.

  • ApproximateFirstReceiveTimestamp – Returns the time the message was first received from the queue (epoch time in milliseconds).

  • ApproximateReceiveCount – Returns the number of times a message has been received across all queues but not deleted.

  • AWSTraceHeader – Returns the X-Ray trace header string.

  • SenderId
      • For a user, returns the user ID, for example ABCDEFGHI1JKLMNOPQ23R.
      • For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.

  • SentTimestamp – Returns the time the message was sent to the queue (epoch time in milliseconds).

  • SqsManagedSseEnabled – Enables server-side queue encryption using SQS owned encryption keys. Only one server-side encryption option is supported per queue (for example, SSE-KMS or SSE-SQS).

  • MessageDeduplicationId – Returns the value provided by the producer that calls the SendMessage action.

  • MessageGroupId – Returns the value provided by the producer that calls the SendMessage action. Messages with the same MessageGroupId are returned in sequence.

  • SequenceNumber – Returns the value provided by Amazon SQS.
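
Because this update deprecates AttributeNames in favor of the new MessageSystemAttributeNames member, a receive call that requests specific system attributes might look like the sketch below; the lowerCamelCase member label and the enum case names (including the newly added All value) are assumptions about how Soto renders this model.

    import SotoSQS

    // Ask for selected system attributes with the new request member
    // (assumes a configured `sqs` client).
    func receiveWithSystemAttributes(sqs: SQS, queueUrl: String) async throws {
        let response = try await sqs.receiveMessage(.init(
            messageSystemAttributeNames: [.senderId, .sentTimestamp],
            queueUrl: queueUrl,
            waitTimeSeconds: 10
        ))
        for message in response.messages ?? [] {
            print(message.attributes ?? [:])
        }
    }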
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "AttributeName" } @@ -3340,7 +3363,7 @@ "MaxNumberOfMessages": { "target": "com.amazonaws.sqs#NullableInteger", "traits": { - "smithy.api#documentation": "

The maximum number of messages to return. Amazon SQS never returns more messages than\n this value (however, fewer messages might be returned). Valid values: 1 to 10. Default:\n 1.

" + "smithy.api#documentation": "

The maximum number of messages to return. Amazon SQS never returns more messages than this\n value (however, fewer messages might be returned). Valid values: 1 to 10. Default:\n 1.

" } }, "VisibilityTimeout": { @@ -3352,13 +3375,13 @@ "WaitTimeSeconds": { "target": "com.amazonaws.sqs#NullableInteger", "traits": { - "smithy.api#documentation": "

The duration (in seconds) for which the call waits for a message to arrive in the\n queue before returning. If a message is available, the call returns sooner than\n WaitTimeSeconds. If no messages are available and the wait time\n expires, the call returns successfully with an empty list of messages.

\n \n

To avoid HTTP errors, ensure that the HTTP response timeout for\n ReceiveMessage requests is longer than the\n WaitTimeSeconds parameter. For example, with the Java SDK, you can\n set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients.

\n
" + "smithy.api#documentation": "

The duration (in seconds) for which the call waits for a message to arrive in the\n queue before returning. If a message is available, the call returns sooner than\n WaitTimeSeconds. If no messages are available and the wait time\n expires, the call does not return a message list.

\n \n

To avoid HTTP errors, ensure that the HTTP response timeout for\n ReceiveMessage requests is longer than the\n WaitTimeSeconds parameter. For example, with the Java SDK, you can\n set HTTP transport settings using the NettyNioAsyncHttpClient for asynchronous clients, or the ApacheHttpClient for synchronous clients.

\n
" } }, "ReceiveRequestAttemptId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The token used for deduplication of ReceiveMessage calls. If a networking\n issue occurs after a ReceiveMessage action, and instead of a response you\n receive a generic error, it is possible to retry the same action with an identical\n ReceiveRequestAttemptId to retrieve the same set of messages, even if\n their visibility timeout has not yet expired.

\n
    \n
  • \n

    You can use ReceiveRequestAttemptId only for 5 minutes after a\n ReceiveMessage action.

    \n
  • \n
  • \n

    When you set FifoQueue, a caller of the\n ReceiveMessage action can provide a\n ReceiveRequestAttemptId explicitly.

    \n
  • \n
  • \n

    If a caller of the ReceiveMessage action doesn't provide a\n ReceiveRequestAttemptId, Amazon SQS generates a\n ReceiveRequestAttemptId.

    \n
  • \n
  • \n

    It is possible to retry the ReceiveMessage action with the same\n ReceiveRequestAttemptId if none of the messages have been\n modified (deleted or had their visibility changes).

    \n
  • \n
  • \n

    During a visibility timeout, subsequent calls with the same\n ReceiveRequestAttemptId return the same messages and receipt\n handles. If a retry occurs within the deduplication interval, it resets the\n visibility timeout. For more information, see Visibility Timeout in the Amazon SQS Developer\n Guide.

    \n \n

    If a caller of the ReceiveMessage action still processes\n messages when the visibility timeout expires and messages become visible,\n another worker consuming from the same queue can receive the same messages\n and therefore process duplicates. Also, if a consumer whose message\n processing time is longer than the visibility timeout tries to delete the\n processed messages, the action fails with an error.

    \n

    To mitigate this effect, ensure that your application observes a safe\n threshold before the visibility timeout expires and extend the visibility\n timeout as necessary.

    \n
    \n
  • \n
  • \n

    While messages with a particular MessageGroupId are invisible, no\n more messages belonging to the same MessageGroupId are returned\n until the visibility timeout expires. You can still receive messages with\n another MessageGroupId as long as it is also visible.

    \n
  • \n
  • \n

    If a caller of ReceiveMessage can't track the\n ReceiveRequestAttemptId, no retries work until the original\n visibility timeout expires. As a result, delays might occur but the messages in\n the queue remain in a strict order.

    \n
  • \n
\n

The maximum length of ReceiveRequestAttemptId is 128 characters.\n ReceiveRequestAttemptId can contain alphanumeric characters\n (a-z, A-Z, 0-9) and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon\n SQS Developer Guide.

" + "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The token used for deduplication of ReceiveMessage calls. If a networking\n issue occurs after a ReceiveMessage action, and instead of a response you\n receive a generic error, it is possible to retry the same action with an identical\n ReceiveRequestAttemptId to retrieve the same set of messages, even if\n their visibility timeout has not yet expired.

\n
    \n
  • \n

    You can use ReceiveRequestAttemptId only for 5 minutes after a\n ReceiveMessage action.

    \n
  • \n
  • \n

    When you set FifoQueue, a caller of the\n ReceiveMessage action can provide a\n ReceiveRequestAttemptId explicitly.

    \n
  • \n
  • \n

    It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed).

    \n
  • \n
  • \n

    During a visibility timeout, subsequent calls with the same\n ReceiveRequestAttemptId return the same messages and receipt\n handles. If a retry occurs within the deduplication interval, it resets the\n visibility timeout. For more information, see Visibility Timeout in the Amazon SQS Developer\n Guide.

    \n \n

    If a caller of the ReceiveMessage action still processes\n messages when the visibility timeout expires and messages become visible,\n another worker consuming from the same queue can receive the same messages\n and therefore process duplicates. Also, if a consumer whose message\n processing time is longer than the visibility timeout tries to delete the\n processed messages, the action fails with an error.

    \n

    To mitigate this effect, ensure that your application observes a safe\n threshold before the visibility timeout expires and extend the visibility\n timeout as necessary.

    \n
    \n
  • \n
  • \n

    While messages with a particular MessageGroupId are invisible, no\n more messages belonging to the same MessageGroupId are returned\n until the visibility timeout expires. You can still receive messages with\n another MessageGroupId as long as it is also visible.

    \n
  • \n
  • \n

    If a caller of ReceiveMessage can't track the\n ReceiveRequestAttemptId, no retries work until the original\n visibility timeout expires. As a result, delays might occur but the messages in\n the queue remain in a strict order.

    \n
  • \n
\n

The maximum length of ReceiveRequestAttemptId is 128 characters.\n ReceiveRequestAttemptId can contain alphanumeric characters\n (a-z, A-Z, 0-9) and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId Request Parameter in the Amazon SQS\n Developer Guide.

" } } }, @@ -3410,7 +3433,7 @@ } ], "traits": { - "smithy.api#documentation": "

Revokes any permissions in the queue policy that matches the specified\n Label parameter.

\n \n
    \n
  • \n

    Only the owner of a queue can remove permissions from it.

    \n
  • \n
  • \n

    Cross-account permissions don't apply to this action. For more\n information, see Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

    \n
  • \n
  • \n

    To remove the ability to change queue permissions, you must deny\n permission to the AddPermission, RemovePermission,\n and SetQueueAttributes actions in your IAM policy.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Revokes any permissions in the queue policy that matches the specified\n Label parameter.

  • Only the owner of a queue can remove permissions from it.

  • Cross-account permissions don't apply to this action. For more information, see Grant cross-account permissions to a role and a username in the Amazon SQS Developer Guide.

  • To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.
" } }, "com.amazonaws.sqs#RemovePermissionRequest": { @@ -3448,7 +3471,7 @@ "code": "RequestThrottled", "httpResponseCode": 403 }, - "smithy.api#documentation": "

The request was denied due to request throttling.

\n
    \n
  • \n

    The rate of requests per second exceeds the Amazon Web Services KMS request quota for an\n account and Region.

    \n
  • \n
  • \n

    A burst or sustained high rate of requests to change the state of the same KMS\n key. This condition is often known as a \"hot key.\"

    \n
  • \n
  • \n

    Requests for operations on KMS keys in a Amazon Web Services CloudHSM key store\n might be throttled at a lower-than-expected rate when the Amazon Web Services\n CloudHSM cluster associated with the Amazon Web Services CloudHSM key store is\n processing numerous commands, including those unrelated to the Amazon Web Services CloudHSM key store.

    \n
  • \n
", + "smithy.api#documentation": "

The request was denied due to request throttling.

  • The rate of requests per second exceeds the Amazon Web Services KMS request quota for an account and Region.

  • A burst or sustained high rate of requests to change the state of the same KMS key. This condition is often known as a \"hot key.\"

  • Requests for operations on KMS keys in an Amazon Web Services CloudHSM key store might be throttled at a lower-than-expected rate when the Amazon Web Services CloudHSM cluster associated with the Amazon Web Services CloudHSM key store is processing numerous commands, including those unrelated to the Amazon Web Services CloudHSM key store.
", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -3520,7 +3543,7 @@ } ], "traits": { - "smithy.api#documentation": "

Delivers a message to the specified queue.

\n \n

A message can include only XML, JSON, and unformatted text. The following Unicode\n characters are allowed:

\n

\n #x9 | #xA | #xD | #x20 to\n #xD7FF | #xE000 to #xFFFD |\n #x10000 to #x10FFFF\n

\n

Any characters not included in this list will be rejected. For more information,\n see the W3C specification for\n characters.

\n
" + "smithy.api#documentation": "

Delivers a message to the specified queue.

\n \n

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters.

\n

\n #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF\n

\n

Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character.

\n
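
A minimal Soto sketch of the operation described above; the queue URL and message body are placeholders, and a configured `sqs` client is assumed.

    import SotoSQS

    // Send a single text message to a queue (sketch).
    func sendHello(sqs: SQS, queueUrl: String) async throws {
        let response = try await sqs.sendMessage(.init(
            messageBody: "{\"event\": \"hello\"}",
            queueUrl: queueUrl
        ))
        print("sent message id:", response.messageId ?? "")
    }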
" } }, "com.amazonaws.sqs#SendMessageBatch": { @@ -3585,7 +3608,7 @@ } ], "traits": { - "smithy.api#documentation": "

You can use SendMessageBatch to send up to 10 messages to the specified\n queue by assigning either identical or different values to each message (or by not\n assigning values at all). This is a batch version of \n SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued in\n the order they are sent.

\n

The result of sending each message is reported individually in the response. Because\n the batch request can result in a combination of successful and unsuccessful actions,\n you should check for batch errors even when the call returns an HTTP status code of\n 200.

\n

The maximum allowed individual message size and the maximum total payload size (the\n sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144\n bytes).

\n \n

A message can include only XML, JSON, and unformatted text. The following Unicode\n characters are allowed:

\n

\n #x9 | #xA | #xD | #x20 to\n #xD7FF | #xE000 to #xFFFD |\n #x10000 to #x10FFFF\n

\n

Any characters not included in this list will be rejected. For more information,\n see the W3C specification for\n characters.

\n
\n

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS\n uses the default value for the queue.

" + "smithy.api#documentation": "

You can use SendMessageBatch to send up to 10 messages to the specified\n queue by assigning either identical or different values to each message (or by not\n assigning values at all). This is a batch version of \n SendMessage. For a FIFO queue, multiple messages within a single batch are enqueued\n in the order they are sent.

\n

The result of sending each message is reported individually in the response.\n Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200.

\n

The maximum allowed individual message size and the maximum total payload size (the\n sum of the individual lengths of all of the batched messages) are both 256 KiB (262,144\n bytes).

\n \n

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters.

\n

\n #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF\n

\n

Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character.

\n
\n

If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses\n the default value for the queue.
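
A sketch of the batch flow described above, including the per-entry result check; the entry ids are illustrative and a configured Soto `sqs` client is assumed.

    import SotoSQS

    // Send up to 10 messages in one call and check the per-entry results,
    // since an HTTP 200 can still include failed entries (sketch).
    func sendBatch(sqs: SQS, queueUrl: String, bodies: [String]) async throws {
        let entries = bodies.prefix(10).enumerated().map { index, body in
            SQS.SendMessageBatchRequestEntry(id: "msg-\(index)", messageBody: body)
        }
        let response = try await sqs.sendMessageBatch(.init(entries: entries, queueUrl: queueUrl))
        for entry in response.successful { print("sent:", entry.id) }
        for failure in response.failed { print("failed:", failure.id, failure.code) }
    }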

" } }, "com.amazonaws.sqs#SendMessageBatchRequest": { @@ -3619,7 +3642,7 @@ "Id": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An identifier for a message in this batch used to communicate the result.

\n \n

The Ids of a batch request need to be unique within a request.

\n

This identifier can have up to 80 characters. The following characters are\n accepted: alphanumeric characters, hyphens(-), and underscores (_).

\n
", + "smithy.api#documentation": "

An identifier for a message in this batch used to communicate the result.

\n \n

The Ids of a batch request need to be unique within a request.

\n

    This identifier can have up to 80 characters. The following characters are accepted: alphanumeric characters, hyphens (-), and underscores (_).

\n
", "smithy.api#required": {} } }, @@ -3633,13 +3656,13 @@ "DelaySeconds": { "target": "com.amazonaws.sqs#NullableInteger", "traits": { - "smithy.api#documentation": "

The length of time, in seconds, for which a specific message is delayed. Valid values:\n 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value\n become available for processing after the delay period is finished. If you don't specify\n a value, the default value for the queue is applied.

\n \n

When you set FifoQueue, you can't set DelaySeconds per\n message. You can set this parameter only on a queue level.

\n
" + "smithy.api#documentation": "

The length of time, in seconds, for which a specific message is delayed. Valid values:\n 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value\n become available for processing after the delay period is finished. If you don't specify\n a value, the default value for the queue is applied.

\n \n

When you set FifoQueue, you can't set DelaySeconds per message. You can set this parameter only on a queue level.

\n
" } }, "MessageAttributes": { "target": "com.amazonaws.sqs#MessageBodyAttributeMap", "traits": { - "smithy.api#documentation": "

Each message attribute consists of a Name, Type, and\n Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

Each message attribute consists of a Name, Type, \nand Value. For more information, see \nAmazon SQS \nmessage attributes in the Amazon SQS Developer Guide.

", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "MessageAttribute" } @@ -3647,7 +3670,7 @@ "MessageSystemAttributes": { "target": "com.amazonaws.sqs#MessageBodySystemAttributeMap", "traits": { - "smithy.api#documentation": "

The message system attribute to send Each message system attribute consists of a\n Name, Type, and Value.

\n \n
    \n
  • \n

    Currently, the only supported message system attribute is\n AWSTraceHeader. Its type must be String and\n its value must be a correctly formatted X-Ray trace\n header string.

    \n
  • \n
  • \n

    The size of a message system attribute doesn't count towards the total\n size of a message.

    \n
  • \n
\n
", + "smithy.api#documentation": "

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted X-Ray trace header string.

  • The size of a message system attribute doesn't count towards the total size of a message.
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "MessageSystemAttribute" } @@ -3655,18 +3678,18 @@ "MessageDeduplicationId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The token used for deduplication of messages within a 5-minute minimum deduplication\n interval. If a message with a particular MessageDeduplicationId is sent\n successfully, subsequent messages with the same MessageDeduplicationId are\n accepted successfully but aren't delivered. For more information, see Exactly-once processing in the Amazon SQS Developer\n Guide.

\n
    \n
  • \n

    Every message must have a unique MessageDeduplicationId,

    \n
      \n
    • \n

      You may provide a MessageDeduplicationId\n explicitly.

      \n
    • \n
    • \n

      If you aren't able to provide a MessageDeduplicationId\n and you enable ContentBasedDeduplication for your queue,\n Amazon SQS uses a SHA-256 hash to generate the\n MessageDeduplicationId using the body of the message\n (but not the attributes of the message).

      \n
    • \n
    • \n

      If you don't provide a MessageDeduplicationId and the\n queue doesn't have ContentBasedDeduplication set, the\n action fails with an error.

      \n
    • \n
    • \n

      If the queue has ContentBasedDeduplication set, your\n MessageDeduplicationId overrides the generated\n one.

      \n
    • \n
    \n
  • \n
  • \n

    When ContentBasedDeduplication is in effect, messages with\n identical content sent within the deduplication interval are treated as\n duplicates and only one copy of the message is delivered.

    \n
  • \n
  • \n

    If you send one message with ContentBasedDeduplication enabled\n and then another message with a MessageDeduplicationId that is the\n same as the one generated for the first MessageDeduplicationId, the\n two messages are treated as duplicates and only one copy of the message is\n delivered.

    \n
  • \n
\n \n

The MessageDeduplicationId is available to the consumer of the\n message (this can be useful for troubleshooting delivery issues).

\n

If a message is sent successfully but the acknowledgement is lost and the message\n is resent with the same MessageDeduplicationId after the deduplication\n interval, Amazon SQS can't detect duplicate messages.

\n

Amazon SQS continues to keep track of the message deduplication ID even after the\n message is received and deleted.

\n
\n

The length of MessageDeduplicationId is 128 characters.\n MessageDeduplicationId can contain alphanumeric characters\n (a-z, A-Z, 0-9) and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS\n Developer Guide.

" + "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The token used for deduplication of messages within a 5-minute minimum deduplication\n interval. If a message with a particular MessageDeduplicationId is sent\n successfully, subsequent messages with the same MessageDeduplicationId are\n accepted successfully but aren't delivered. For more information, see Exactly-once processing in the Amazon SQS Developer\n Guide.

\n
    \n
  • \n

    Every message must have a unique MessageDeduplicationId,

    \n
      \n
    • \n

      You may provide a MessageDeduplicationId\n explicitly.

      \n
    • \n
    • \n

      If you aren't able to provide a MessageDeduplicationId\n and you enable ContentBasedDeduplication for your queue,\n Amazon SQS uses a SHA-256 hash to generate the\n MessageDeduplicationId using the body of the message\n (but not the attributes of the message).

      \n
    • \n
    • \n

      If you don't provide a MessageDeduplicationId and the\n queue doesn't have ContentBasedDeduplication set, the\n action fails with an error.

      \n
    • \n
    • \n

      If the queue has ContentBasedDeduplication set, your\n MessageDeduplicationId overrides the generated\n one.

      \n
    • \n
    \n
  • \n
  • \n

    When ContentBasedDeduplication is in effect, messages with\n identical content sent within the deduplication interval are treated as\n duplicates and only one copy of the message is delivered.

    \n
  • \n
  • \n

    If you send one message with ContentBasedDeduplication enabled\n and then another message with a MessageDeduplicationId that is the\n same as the one generated for the first MessageDeduplicationId, the\n two messages are treated as duplicates and only one copy of the message is\n delivered.

    \n
  • \n
\n \n

The MessageDeduplicationId is available to the consumer of the\n message (this can be useful for troubleshooting delivery issues).

\n

If a message is sent successfully but the acknowledgement is lost and the message\n is resent with the same MessageDeduplicationId after the deduplication\n interval, Amazon SQS can't detect duplicate messages.

\n

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

\n
\n

The length of MessageDeduplicationId is 128 characters.\n MessageDeduplicationId can contain alphanumeric characters\n (a-z, A-Z, 0-9) and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS Developer\n Guide.

" } }, "MessageGroupId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The tag that specifies that a message belongs to a specific message group. Messages\n that belong to the same message group are processed in a FIFO manner (however, messages\n in different message groups might be processed out of order). To interleave multiple\n ordered streams within a single queue, use MessageGroupId values (for\n example, session data for multiple users). In this scenario, multiple consumers can\n process the queue, but the session data of each user is processed in a FIFO\n fashion.

\n
    \n
  • \n

    You must associate a non-empty MessageGroupId with a message. If\n you don't provide a MessageGroupId, the action fails.

    \n
  • \n
  • \n

    \n ReceiveMessage might return messages with multiple\n MessageGroupId values. For each MessageGroupId,\n the messages are sorted by time sent. The caller can't specify a\n MessageGroupId.

    \n
  • \n
\n

The length of MessageGroupId is 128 characters. Valid values:\n alphanumeric characters and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer\n Guide.

\n \n

\n MessageGroupId is required for FIFO queues. You can't use it for\n Standard queues.

\n
" + "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The tag that specifies that a message belongs to a specific message group. Messages\n that belong to the same message group are processed in a FIFO manner (however,\n messages in different message groups might be processed out of order). To interleave\n multiple ordered streams within a single queue, use MessageGroupId values\n (for example, session data for multiple users). In this scenario, multiple consumers can\n process the queue, but the session data of each user is processed in a FIFO\n fashion.

\n
    \n
  • \n

    You must associate a non-empty MessageGroupId with a message. If\n you don't provide a MessageGroupId, the action fails.

    \n
  • \n
  • \n

    \n ReceiveMessage might return messages with multiple\n MessageGroupId values. For each MessageGroupId,\n the messages are sorted by time sent. The caller can't specify a\n MessageGroupId.

    \n
  • \n
\n

The length of MessageGroupId is 128 characters. Valid values:\n alphanumeric characters and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer\n Guide.

\n \n

\n MessageGroupId is required for FIFO queues. You can't use it for\n Standard queues.

\n
" } } }, "traits": { - "smithy.api#documentation": "

Contains the details of a single Amazon SQS message along with an\n Id.

" + "smithy.api#documentation": "

Contains the details of a single Amazon SQS message along with an Id.

" } }, "com.amazonaws.sqs#SendMessageBatchRequestEntryList": { @@ -3722,20 +3745,20 @@ "MD5OfMessageBody": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message body string. You can use this attribute\n to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the\n message before creating the MD5 digest. For information about MD5, see RFC1321.

", + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message body string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

", "smithy.api#required": {} } }, "MD5OfMessageAttributes": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this\n attribute to verify that Amazon SQS received the message correctly. Amazon SQS\n URL-decodes the message before creating the MD5 digest. For information about MD5, see\n RFC1321.

" + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" } }, "MD5OfMessageSystemAttributes": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message system attribute string. You can use this\n attribute to verify that Amazon SQS received the message correctly. Amazon SQS\n URL-decodes the message before creating the MD5 digest. For information about MD5, see\n RFC1321.

" + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message system attribute string. You can use this \nattribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" } }, "SequenceNumber": { @@ -3768,20 +3791,20 @@ "MessageBody": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

The message to send. The minimum size is one character. The maximum size is 256\n KiB.

\n \n

A message can include only XML, JSON, and unformatted text. The following Unicode\n characters are allowed:

\n

\n #x9 | #xA | #xD | #x20 to\n #xD7FF | #xE000 to #xFFFD |\n #x10000 to #x10FFFF\n

\n

Any characters not included in this list will be rejected. For more information,\n see the W3C specification for\n characters.

\n
", + "smithy.api#documentation": "

The message to send. The minimum size is one character. The maximum size is 256\n KiB.

\n \n

A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed. For more information, see the W3C specification for characters.

\n

\n #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF\n

\n

Amazon SQS does not throw an exception or completely reject the message if it contains invalid characters. Instead, it replaces those invalid characters with U+FFFD before storing the message in the queue, as long as the message body contains at least one valid character.

\n
", "smithy.api#required": {} } }, "DelaySeconds": { "target": "com.amazonaws.sqs#NullableInteger", "traits": { - "smithy.api#documentation": "

The length of time, in seconds, for which to delay a specific message. Valid values:\n 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value\n become available for processing after the delay period is finished. If you don't specify\n a value, the default value for the queue applies.

\n \n

When you set FifoQueue, you can't set DelaySeconds per\n message. You can set this parameter only on a queue level.

\n
" + "smithy.api#documentation": "

The length of time, in seconds, for which to delay a specific message. Valid values:\n 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value\n become available for processing after the delay period is finished. If you don't specify\n a value, the default value for the queue applies.

\n \n

When you set FifoQueue, you can't set DelaySeconds per message. You can set this parameter only on a queue level.

\n
" } }, "MessageAttributes": { "target": "com.amazonaws.sqs#MessageBodyAttributeMap", "traits": { - "smithy.api#documentation": "

Each message attribute consists of a Name, Type, and\n Value. For more information, see Amazon SQS message attributes in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

Each message attribute consists of a Name, Type, \nand Value. For more information, see \nAmazon SQS \nmessage attributes in the Amazon SQS Developer Guide.

", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "MessageAttribute" } @@ -3789,7 +3812,7 @@ "MessageSystemAttributes": { "target": "com.amazonaws.sqs#MessageBodySystemAttributeMap", "traits": { - "smithy.api#documentation": "

The message system attribute to send. Each message system attribute consists of a\n Name, Type, and Value.

\n \n
    \n
  • \n

    Currently, the only supported message system attribute is\n AWSTraceHeader. Its type must be String and\n its value must be a correctly formatted X-Ray trace\n header string.

    \n
  • \n
  • \n

    The size of a message system attribute doesn't count towards the total\n size of a message.

    \n
  • \n
\n
", + "smithy.api#documentation": "

The message system attribute to send. Each message system attribute consists of a Name, Type, and Value.

  • Currently, the only supported message system attribute is AWSTraceHeader. Its type must be String and its value must be a correctly formatted X-Ray trace header string.

  • The size of a message system attribute doesn't count towards the total size of a message.
", "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "MessageSystemAttribute" } @@ -3797,13 +3820,13 @@ "MessageDeduplicationId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The token used for deduplication of sent messages. If a message with a particular\n MessageDeduplicationId is sent successfully, any messages sent with the\n same MessageDeduplicationId are accepted successfully but aren't delivered\n during the 5-minute deduplication interval. For more information, see Exactly-once processing in the Amazon SQS Developer\n Guide.

\n
    \n
  • \n

    Every message must have a unique MessageDeduplicationId,

    \n
      \n
    • \n

      You may provide a MessageDeduplicationId\n explicitly.

      \n
    • \n
    • \n

      If you aren't able to provide a MessageDeduplicationId\n and you enable ContentBasedDeduplication for your queue,\n Amazon SQS uses a SHA-256 hash to generate the\n MessageDeduplicationId using the body of the message\n (but not the attributes of the message).

      \n
    • \n
    • \n

      If you don't provide a MessageDeduplicationId and the\n queue doesn't have ContentBasedDeduplication set, the\n action fails with an error.

      \n
    • \n
    • \n

      If the queue has ContentBasedDeduplication set, your\n MessageDeduplicationId overrides the generated\n one.

      \n
    • \n
    \n
  • \n
  • \n

    When ContentBasedDeduplication is in effect, messages with\n identical content sent within the deduplication interval are treated as\n duplicates and only one copy of the message is delivered.

    \n
  • \n
  • \n

    If you send one message with ContentBasedDeduplication enabled\n and then another message with a MessageDeduplicationId that is the\n same as the one generated for the first MessageDeduplicationId, the\n two messages are treated as duplicates and only one copy of the message is\n delivered.

    \n
  • \n
\n \n

The MessageDeduplicationId is available to the consumer of the\n message (this can be useful for troubleshooting delivery issues).

\n

If a message is sent successfully but the acknowledgement is lost and the message\n is resent with the same MessageDeduplicationId after the deduplication\n interval, Amazon SQS can't detect duplicate messages.

\n

Amazon SQS continues to keep track of the message deduplication ID even after the\n message is received and deleted.

\n
\n

The maximum length of MessageDeduplicationId is 128 characters.\n MessageDeduplicationId can contain alphanumeric characters\n (a-z, A-Z, 0-9) and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS\n Developer Guide.

" + "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The token used for deduplication of sent messages. If a message with a particular\n MessageDeduplicationId is sent successfully, any messages sent with the\n same MessageDeduplicationId are accepted successfully but aren't delivered\n during the 5-minute deduplication interval. For more information, see Exactly-once processing in the Amazon SQS Developer\n Guide.

\n
    \n
  • \n

    Every message must have a unique MessageDeduplicationId,

    \n
      \n
    • \n

      You may provide a MessageDeduplicationId\n explicitly.

      \n
    • \n
    • \n

      If you aren't able to provide a MessageDeduplicationId\n and you enable ContentBasedDeduplication for your queue,\n Amazon SQS uses a SHA-256 hash to generate the\n MessageDeduplicationId using the body of the message\n (but not the attributes of the message).

      \n
    • \n
    • \n

      If you don't provide a MessageDeduplicationId and the\n queue doesn't have ContentBasedDeduplication set, the\n action fails with an error.

      \n
    • \n
    • \n

      If the queue has ContentBasedDeduplication set, your\n MessageDeduplicationId overrides the generated\n one.

      \n
    • \n
    \n
  • \n
  • \n

    When ContentBasedDeduplication is in effect, messages with\n identical content sent within the deduplication interval are treated as\n duplicates and only one copy of the message is delivered.

    \n
  • \n
  • \n

    If you send one message with ContentBasedDeduplication enabled\n and then another message with a MessageDeduplicationId that is the\n same as the one generated for the first MessageDeduplicationId, the\n two messages are treated as duplicates and only one copy of the message is\n delivered.

    \n
  • \n
\n \n

The MessageDeduplicationId is available to the consumer of the\n message (this can be useful for troubleshooting delivery issues).

\n

If a message is sent successfully but the acknowledgement is lost and the message\n is resent with the same MessageDeduplicationId after the deduplication\n interval, Amazon SQS can't detect duplicate messages.

\n

Amazon SQS continues to keep track of the message deduplication ID even after the message is received and deleted.

\n
\n

The maximum length of MessageDeduplicationId is 128 characters.\n MessageDeduplicationId can contain alphanumeric characters\n (a-z, A-Z, 0-9) and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId Property in the Amazon SQS Developer\n Guide.

" } }, "MessageGroupId": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The tag that specifies that a message belongs to a specific message group. Messages\n that belong to the same message group are processed in a FIFO manner (however, messages\n in different message groups might be processed out of order). To interleave multiple\n ordered streams within a single queue, use MessageGroupId values (for\n example, session data for multiple users). In this scenario, multiple consumers can\n process the queue, but the session data of each user is processed in a FIFO\n fashion.

\n
    \n
  • \n

    You must associate a non-empty MessageGroupId with a message. If\n you don't provide a MessageGroupId, the action fails.

    \n
  • \n
  • \n

    \n ReceiveMessage might return messages with multiple\n MessageGroupId values. For each MessageGroupId,\n the messages are sorted by time sent. The caller can't specify a\n MessageGroupId.

    \n
  • \n
\n

The length of MessageGroupId is 128 characters. Valid values:\n alphanumeric characters and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer\n Guide.

\n \n

\n MessageGroupId is required for FIFO queues. You can't use it for\n Standard queues.

\n
" + "smithy.api#documentation": "

This parameter applies only to FIFO (first-in-first-out) queues.

\n

The tag that specifies that a message belongs to a specific message group. Messages\n that belong to the same message group are processed in a FIFO manner (however,\n messages in different message groups might be processed out of order). To interleave\n multiple ordered streams within a single queue, use MessageGroupId values\n (for example, session data for multiple users). In this scenario, multiple consumers can\n process the queue, but the session data of each user is processed in a FIFO\n fashion.

\n
    \n
  • \n

    You must associate a non-empty MessageGroupId with a message. If\n you don't provide a MessageGroupId, the action fails.

    \n
  • \n
  • \n

    \n ReceiveMessage might return messages with multiple\n MessageGroupId values. For each MessageGroupId,\n the messages are sorted by time sent. The caller can't specify a\n MessageGroupId.

    \n
  • \n
\n

The maximum length of MessageGroupId is 128 characters. Valid values:\n alphanumeric characters and punctuation\n (!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~).

\n

For best practices of using MessageGroupId, see Using the MessageGroupId Property in the Amazon SQS Developer\n Guide.

\n \n

\n MessageGroupId is required for FIFO queues. You can't use it for\n Standard queues.

\n
" } } }, @@ -3818,19 +3841,19 @@ "MD5OfMessageBody": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message body string. You can use this attribute\n to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the\n message before creating the MD5 digest. For information about MD5, see RFC1321.

" + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message body string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" } }, "MD5OfMessageAttributes": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this\n attribute to verify that Amazon SQS received the message correctly. Amazon SQS\n URL-decodes the message before creating the MD5 digest. For information about MD5, see\n RFC1321.

" + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message attribute string. You can use this attribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest. For information about MD5, see RFC1321.

" } }, "MD5OfMessageSystemAttributes": { "target": "com.amazonaws.sqs#String", "traits": { - "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message system attribute string. You can use this\n attribute to verify that Amazon SQS received the message correctly. Amazon SQS\n URL-decodes the message before creating the MD5 digest.

" + "smithy.api#documentation": "

An MD5 digest of the non-URL-encoded message system attribute string. You can use this \nattribute to verify that Amazon SQS received the message correctly. Amazon SQS URL-decodes the message before creating the MD5 digest.

" } }, "MessageId": { @@ -3886,7 +3909,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sets the value of one or more queue attributes. When you change a queue's attributes,\n the change can take up to 60 seconds for most of the attributes to propagate throughout\n the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute\n can take up to 15 minutes and will impact existing messages in the queue potentially\n causing them to be expired and deleted if the MessageRetentionPeriod is\n reduced below the age of existing messages.

\n \n
    \n
  • \n

    In the future, new attributes might be added. If you write code that calls\n this action, we recommend that you structure your code so that it can handle\n new attributes gracefully.

    \n
  • \n
  • \n

    Cross-account permissions don't apply to this action. For more\n information, see Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

    \n
  • \n
  • \n

    To remove the ability to change queue permissions, you must deny\n permission to the AddPermission, RemovePermission,\n and SetQueueAttributes actions in your IAM policy.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Sets the value of one or more queue attributes, like a policy. When you change a\n queue's attributes, the change can take up to 60 seconds for most of the attributes to\n propagate throughout the Amazon SQS system. Changes made to the\n MessageRetentionPeriod attribute can take up to 15 minutes and will\n impact existing messages in the queue potentially causing them to be expired and deleted\n if the MessageRetentionPeriod is reduced below the age of existing\n messages.

\n \n
    \n
  • \n

    In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.

    \n
  • \n
  • \n

    Cross-account permissions don't apply to this action. For more information, \nsee Grant \ncross-account permissions to a role and a username in the Amazon SQS Developer Guide.

    \n
  • \n
  • \n

    To remove the ability to change queue permissions, you must deny permission to the AddPermission, RemovePermission, and SetQueueAttributes actions in your IAM policy.

    \n
  • \n
\n
" } }, "com.amazonaws.sqs#SetQueueAttributesRequest": { @@ -3902,7 +3925,7 @@ "Attributes": { "target": "com.amazonaws.sqs#QueueAttributeMap", "traits": { - "smithy.api#documentation": "

A map of attributes to set.

\n

The following lists the names, descriptions, and values of the special request\n parameters that the SetQueueAttributes action uses:

\n
    \n
  • \n

    \n DelaySeconds – The length of time, in seconds, for which the\n delivery of all messages in the queue is delayed. Valid values: An integer from\n 0 to 900 (15 minutes). Default: 0.

    \n
  • \n
  • \n

    \n MaximumMessageSize – The limit of how many bytes a message can\n contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes\n (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

    \n
  • \n
  • \n

    \n MessageRetentionPeriod – The length of time, in seconds, for which\n Amazon SQS retains a message. Valid values: An integer representing seconds,\n from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). When you\n change a queue's attributes, the change can take up to 60 seconds for most of\n the attributes to propagate throughout the Amazon SQS system. Changes made to\n the MessageRetentionPeriod attribute can take up to 15 minutes and\n will impact existing messages in the queue potentially causing them to be\n expired and deleted if the MessageRetentionPeriod is reduced below\n the age of existing messages.

    \n
  • \n
  • \n

    \n Policy – The queue's policy. A valid Amazon Web Services\n policy. For more information about policy structure, see Overview of Amazon Web Services IAM Policies in the\n Identity and Access Management User Guide.\n

    \n
  • \n
  • \n

    \n ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for\n which a \n ReceiveMessage\n action waits for a message\n to arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0.

    \n
  • \n
  • \n

    \n VisibilityTimeout – The visibility timeout for the queue, in\n seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For\n more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer\n Guide.

    \n
  • \n
\n

The following attributes apply only to dead-letter queues:\n

\n
    \n
  • \n

    \n RedrivePolicy – The string that includes the parameters for the\n dead-letter queue functionality of the source queue as a JSON object. The\n parameters are as follows:

    \n
      \n
    • \n

      \n deadLetterTargetArn – The Amazon Resource Name (ARN) of\n the dead-letter queue to which Amazon SQS moves messages after the value\n of maxReceiveCount is exceeded.

      \n
    • \n
    • \n

      \n maxReceiveCount – The number of times a message is\n delivered to the source queue before being moved to the dead-letter\n queue. Default: 10. When the ReceiveCount for a message\n exceeds the maxReceiveCount for a queue, Amazon SQS moves\n the message to the dead-letter-queue.

      \n
    • \n
    \n
  • \n
  • \n

    \n RedriveAllowPolicy – The string that includes the parameters for\n the permissions for the dead-letter queue redrive permission and which source\n queues can specify dead-letter queues as a JSON object. The parameters are as\n follows:

    \n
      \n
    • \n

      \n redrivePermission – The permission type that defines\n which source queues can specify the current queue as the dead-letter\n queue. Valid values are:

      \n
        \n
      • \n

        \n allowAll – (Default) Any source queues in this\n Amazon Web Services account in the same\n Region can specify this queue as the dead-letter queue.

        \n
      • \n
      • \n

        \n denyAll – No source queues can specify this queue\n as the dead-letter queue.

        \n
      • \n
      • \n

        \n byQueue – Only queues specified by the\n sourceQueueArns parameter can specify this\n queue as the dead-letter queue.

        \n
      • \n
      \n
    • \n
    • \n

      \n sourceQueueArns – The Amazon Resource Names (ARN)s of the\n source queues that can specify this queue as the dead-letter queue and\n redrive messages. You can specify this parameter only when the\n redrivePermission parameter is set to\n byQueue. You can specify up to 10 source queue ARNs. To\n allow more than 10 source queues to specify dead-letter queues, set the\n redrivePermission parameter to\n allowAll.

      \n
    • \n
    \n
  • \n
\n \n

The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the\n dead-letter queue of a standard queue must also be a standard queue.

\n
\n

The following attributes apply only to server-side-encryption:

\n
    \n
  • \n

    \n KmsMasterKeyId – The ID of an Amazon Web Services managed customer\n master key (CMK) for Amazon SQS or a custom CMK. For more information, see\n Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is\n always alias/aws/sqs, the alias of a custom CMK can, for example,\n be alias/MyAlias\n . For more examples, see\n KeyId in the Key Management Service API\n Reference.

    \n
  • \n
  • \n

    \n KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for\n which Amazon SQS can reuse a data key to\n encrypt or decrypt messages before calling KMS again. An integer representing\n seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). Default:\n 300 (5 minutes). A shorter time period provides better security but results in\n more calls to KMS which might incur charges after Free Tier. For more\n information, see How Does the Data Key Reuse Period Work?.

    \n
  • \n
  • \n

    \n SqsManagedSseEnabled – Enables server-side queue encryption using\n SQS owned encryption keys. Only one server-side encryption option is supported\n per queue (for example, SSE-KMS or SSE-SQS).

    \n
  • \n
\n

The following attribute applies only to FIFO\n (first-in-first-out) queues:

\n
    \n
  • \n

    \n ContentBasedDeduplication – Enables content-based deduplication.\n For more information, see Exactly-once processing in the Amazon SQS Developer\n Guide. Note the following:

    \n
      \n
    • \n

      Every message must have a unique\n MessageDeduplicationId.

      \n
        \n
      • \n

        You may provide a MessageDeduplicationId\n explicitly.

        \n
      • \n
      • \n

        If you aren't able to provide a\n MessageDeduplicationId and you enable\n ContentBasedDeduplication for your queue,\n Amazon SQS uses a SHA-256 hash to generate the\n MessageDeduplicationId using the body of the\n message (but not the attributes of the message).

        \n
      • \n
      • \n

        If you don't provide a MessageDeduplicationId and\n the queue doesn't have ContentBasedDeduplication\n set, the action fails with an error.

        \n
      • \n
      • \n

        If the queue has ContentBasedDeduplication set,\n your MessageDeduplicationId overrides the generated\n one.

        \n
      • \n
      \n
    • \n
    • \n

      When ContentBasedDeduplication is in effect, messages\n with identical content sent within the deduplication interval are\n treated as duplicates and only one copy of the message is\n delivered.

      \n
    • \n
    • \n

      If you send one message with ContentBasedDeduplication\n enabled and then another message with a\n MessageDeduplicationId that is the same as the one\n generated for the first MessageDeduplicationId, the two\n messages are treated as duplicates and only one copy of the message is\n delivered.

      \n
    • \n
    \n
  • \n
\n

The following attributes apply only to high\n throughput for FIFO queues:

\n
    \n
  • \n

    \n DeduplicationScope – Specifies whether message deduplication\n occurs at the message group or queue level. Valid values are\n messageGroup and queue.

    \n
  • \n
  • \n

    \n FifoThroughputLimit – Specifies whether the FIFO queue throughput\n quota applies to the entire queue or per message group. Valid values are\n perQueue and perMessageGroupId. The\n perMessageGroupId value is allowed only when the value for\n DeduplicationScope is messageGroup.

    \n
  • \n
\n

To enable high throughput for FIFO queues, do the following:

\n
    \n
  • \n

    Set DeduplicationScope to messageGroup.

    \n
  • \n
  • \n

    Set FifoThroughputLimit to perMessageGroupId.

    \n
  • \n
\n

If you set these attributes to anything other than the values shown for enabling high\n throughput, normal throughput is in effect and deduplication occurs as specified.

\n

For information on throughput quotas, see Quotas\n related to messages in the Amazon SQS Developer\n Guide.

", + "smithy.api#documentation": "

A map of attributes to set.

\n

The following lists the names, descriptions, and values of the special request\n parameters that the SetQueueAttributes action uses:

\n
    \n
  • \n

    \n DelaySeconds – The length of time, in seconds, for which the\n delivery of all messages in the queue is delayed. Valid values: An integer from\n 0 to 900 (15 minutes). Default: 0.

    \n
  • \n
  • \n

    \n MaximumMessageSize – The limit of how many bytes a message\n can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes\n (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).

    \n
  • \n
  • \n

    \n MessageRetentionPeriod – The length of time, in seconds, for\n which Amazon SQS retains a message. Valid values: An integer representing seconds,\n from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). When you\n change a queue's attributes, the change can take up to 60 seconds for most of\n the attributes to propagate throughout the Amazon SQS system. Changes made to the\n MessageRetentionPeriod attribute can take up to 15 minutes and\n will impact existing messages in the queue potentially causing them to be\n expired and deleted if the MessageRetentionPeriod is reduced below\n the age of existing messages.

    \n
  • \n
  • \n

    \n Policy – The queue's policy. A valid Amazon Web Services policy. For more\n information about policy structure, see Overview of Amazon Web Services IAM\n Policies in the Identity and Access Management User\n Guide.

    \n
  • \n
  • \n

    \n ReceiveMessageWaitTimeSeconds – The length of time, in\n seconds, for which a \n ReceiveMessage\n action waits\n for a message to arrive. Valid values: An integer from 0 to 20 (seconds).\n Default: 0.

    \n
  • \n
  • \n

    \n VisibilityTimeout – The visibility timeout for the queue, in\n seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For\n more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer\n Guide.

    \n
  • \n
\n

The following attributes apply only to dead-letter queues:\n

\n
    \n
  • \n

    \n RedrivePolicy – The string that includes the parameters for the dead-letter queue functionality \n of the source queue as a JSON object. The parameters are as follows:

    \n
      \n
    • \n

      \n deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to \n which Amazon SQS moves messages after the value of maxReceiveCount is exceeded.

      \n
    • \n
    • \n

      \n maxReceiveCount – The number of times a message is delivered to the source queue before being \n moved to the dead-letter queue. Default: 10. When the ReceiveCount for a message exceeds the maxReceiveCount \n for a queue, Amazon SQS moves the message to the dead-letter-queue.

      \n
    • \n
    \n
  • \n
  • \n

    \n RedriveAllowPolicy – The string that includes the parameters for the permissions for the dead-letter\n queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:

    \n
      \n
    • \n

      \n redrivePermission – The permission type that defines which source queues can \n specify the current queue as the dead-letter queue. Valid values are:

      \n
        \n
      • \n

        \n allowAll – (Default) Any source queues in this Amazon Web Services account in the same Region can \n specify this queue as the dead-letter queue.

        \n
      • \n
      • \n

        \n denyAll – No source queues can specify this queue as the dead-letter\n queue.

        \n
      • \n
      • \n

        \n byQueue – Only queues specified by the sourceQueueArns parameter can specify \n this queue as the dead-letter queue.

        \n
      • \n
      \n
    • \n
    • \n

      \n sourceQueueArns – The Amazon Resource Names (ARN)s of the source queues that can specify \n this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the \n redrivePermission parameter is set to byQueue. You can specify up to 10 source queue ARNs. \n To allow more than 10 source queues to specify dead-letter queues, set the redrivePermission parameter\n to allowAll.

      \n
    • \n
    \n
  • \n
\n \n

The dead-letter queue of a \n FIFO queue must also be a FIFO queue. Similarly, the dead-letter \n queue of a standard queue must also be a standard queue.

\n
\n

The following attributes apply only to server-side-encryption:

\n
    \n
  • \n

    \n KmsMasterKeyId – The ID of an Amazon Web Services managed customer master\n key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms. While the alias of the AWS-managed CMK for Amazon SQS is\n always alias/aws/sqs, the alias of a custom CMK can, for example,\n be alias/MyAlias\n . For more examples, see\n KeyId in the Key Management Service API\n Reference.

    \n
  • \n
  • \n

    \n KmsDataKeyReusePeriodSeconds – The length of time, in\n seconds, for which Amazon SQS can reuse a data key to\n encrypt or decrypt messages before calling KMS again. An integer\n representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24\n hours). Default: 300 (5 minutes). A shorter time period provides better security\n but results in more calls to KMS which might incur charges after Free Tier. For\n more information, see How Does the Data Key Reuse Period Work?.

    \n
  • \n
  • \n

    \n SqsManagedSseEnabled – Enables server-side queue encryption\n using SQS owned encryption keys. Only one server-side encryption option is\n supported per queue (for example, SSE-KMS or SSE-SQS).

    \n
  • \n
\n

The following attribute applies only to FIFO (first-in-first-out)\n queues:

\n
    \n
  • \n

    \n ContentBasedDeduplication – Enables content-based\n deduplication. For more information, see Exactly-once processing in the Amazon SQS Developer\n Guide. Note the following:

    \n
      \n
    • \n

      Every message must have a unique\n MessageDeduplicationId.

      \n
        \n
      • \n

        You may provide a MessageDeduplicationId\n explicitly.

        \n
      • \n
      • \n

        If you aren't able to provide a\n MessageDeduplicationId and you enable\n ContentBasedDeduplication for your queue, Amazon SQS\n uses a SHA-256 hash to generate the\n MessageDeduplicationId using the body of the\n message (but not the attributes of the message).

        \n
      • \n
      • \n

        If you don't provide a MessageDeduplicationId and\n the queue doesn't have ContentBasedDeduplication\n set, the action fails with an error.

        \n
      • \n
      • \n

        If the queue has ContentBasedDeduplication set,\n your MessageDeduplicationId overrides the generated\n one.

        \n
      • \n
      \n
    • \n
    • \n

      When ContentBasedDeduplication is in effect, messages\n with identical content sent within the deduplication interval are\n treated as duplicates and only one copy of the message is\n delivered.

      \n
    • \n
    • \n

      If you send one message with ContentBasedDeduplication\n enabled and then another message with a\n MessageDeduplicationId that is the same as the one\n generated for the first MessageDeduplicationId, the two\n messages are treated as duplicates and only one copy of the message is\n delivered.

      \n
    • \n
    \n
  • \n
\n

The following attributes apply only to \nhigh throughput\nfor FIFO queues:

\n
    \n
  • \n

    \n DeduplicationScope – Specifies whether message deduplication occurs at the \n message group or queue level. Valid values are messageGroup and queue.

    \n
  • \n
  • \n

    \n FifoThroughputLimit – Specifies whether the FIFO queue throughput \n quota applies to the entire queue or per message group. Valid values are perQueue and perMessageGroupId. \n The perMessageGroupId value is allowed only when the value for DeduplicationScope is messageGroup.

    \n
  • \n
\n

To enable high throughput for FIFO queues, do the following:

\n
    \n
  • \n

    Set DeduplicationScope to messageGroup.

    \n
  • \n
  • \n

    Set FifoThroughputLimit to perMessageGroupId.

    \n
  • \n
\n

If you set these attributes to anything other than the values shown for enabling high\n throughput, normal throughput is in effect and deduplication occurs as specified.

\n

For information on throughput quotas, \n see Quotas related to messages \n in the Amazon SQS Developer Guide.

", "smithy.api#required": {}, "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Attribute" @@ -3940,7 +3963,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts an asynchronous task to move messages from a specified source queue to a\n specified destination queue.

\n \n
    \n
  • \n

    This action is currently limited to supporting message redrive from queues\n that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. Non-SQS\n queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are\n currently not supported.

    \n
  • \n
  • \n

    In dead-letter queues redrive context, the\n StartMessageMoveTask the source queue is the DLQ, while the\n destination queue can be the original source queue (from which the messages\n were driven to the dead-letter-queue), or a custom destination queue.

    \n
  • \n
  • \n

    Currently, only standard queues support redrive. FIFO queues don't support\n redrive.

    \n
  • \n
  • \n

    Only one active message movement task is supported per queue at any given\n time.

    \n
  • \n
\n
" + "smithy.api#documentation": "

Starts an asynchronous task to move messages from a specified source queue to a\n specified destination queue.

\n \n
    \n
  • \n

    This action is currently limited to supporting message redrive from queues\n that are configured as dead-letter queues (DLQs) of other Amazon SQS queues only. Non-SQS\n queue sources of dead-letter queues, such as Lambda or Amazon SNS topics, are\n currently not supported.

    \n
  • \n
  • \n

In a dead-letter queue redrive context, the\n StartMessageMoveTask source queue is the DLQ, while the\n destination queue can be the original source queue (from which the messages\n were driven to the dead-letter queue), or a custom destination queue.

    \n
  • \n
  • \n

    Only one active message movement task is supported per queue at any given\n time.

    \n
  • \n
\n
" } }, "com.amazonaws.sqs#StartMessageMoveTaskRequest": { @@ -4046,7 +4069,7 @@ } ], "traits": { - "smithy.api#documentation": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see\n Tagging\n Your Amazon SQS Queues in the Amazon SQS Developer\n Guide.

\n

When you use queue tags, keep the following guidelines in mind:

\n
    \n
  • \n

    Adding more than 50 tags to a queue isn't recommended.

    \n
  • \n
  • \n

    Tags don't have any semantic meaning. Amazon SQS interprets tags as character\n strings.

    \n
  • \n
  • \n

    Tags are case-sensitive.

    \n
  • \n
  • \n

    A new tag with a key identical to that of an existing tag overwrites the\n existing tag.

    \n
  • \n
\n

For a full list of tag restrictions, see Quotas related to queues in the Amazon SQS Developer\n Guide.

\n \n

Cross-account permissions don't apply to this action. For more information, see\n Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

\n
" + "smithy.api#documentation": "

Add cost allocation tags to the specified Amazon SQS queue. For an overview, see Tagging \nYour Amazon SQS Queues in the Amazon SQS Developer Guide.

\n

When you use queue tags, keep the following guidelines in mind:

\n
    \n
  • \n

    Adding more than 50 tags to a queue isn't recommended.

    \n
  • \n
  • \n

    Tags don't have any semantic meaning. Amazon SQS interprets tags as character strings.

    \n
  • \n
  • \n

    Tags are case-sensitive.

    \n
  • \n
  • \n

    A new tag with a key identical to that of an existing tag overwrites the existing tag.

    \n
  • \n
\n

For a full list of tag restrictions, see \nQuotas related to queues \nin the Amazon SQS Developer Guide.

\n \n

Cross-account permissions don't apply to this action. For more information, \nsee Grant \ncross-account permissions to a role and a username in the Amazon SQS Developer Guide.

\n
" } }, "com.amazonaws.sqs#TagQueueRequest": { @@ -4139,7 +4162,7 @@ } ], "traits": { - "smithy.api#documentation": "

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see\n Tagging\n Your Amazon SQS Queues in the Amazon SQS Developer\n Guide.

\n \n

Cross-account permissions don't apply to this action. For more information, see\n Grant cross-account permissions to a role and a username in the\n Amazon SQS Developer Guide.

\n
" + "smithy.api#documentation": "

Remove cost allocation tags from the specified Amazon SQS queue. For an overview, see Tagging \nYour Amazon SQS Queues in the Amazon SQS Developer Guide.

\n \n

Cross-account permissions don't apply to this action. For more information, \nsee Grant \ncross-account permissions to a role and a username in the Amazon SQS Developer Guide.

\n
" } }, "com.amazonaws.sqs#UntagQueueRequest": { diff --git a/models/ssm-sap.json b/models/ssm-sap.json index 3e020f2cb9..ad96b14b1c 100644 --- a/models/ssm-sap.json +++ b/models/ssm-sap.json @@ -703,6 +703,17 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.ssmsap#ConnectedEntityType": { + "type": "enum", + "members": { + "DBMS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DBMS" + } + } + } + }, "com.amazonaws.ssmsap#CredentialType": { "type": "enum", "members": { @@ -1648,7 +1659,7 @@ "target": "com.amazonaws.ssmsap#MaxResults", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The maximum number of results to return with a single call. To retrieve the remaining\n results, make another call with the returned nextToken value.

" + "smithy.api#documentation": "

The maximum number of results to return with a single call. To retrieve the remaining\n results, make another call with the returned nextToken value.

" } }, "Filters": { @@ -1837,6 +1848,92 @@ } } }, + "com.amazonaws.ssmsap#ListOperationEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmsap#ListOperationEventsInput" + }, + "output": { + "target": "com.amazonaws.ssmsap#ListOperationEventsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmsap#InternalServerException" + }, + { + "target": "com.amazonaws.ssmsap#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of operation events.

\n

Available parameters include OperationId, as well as optional parameters\n MaxResults, NextToken, and\n Filters.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/list-operation-events" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "OperationEvents" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.ssmsap#ListOperationEventsInput": { + "type": "structure", + "members": { + "OperationId": { + "target": "com.amazonaws.ssmsap#OperationId", + "traits": { + "smithy.api#documentation": "

The ID of the operation.

", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.ssmsap#MaxResults", + "traits": { + "smithy.api#default": null, + "smithy.api#documentation": "

The maximum number of results to return with a single call. To retrieve the remaining\n results, make another call with the returned nextToken value.

\n

If you do not specify a value for MaxResults, the request returns 50 items\n per page by default.

" + } + }, + "NextToken": { + "target": "com.amazonaws.ssmsap#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use to retrieve the next page of results. \n This value is null when there are no more results to return.

" + } + }, + "Filters": { + "target": "com.amazonaws.ssmsap#FilterList", + "traits": { + "smithy.api#documentation": "

Optionally specify filters to narrow the returned operation \n event items.

\n

Valid filter names include status, resourceID, \n and resourceType. The valid operator for all three filters \n is Equals.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmsap#ListOperationEventsOutput": { + "type": "structure", + "members": { + "OperationEvents": { + "target": "com.amazonaws.ssmsap#OperationEventList", + "traits": { + "smithy.api#documentation": "

A returned list of operation events that \n meet the filter criteria.

" + } + }, + "NextToken": { + "target": "com.amazonaws.ssmsap#NextToken", + "traits": { + "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are\n no more results to return.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ssmsap#ListOperations": { "type": "operation", "input": { @@ -2059,6 +2156,79 @@ "smithy.api#documentation": "

The operations performed by AWS Systems Manager for SAP.

" } }, + "com.amazonaws.ssmsap#OperationEvent": { + "type": "structure", + "members": { + "Description": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

A description of the operation event. For example, \n \"Stop the EC2 instance i-abcdefgh987654321\".

" + } + }, + "Resource": { + "target": "com.amazonaws.ssmsap#Resource", + "traits": { + "smithy.api#documentation": "

The resource involved in the operations event.

\n

Contains ResourceArn ARN and ResourceType.

" + } + }, + "Status": { + "target": "com.amazonaws.ssmsap#OperationEventStatus", + "traits": { + "smithy.api#documentation": "

The status of the operation event. The possible statuses \n are: IN_PROGRESS, \n COMPLETED, and FAILED.

" + } + }, + "StatusMessage": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The status message relating to a specific \n operation event.

" + } + }, + "Timestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of the specified operation event.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An operation event returns details for an operation, including \n key milestones which can be used to monitor and track operations \n in progress.

\n

Operation events contain:

\n
    \n
  • \n

    Description string

    \n
  • \n
  • \n

    Resource, including its ARN and type

    \n
  • \n
  • \n

    Status

    \n
  • \n
  • \n

    StatusMessage string

    \n
  • \n
  • \n

    TimeStamp

    \n
  • \n
\n

Operation event examples include StartApplication or \n StopApplication.

" + } + }, + "com.amazonaws.ssmsap#OperationEventList": { + "type": "list", + "member": { + "target": "com.amazonaws.ssmsap#OperationEvent" + } + }, + "com.amazonaws.ssmsap#OperationEventResourceType": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\w]+::[\\w]+::[\\w]+$" + } + }, + "com.amazonaws.ssmsap#OperationEventStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.ssmsap#OperationId": { "type": "string", "traits": { @@ -2405,6 +2575,26 @@ "smithy.api#documentation": "

Details of the SAP HANA system replication for the instance.

" } }, + "com.amazonaws.ssmsap#Resource": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.ssmsap#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the source resource.

\n

Example of ResourceArn: \n \"arn:aws:ec2:us-east-1:111111111111:instance/i-abcdefgh987654321\"

" + } + }, + "ResourceType": { + "target": "com.amazonaws.ssmsap#OperationEventResourceType", + "traits": { + "smithy.api#documentation": "

The resource type.

\n

Example of ResourceType: \"AWS::SystemsManagerSAP::Component\" \n or \"AWS::EC2::Instance\".

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The resource contains a ResourceArn \n and the ResourceType.

" + } + }, "com.amazonaws.ssmsap#ResourceId": { "type": "string", "traits": { @@ -2492,6 +2682,9 @@ { "target": "com.amazonaws.ssmsap#ListDatabases" }, + { + "target": "com.amazonaws.ssmsap#ListOperationEvents" + }, { "target": "com.amazonaws.ssmsap#ListOperations" }, @@ -2504,9 +2697,15 @@ { "target": "com.amazonaws.ssmsap#RegisterApplication" }, + { + "target": "com.amazonaws.ssmsap#StartApplication" + }, { "target": "com.amazonaws.ssmsap#StartApplicationRefresh" }, + { + "target": "com.amazonaws.ssmsap#StopApplication" + }, { "target": "com.amazonaws.ssmsap#TagResource" }, @@ -3214,6 +3413,66 @@ "smithy.api#pattern": "^arn:(.+:){2,4}.+$|^arn:(.+:){1,3}.+\\/.+$" } }, + "com.amazonaws.ssmsap#StartApplication": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmsap#StartApplicationInput" + }, + "output": { + "target": "com.amazonaws.ssmsap#StartApplicationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmsap#ConflictException" + }, + { + "target": "com.amazonaws.ssmsap#InternalServerException" + }, + { + "target": "com.amazonaws.ssmsap#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmsap#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Starts an application registered with AWS Systems Manager for SAP.

\n

Parameter ApplicationId is required.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/start-application" + } + } + }, + "com.amazonaws.ssmsap#StartApplicationInput": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.ssmsap#ApplicationId", + "traits": { + "smithy.api#documentation": "

The ID of the application.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmsap#StartApplicationOutput": { + "type": "structure", + "members": { + "OperationId": { + "target": "com.amazonaws.ssmsap#OperationId", + "traits": { + "smithy.api#documentation": "

The ID of the operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ssmsap#StartApplicationRefresh": { "type": "operation", "input": { @@ -3277,6 +3536,78 @@ "smithy.api#output": {} } }, + "com.amazonaws.ssmsap#StopApplication": { + "type": "operation", + "input": { + "target": "com.amazonaws.ssmsap#StopApplicationInput" + }, + "output": { + "target": "com.amazonaws.ssmsap#StopApplicationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.ssmsap#ConflictException" + }, + { + "target": "com.amazonaws.ssmsap#InternalServerException" + }, + { + "target": "com.amazonaws.ssmsap#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.ssmsap#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Stops an application registered with AWS Systems Manager for SAP.

\n

Parameter ApplicationId is required. \n Parameters StopConnectedEntity and \n IncludeEc2InstanceShutdown are optional.

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/stop-application" + } + } + }, + "com.amazonaws.ssmsap#StopApplicationInput": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.ssmsap#ApplicationId", + "traits": { + "smithy.api#documentation": "

The ID of the application.

", + "smithy.api#required": {} + } + }, + "StopConnectedEntity": { + "target": "com.amazonaws.ssmsap#ConnectedEntityType", + "traits": { + "smithy.api#documentation": "

Specify the ConnectedEntityType. Accepted type \n is DBMS.

\n

If this parameter is included, the connected DBMS (Database \n Management System) will be stopped.

" + } + }, + "IncludeEc2InstanceShutdown": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Boolean. If included and if set to True, the \n StopApplication operation will shut down the associated Amazon EC2 instance in addition to \n the application.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ssmsap#StopApplicationOutput": { + "type": "structure", + "members": { + "OperationId": { + "target": "com.amazonaws.ssmsap#OperationId", + "traits": { + "smithy.api#documentation": "

The ID of the operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ssmsap#TagKey": { "type": "string", "traits": { diff --git a/models/ssm.json b/models/ssm.json index 9622ad1d88..20551d6f77 100644 --- a/models/ssm.json +++ b/models/ssm.json @@ -14454,7 +14454,8 @@ "smithy.api#length": { "min": 1, "max": 46 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.ssm#ISO8601String": { diff --git a/models/sso-oidc.json b/models/sso-oidc.json index 2974461fc0..ad40043c8d 100644 --- a/models/sso-oidc.json +++ b/models/sso-oidc.json @@ -990,6 +990,9 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.ssooidc#ArnType": { + "type": "string" + }, "com.amazonaws.ssooidc#Assertion": { "type": "string", "traits": { @@ -1036,6 +1039,12 @@ "com.amazonaws.ssooidc#ClientType": { "type": "string" }, + "com.amazonaws.ssooidc#CodeVerifier": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.ssooidc#CreateToken": { "type": "operation", "input": { @@ -1098,6 +1107,25 @@ "expiresIn": 1579729529, "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN" } + }, + { + "title": "Call OAuth/OIDC /token endpoint for Refresh Token grant with Secret authentication", + "documentation": "", + "input": { + "clientId": "_yzkThXVzLWVhc3QtMQEXAMPLECLIENTID", + "clientSecret": "VERYLONGSECRETeyJraWQiOiJrZXktMTU2NDAyODA5OSIsImFsZyI6IkhTMzg0In0", + "grantType": "refresh_token", + "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN", + "scope": [ + "codewhisperer:completions" + ] + }, + "output": { + "accessToken": "aoal-YigITUDiNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZidEXAMPLEACCESSTOKEN", + "tokenType": "Bearer", + "expiresIn": 1579729529, + "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN" + } } ], "smithy.api#http": { @@ -1161,6 +1189,12 @@ "traits": { "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value specifies\n the location of the client or application that has registered to receive the authorization\n code.

" } + }, + "codeVerifier": { + "target": "com.amazonaws.ssooidc#CodeVerifier", + "traits": { + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value is generated\n by the client and presented to validate the original code challenge value the client passed at\n authorization time.

" + } } }, "traits": { @@ -1173,7 +1207,7 @@ "accessToken": { "target": "com.amazonaws.ssooidc#AccessToken", "traits": { - "smithy.api#documentation": "

A bearer token to access AWS accounts and applications assigned to a user.

" + "smithy.api#documentation": "

A bearer token to access Amazon Web Services accounts and applications assigned to a user.

" } }, "tokenType": { @@ -1253,7 +1287,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates and returns access and refresh tokens for clients and applications that are\n authenticated using IAM entities. The access token can be used to fetch short-term credentials\n for the assigned AWS accounts or to access application APIs using bearer\n authentication.

", + "smithy.api#documentation": "

Creates and returns access and refresh tokens for clients and applications that are\n authenticated using IAM entities. The access token can be used to fetch short-term credentials\n for the assigned Amazon Web Services accounts or to access application APIs using bearer\n authentication.

", "smithy.api#examples": [ { "title": "Call OAuth/OIDC /token endpoint for Authorization Code grant with IAM authentication", @@ -1282,6 +1316,72 @@ "sts:identity_context" ] } + }, + { + "title": "Call OAuth/OIDC /token endpoint for Refresh Token grant with IAM authentication", + "documentation": "", + "input": { + "clientId": "arn:aws:sso::123456789012:application/ssoins-111111111111/apl-222222222222", + "grantType": "refresh_token", + "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN" + }, + "output": { + "accessToken": "aoal-YigITUDiNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZidEXAMPLEACCESSTOKEN", + "tokenType": "Bearer", + "expiresIn": 1579729529, + "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN", + "issuedTokenType": "urn:ietf:params:oauth:token-type:refresh_token", + "scope": [ + "openid", + "aws", + "sts:identity_context" + ] + } + }, + { + "title": "Call OAuth/OIDC /token endpoint for JWT Bearer grant with IAM authentication", + "documentation": "", + "input": { + "clientId": "arn:aws:sso::123456789012:application/ssoins-111111111111/apl-222222222222", + "grantType": "urn:ietf:params:oauth:grant-type:jwt-bearer", + "assertion": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjFMVE16YWtpaGlSbGFfOHoyQkVKVlhlV01xbyJ9.eyJ2ZXIiOiIyLjAiLCJpc3MiOiJodHRwczovL2xvZ2luLm1pY3Jvc29mdG9ubGluZS5jb20vOTEyMjA0MGQtNmM2Ny00YzViLWIxMTItMzZhMzA0YjY2ZGFkL3YyLjAiLCJzdWIiOiJBQUFBQUFBQUFBQUFBQUFBQUFBQUFJa3pxRlZyU2FTYUZIeTc4MmJidGFRIiwiYXVkIjoiNmNiMDQwMTgtYTNmNS00NmE3LWI5OTUtOTQwYzc4ZjVhZWYzIiwiZXhwIjoxNTM2MzYxNDExLCJpYXQiOjE1MzYyNzQ3MTEsIm5iZiI6MTUzNjI3NDcxMSwibmFtZSI6IkFiZSBMaW5jb2xuIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiQWJlTGlAbWljcm9zb2Z0LmNvbSIsIm9pZCI6IjAwMDAwMDAwLTAwMDAtMDAwMC02NmYzLTMzMzJlY2E3ZWE4MSIsInRpZCI6IjkxMjIwNDBkLTZjNjctNGM1Yi1iMTEyLTM2YTMwNGI2NmRhZCIsIm5vbmNlIjoiMTIzNTIzIiwiYWlvIjoiRGYyVVZYTDFpeCFsTUNXTVNPSkJjRmF0emNHZnZGR2hqS3Y4cTVnMHg3MzJkUjVNQjVCaXN2R1FPN1lXQnlqZDhpUURMcSFlR2JJRGFreXA1bW5PcmNkcUhlWVNubHRlcFFtUnA2QUlaOGpZIn0.1AFWW-Ck5nROwSlltm7GzZvDwUkqvhSQpm55TQsmVo9Y59cLhRXpvB8n-55HCr9Z6G_31_UbeUkoz612I2j_Sm9FFShSDDjoaLQr54CreGIJvjtmS3EkK9a7SJBbcpL1MpUtlfygow39tFjY7EVNW9plWUvRrTgVk7lYLprvfzw-CIqw3gHC-T7IK_m_xkr08INERBtaecwhTeN4chPC4W3jdmw_lIxzC48YoQ0dB1L9-ImX98Egypfrlbm0IBL5spFzL6JDZIRRJOu8vecJvj1mq-IUhGt0MacxX8jdxYLP-KUu2d9MbNKpCKJuZ7p8gwTL5B7NlUdh_dmSviPWrw" + }, + "output": { + "accessToken": "aoal-YigITUDiNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZidEXAMPLEACCESSTOKEN", + "tokenType": "Bearer", + "expiresIn": 1579729529, + "refreshToken": "aorvJYubGpU6i91YnH7Mfo-AT2fIVa1zCfA_Rvq9yjVKIP3onFmmykuQ7E93y2I-9Nyj-A_sVvMufaLNL0bqnDRtgAkc0:MGUCMFrRsktMRVlWaOR70XGMFGLL0SlcCw4DiYveIiOVx1uK9BbD0gvAddsW3UTLozXKMgIxAJ3qxUvjpnlLIOaaKOoa/FuNgqJVvr9GMwDtnAtlh9iZzAkEXAMPLEREFRESHTOKEN", + "idToken": 
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhd3M6aWRlbnRpdHlfc3RvcmVfaWQiOiJkLTMzMzMzMzMzMzMiLCJzdWIiOiI3MzA0NDhmMi1lMGExLTcwYTctYzk1NC0wMDAwMDAwMDAwMDAiLCJhd3M6aW5zdGFuY2VfYWNjb3VudCI6IjExMTExMTExMTExMSIsInN0czppZGVudGl0eV9jb250ZXh0IjoiRVhBTVBMRUlERU5USVRZQ09OVEVYVCIsInN0czphdWRpdF9jb250ZXh0IjoiRVhBTVBMRUFVRElUQ09OVEVYVCIsImlzcyI6Imh0dHBzOi8vaWRlbnRpdHljZW50ZXIuYW1hem9uYXdzLmNvbS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmlkZW50aXR5X3N0b3JlX2FybiI6ImFybjphd3M6aWRlbnRpdHlzdG9yZTo6MTExMTExMTExMTExOmlkZW50aXR5c3RvcmUvZC0zMzMzMzMzMzMzIiwiYXVkIjoiYXJuOmF3czpzc286OjEyMzQ1Njc4OTAxMjphcHBsaWNhdGlvbi9zc29pbnMtMTExMTExMTExMTExL2FwbC0yMjIyMjIyMjIyMjIiLCJhd3M6aW5zdGFuY2VfYXJuIjoiYXJuOmF3czpzc286OjppbnN0YW5jZS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmNyZWRlbnRpYWxfaWQiOiJfWlIyTjZhVkJqMjdGUEtheWpfcEtwVjc3QVBERl80MXB4ZXRfWWpJdUpONlVJR2RBdkpFWEFNUExFQ1JFRElEIiwiYXV0aF90aW1lIjoiMjAyMC0wMS0yMlQxMjo0NToyOVoiLCJleHAiOjE1Nzk3Mjk1MjksImlhdCI6MTU3OTcyNTkyOX0.Xyah6qbk78qThzJ41iFU2yfGuRqqtKXHrJYwQ8L9Ip0", + "issuedTokenType": "urn:ietf:params:oauth:token-type:refresh_token", + "scope": [ + "openid", + "aws", + "sts:identity_context" + ] + } + }, + { + "title": "Call OAuth/OIDC /token endpoint for Token Exchange grant with IAM authentication", + "documentation": "", + "input": { + "clientId": "arn:aws:sso::123456789012:application/ssoins-111111111111/apl-222222222222", + "grantType": "urn:ietf:params:oauth:grant-type:token-exchange", + "subjectToken": "aoak-Hig8TUDPNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZDIFFERENTACCESSTOKEN", + "subjectTokenType": "urn:ietf:params:oauth:token-type:access_token", + "requestedTokenType": "urn:ietf:params:oauth:token-type:access_token" + }, + "output": { + "accessToken": "aoal-YigITUDiNX1xZwOMXM5MxOWDL0E0jg9P6_C_jKQPxS_SKCP6f0kh1Up4g7TtvQqkMnD-GJiU_S1gvug6SrggAkc0:MGYCMQD3IatVjV7jAJU91kK3PkS/SfA2wtgWzOgZWDOR7sDGN9t0phCZz5It/aes/3C1Zj0CMQCKWOgRaiz6AIhza3DSXQNMLjRKXC8F8ceCsHlgYLMZ7hZidEXAMPLEACCESSTOKEN", + "tokenType": "Bearer", + "expiresIn": 1579729529, + "idToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhd3M6aWRlbnRpdHlfc3RvcmVfaWQiOiJkLTMzMzMzMzMzMzMiLCJzdWIiOiI3MzA0NDhmMi1lMGExLTcwYTctYzk1NC0wMDAwMDAwMDAwMDAiLCJhd3M6aW5zdGFuY2VfYWNjb3VudCI6IjExMTExMTExMTExMSIsInN0czppZGVudGl0eV9jb250ZXh0IjoiRVhBTVBMRUlERU5USVRZQ09OVEVYVCIsImlzcyI6Imh0dHBzOi8vaWRlbnRpdHljZW50ZXIuYW1hem9uYXdzLmNvbS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmlkZW50aXR5X3N0b3JlX2FybiI6ImFybjphd3M6aWRlbnRpdHlzdG9yZTo6MTExMTExMTExMTExOmlkZW50aXR5c3RvcmUvZC0zMzMzMzMzMzMzIiwiYXVkIjoiYXJuOmF3czpzc286OjEyMzQ1Njc4OTAxMjphcHBsaWNhdGlvbi9zc29pbnMtMTExMTExMTExMTExL2FwbC0yMjIyMjIyMjIyMjIiLCJhd3M6aW5zdGFuY2VfYXJuIjoiYXJuOmF3czpzc286OjppbnN0YW5jZS9zc29pbnMtMTExMTExMTExMTExIiwiYXdzOmNyZWRlbnRpYWxfaWQiOiJfWlIyTjZhVkJqMjdGUEtheWpfcEtwVjc3QVBERl80MXB4ZXRfWWpJdUpONlVJR2RBdkpFWEFNUExFQ1JFRElEIiwiYXV0aF90aW1lIjoiMjAyMC0wMS0yMlQxMjo0NToyOVoiLCJleHAiOjE1Nzk3Mjk1MjksImlhdCI6MTU3OTcyNTkyOX0.5SYiW1kMsuUr7nna-l5tlakM0GNbMHvIM2_n0QD23jM", + "issuedTokenType": "urn:ietf:params:oauth:token-type:access_token", + "scope": [ + "openid", + "aws", + "sts:identity_context" + ] + } } ], "smithy.api#http": { @@ -1355,6 +1455,12 @@ "traits": { "smithy.api#documentation": "

Used only when calling this API for the Token Exchange grant type. This value specifies\n the type of token that the requester can receive. The following values are supported:

\n

* Access Token - urn:ietf:params:oauth:token-type:access_token\n

\n

* Refresh Token - urn:ietf:params:oauth:token-type:refresh_token\n

" } + }, + "codeVerifier": { + "target": "com.amazonaws.ssooidc#CodeVerifier", + "traits": { + "smithy.api#documentation": "

Used only when calling this API for the Authorization Code grant type. This value is generated\n by the client and presented to validate the original code challenge value the client passed at\n authorization time.

" + } } }, "traits": { @@ -1367,7 +1473,7 @@ "accessToken": { "target": "com.amazonaws.ssooidc#AccessToken", "traits": { - "smithy.api#documentation": "

A bearer token to access AWS accounts and applications assigned to a user.

" + "smithy.api#documentation": "

A bearer token to access Amazon Web Services accounts and applications assigned to a user.

" } }, "tokenType": { @@ -1452,6 +1558,12 @@ "com.amazonaws.ssooidc#GrantType": { "type": "string" }, + "com.amazonaws.ssooidc#GrantTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.ssooidc#GrantType" + } + }, "com.amazonaws.ssooidc#IdToken": { "type": "string", "traits": { @@ -1552,6 +1664,28 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.ssooidc#InvalidRedirectUriException": { + "type": "structure", + "members": { + "error": { + "target": "com.amazonaws.ssooidc#Error", + "traits": { + "smithy.api#documentation": "

Single error code.\n For this exception the value will be invalid_redirect_uri.

" + } + }, + "error_description": { + "target": "com.amazonaws.ssooidc#ErrorDescription", + "traits": { + "smithy.api#documentation": "

Human-readable text providing additional information, used to assist the\n client developer in understanding the error that occurred.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Indicates that one or more redirect URI in the request is not supported for this operation.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.ssooidc#InvalidRequestException": { "type": "structure", "members": { @@ -1639,6 +1773,12 @@ "smithy.api#default": 0 } }, + "com.amazonaws.ssooidc#RedirectUris": { + "type": "list", + "member": { + "target": "com.amazonaws.ssooidc#URI" + } + }, "com.amazonaws.ssooidc#RefreshToken": { "type": "string", "traits": { @@ -1663,11 +1803,17 @@ { "target": "com.amazonaws.ssooidc#InvalidClientMetadataException" }, + { + "target": "com.amazonaws.ssooidc#InvalidRedirectUriException" + }, { "target": "com.amazonaws.ssooidc#InvalidRequestException" }, { "target": "com.amazonaws.ssooidc#InvalidScopeException" + }, + { + "target": "com.amazonaws.ssooidc#UnsupportedGrantTypeException" } ], "traits": { @@ -1683,7 +1829,16 @@ "scopes": [ "sso:account:access", "codewhisperer:completions" - ] + ], + "redirectUris": [ + "127.0.0.1:PORT/oauth/callback" + ], + "grantTypes": [ + "authorization_code", + "refresh_token" + ], + "issuerUrl": "https://identitycenter.amazonaws.com/ssoins-1111111111111111", + "entitledApplicationArn": "arn:aws:sso::ACCOUNTID:application/ssoins-1111111111111111/apl-1111111111111111" }, "output": { "clientId": "_yzkThXVzLWVhc3QtMQEXAMPLECLIENTID", @@ -1723,6 +1878,30 @@ "traits": { "smithy.api#documentation": "

The list of scopes that are defined by the client. Upon authorization, this list is used\n to restrict permissions when granting an access token.

" } + }, + "redirectUris": { + "target": "com.amazonaws.ssooidc#RedirectUris", + "traits": { + "smithy.api#documentation": "

The list of redirect URI that are defined by the client. At completion of authorization,\n this list is used to restrict what locations the user agent can be redirected back to.

" + } + }, + "grantTypes": { + "target": "com.amazonaws.ssooidc#GrantTypes", + "traits": { + "smithy.api#documentation": "

The list of OAuth 2.0 grant types that are defined by the client. This list is used to\n restrict the token granting flows available to the client.

" + } + }, + "issuerUrl": { + "target": "com.amazonaws.ssooidc#URI", + "traits": { + "smithy.api#documentation": "

The IAM Identity Center Issuer URL associated with an instance of IAM Identity Center. This value is needed for user access to resources through the client.

" + } + }, + "entitledApplicationArn": { + "target": "com.amazonaws.ssooidc#ArnType", + "traits": { + "smithy.api#documentation": "

This IAM Identity Center application ARN is used to define administrator-managed configuration for public client access to resources. At\n authorization, the scopes, grants, and redirect URI available to this client will be restricted by this application resource.

" + } } }, "traits": { diff --git a/models/storage-gateway.json b/models/storage-gateway.json index 65ccd25653..2e87c99d34 100644 --- a/models/storage-gateway.json +++ b/models/storage-gateway.json @@ -87,7 +87,7 @@ "GatewayTimezone": { "target": "com.amazonaws.storagegateway#GatewayTimezone", "traits": { - "smithy.api#documentation": "

A value that indicates the time zone you want to set for the gateway. The time zone is\n of the format \"GMT-hr:mm\" or \"GMT+hr:mm\". For example, GMT-4:00 indicates the time is 4\n hours behind GMT. GMT+2:00 indicates the time is 2 hours ahead of GMT. The time zone is\n used, for example, for scheduling snapshots and your gateway's maintenance\n schedule.

", + "smithy.api#documentation": "

A value that indicates the time zone you want to set for the gateway. The time zone is\n of the format \"GMT\", \"GMT-hr:mm\", or \"GMT+hr:mm\". For example, GMT indicates Greenwich Mean\n Time without any offset. GMT-4:00 indicates the time is 4 hours behind GMT. GMT+2:00\n indicates the time is 2 hours ahead of GMT. The time zone is used, for example, for\n scheduling snapshots and your gateway's maintenance schedule.

", "smithy.api#required": {} } }, @@ -101,7 +101,7 @@ "GatewayType": { "target": "com.amazonaws.storagegateway#GatewayType", "traits": { - "smithy.api#documentation": "

A value that defines the type of gateway to activate. The type specified is critical to\n all later functions of the gateway and cannot be changed after activation. The default\n value is CACHED.

\n

Valid Values: STORED | CACHED | VTL |\n VTL_SNOW | FILE_S3 | FILE_FSX_SMB\n

" + "smithy.api#documentation": "

A value that defines the type of gateway to activate. The type specified is critical to\n all later functions of the gateway and cannot be changed after activation. The default\n value is CACHED.

\n

Valid Values: STORED | CACHED | VTL\n | FILE_S3 |\n FILE_FSX_SMB\n

" } }, "TapeDriveType": { @@ -818,6 +818,23 @@ } } }, + "com.amazonaws.storagegateway#AutomaticUpdatePolicy": { + "type": "enum", + "members": { + "ALL_VERSIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALL_VERSIONS" + } + }, + "EMERGENCY_VERSIONS_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EMERGENCY_VERSIONS_ONLY" + } + } + } + }, "com.amazonaws.storagegateway#AvailabilityMonitorTestStatus": { "type": "enum", "members": { @@ -3721,7 +3738,7 @@ "HostEnvironment": { "target": "com.amazonaws.storagegateway#HostEnvironment", "traits": { - "smithy.api#documentation": "

The type of hardware or software platform on which the gateway is running.

" + "smithy.api#documentation": "

The type of hardware or software platform on which the gateway is running.

\n \n

Tape Gateway is no longer available on Snow Family devices.

\n
" } }, "EndpointType": { @@ -3789,7 +3806,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns your gateway's weekly maintenance start time including the day and time of\n the week. Note that values are in terms of the gateway's time zone.

", + "smithy.api#documentation": "

Returns your gateway's maintenance window schedule information, with values for\n monthly or weekly cadence, specific day and time to begin maintenance, and which types of\n updates to apply. Time values returned are for the gateway's time zone.

", "smithy.api#examples": [ { "title": "To describe gateway's maintenance start time", @@ -3850,7 +3867,7 @@ "DayOfMonth": { "target": "com.amazonaws.storagegateway#DayOfMonth", "traits": { - "smithy.api#documentation": "

The day of the month component of the maintenance start time represented as an ordinal\n number from 1 to 28, where 1 represents the first day of the month and 28 represents the\n last day of the month.

" + "smithy.api#documentation": "

The day of the month component of the maintenance start time represented as an ordinal\n number from 1 to 28, where 1 represents the first day of the month. It is not possible to\n set the maintenance schedule to start on days 29 through 31.

" } }, "Timezone": { @@ -3858,10 +3875,16 @@ "traits": { "smithy.api#documentation": "

A value that indicates the time zone that is set for the gateway. The start time and day\n of week specified should be in the time zone of the gateway.

" } + }, + "SoftwareUpdatePreferences": { + "target": "com.amazonaws.storagegateway#SoftwareUpdatePreferences", + "traits": { + "smithy.api#documentation": "

A set of variables indicating the software update preferences for the gateway.

\n

Includes AutomaticUpdatePolicy field with the following inputs:

\n

\n ALL_VERSIONS - Enables regular gateway maintenance updates.

\n

\n EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance\n updates.
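For reference, a hedged Soto (Swift) sketch of reading the maintenance window returned by DescribeMaintenanceStartTime, including the new SoftwareUpdatePreferences member; field names mirror the members above, while the generated Swift signatures are assumed.

    import SotoStorageGateway

    // Hedged sketch: inspect a gateway's maintenance window and update policy.
    func printMaintenanceWindow(storageGateway: StorageGateway, gatewayARN: String) async throws {
        let output = try await storageGateway.describeMaintenanceStartTime(.init(gatewayARN: gatewayARN))
        // DayOfMonth (1-28) is present for a monthly cadence, DayOfWeek (0-6) for a weekly one.
        print("Day of month:", output.dayOfMonth.map { String($0) } ?? "not set")
        print("Day of week:", output.dayOfWeek.map { String($0) } ?? "not set")
        print("Starts at \(output.hourOfDay ?? 0):\(output.minuteOfHour ?? 0) in \(output.timezone ?? "gateway time zone")")
        // ALL_VERSIONS keeps regular maintenance updates on; EMERGENCY_VERSIONS_ONLY turns them off.
        print("Update policy:", String(describing: output.softwareUpdatePreferences?.automaticUpdatePolicy))
    }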

" + } } }, "traits": { - "smithy.api#documentation": "

A JSON object containing the following fields:

\n ", + "smithy.api#documentation": "

A JSON object containing the following fields:

\n ", "smithy.api#output": {} } }, @@ -4028,7 +4051,7 @@ "SMBSecurityStrategy": { "target": "com.amazonaws.storagegateway#SMBSecurityStrategy", "traits": { - "smithy.api#documentation": "

The type of security strategy that was specified for file gateway.

\n
    \n
  • \n

    \n ClientSpecified: If you use this option, requests are established\n based on what is negotiated by the client. This option is recommended when you want\n to maximize compatibility across different clients in your environment. Only\n supported for S3 File Gateways.

    \n
  • \n
  • \n

    \n MandatorySigning: If you use this option, file gateway only allows\n connections from SMBv2 or SMBv3 clients that have signing enabled. This option works\n with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

    \n
  • \n
  • \n

    \n MandatoryEncryption: If you use this option, file gateway only allows\n connections from SMBv3 clients that have encryption enabled. This option is highly\n recommended for environments that handle sensitive data. This option works with SMB\n clients on Microsoft Windows 8, Windows Server 2012 or newer.

    \n
  • \n
" + "smithy.api#documentation": "

The type of security strategy that was specified for file gateway.

\n
    \n
  • \n

    \n ClientSpecified: If you choose this option, requests are established\n based on what is negotiated by the client. This option is recommended when you want\n to maximize compatibility across different clients in your environment. Supported\n only for S3 File Gateway.

    \n
  • \n
  • \n

    \n MandatorySigning: If you choose this option, File Gateway only allows\n connections from SMBv2 or SMBv3 clients that have signing turned on. This option\n works with SMB clients on Microsoft Windows Vista, Windows Server 2008, or later.\n

    \n
  • \n
  • \n

    \n MandatoryEncryption: If you choose this option, File Gateway only\n allows connections from SMBv3 clients that have encryption turned on. Both 256-bit\n and 128-bit algorithms are allowed. This option is recommended for environments that\n handle sensitive data. It works with SMB clients on Microsoft Windows 8, Windows\n Server 2012, or later.

    \n
  • \n
  • \n

    \n MandatoryEncryptionNoAes128: If you choose this option, File Gateway\n only allows connections from SMBv3 clients that use 256-bit AES encryption\n algorithms. 128-bit algorithms are not allowed. This option is recommended for\n environments that handle sensitive data. It works with SMB clients on Microsoft\n Windows 8, Windows Server 2012, or later.

    \n
  • \n
" } }, "FileSharesVisible": { @@ -5971,7 +5994,7 @@ "HostEnvironment": { "target": "com.amazonaws.storagegateway#HostEnvironment", "traits": { - "smithy.api#documentation": "

The type of hardware or software platform on which the gateway is running.

" + "smithy.api#documentation": "

The type of hardware or software platform on which the gateway is running.

\n \n

Tape Gateway is no longer available on Snow Family devices.

\n
" } }, "HostEnvironmentId": { @@ -7751,7 +7774,7 @@ "FolderList": { "target": "com.amazonaws.storagegateway#FolderList", "traits": { - "smithy.api#documentation": "

A comma-separated list of the paths of folders to refresh in the cache. The default is\n [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3\n bucket that the file share has access to is refreshed.

" + "smithy.api#documentation": "

A comma-separated list of the paths of folders to refresh in the cache. The default is\n [\"/\"]. The default refreshes objects and folders at the root of the Amazon S3 bucket. If Recursive is set to true, the entire S3\n bucket that the file share has access to is refreshed.

\n

Do not include / when specifying folder names. For example, you would\n specify samplefolder rather than samplefolder/.
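A small Soto (Swift) sketch of the folder-name rule above; the fileShareARN value is a placeholder and the generated initializer labels are assumed.

    import SotoStorageGateway

    // Hedged sketch: refresh one folder of an S3 File Gateway share.
    // Per the note above, the folder name is written as "samplefolder", not "samplefolder/".
    func refreshSampleFolder(storageGateway: StorageGateway, fileShareARN: String) async throws {
        let output = try await storageGateway.refreshCache(.init(
            fileShareARN: fileShareARN,
            folderList: ["samplefolder"],
            recursive: true   // also refresh objects and folders beneath it
        ))
        print("Refresh started, notification ID:", output.notificationId ?? "unknown")
    }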

" } }, "Recursive": { @@ -8321,6 +8344,12 @@ "traits": { "smithy.api#enumValue": "MandatoryEncryption" } + }, + "MandatoryEncryptionNoAes128": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MandatoryEncryptionNoAes128" + } } } }, @@ -8540,6 +8569,20 @@ "smithy.api#pattern": "^\\Asnap-([0-9A-Fa-f]{8}|[0-9A-Fa-f]{17})\\z$" } }, + "com.amazonaws.storagegateway#SoftwareUpdatePreferences": { + "type": "structure", + "members": { + "AutomaticUpdatePolicy": { + "target": "com.amazonaws.storagegateway#AutomaticUpdatePolicy", + "traits": { + "smithy.api#documentation": "

Indicates the automatic update policy for a gateway.

\n

\n ALL_VERSIONS - Enables regular gateway maintenance updates.

\n

\n EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance\n updates.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A set of variables indicating the software update preferences for the gateway.

" + } + }, "com.amazonaws.storagegateway#SoftwareUpdatesEndDate": { "type": "string", "traits": { @@ -10896,7 +10939,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a gateway's metadata, which includes the gateway's name and time zone.\n To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in\n your request.

\n \n

For gateways activated after September 2, 2015, the gateway's ARN contains the\n gateway ID rather than the gateway name. However, changing the name of the gateway has\n no effect on the gateway's ARN.

\n
", + "smithy.api#documentation": "

Updates a gateway's metadata, which includes the gateway's name, time zone,\n and metadata cache size. To specify which gateway to update, use the Amazon Resource Name\n (ARN) of the gateway in your request.

\n \n

For gateways activated after September 2, 2015, the gateway's ARN contains the\n gateway ID rather than the gateway name. However, changing the name of the gateway has\n no effect on the gateway's ARN.

\n
", "smithy.api#examples": [ { "title": "To update a gateway's metadata", @@ -10941,7 +10984,7 @@ "GatewayCapacity": { "target": "com.amazonaws.storagegateway#GatewayCapacity", "traits": { - "smithy.api#documentation": "

Specifies the size of the gateway's metadata cache.

" + "smithy.api#documentation": "

Specifies the size of the gateway's metadata cache. This setting impacts gateway\n performance and hardware recommendations. For more information, see Performance guidance for gateways with multiple file shares\n in the Amazon S3 File Gateway User Guide.

" } } }, @@ -11043,7 +11086,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a gateway's weekly maintenance start time information, including day and\n time of the week. The maintenance time is the time in your gateway's time zone.

", + "smithy.api#documentation": "

Updates a gateway's maintenance window schedule, with settings for monthly or\n weekly cadence, specific day and time to begin maintenance, and which types of updates to\n apply. Time configuration uses the gateway's time zone. You can pass values for a complete\n maintenance schedule, or update policy, or both. Previous values will persist for whichever\n setting you choose not to modify. If an incomplete or invalid maintenance schedule is\n passed, the entire request will be rejected with an error and no changes will occur.

\n

A complete maintenance schedule must include values for both\n MinuteOfHour and HourOfDay, and either\n DayOfMonth\n or\n DayOfWeek.

\n \n

We recommend keeping maintenance updates turned on, except in specific use cases\n where the brief disruptions caused by updating the gateway could critically impact your\n deployment.

\n
", "smithy.api#examples": [ { "title": "To update a gateway's maintenance start time", @@ -11073,32 +11116,36 @@ "HourOfDay": { "target": "com.amazonaws.storagegateway#HourOfDay", "traits": { - "smithy.api#documentation": "

The hour component of the maintenance start time represented as hh,\n where hh is the hour (00 to 23). The hour of the day is in the time\n zone of the gateway.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The hour component of the maintenance start time represented as hh,\n where hh is the hour (00 to 23). The hour of the day is in the time\n zone of the gateway.

" } }, "MinuteOfHour": { "target": "com.amazonaws.storagegateway#MinuteOfHour", "traits": { - "smithy.api#documentation": "

The minute component of the maintenance start time represented as\n mm, where mm is the minute (00 to 59). The\n minute of the hour is in the time zone of the gateway.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The minute component of the maintenance start time represented as\n mm, where mm is the minute (00 to 59). The\n minute of the hour is in the time zone of the gateway.

" } }, "DayOfWeek": { "target": "com.amazonaws.storagegateway#DayOfWeek", "traits": { - "smithy.api#documentation": "

The day of the week component of the maintenance start time week represented as an\n ordinal number from 0 to 6, where 0 represents Sunday and 6 Saturday.

" + "smithy.api#documentation": "

The day of the week component of the maintenance start time week represented as an\n ordinal number from 0 to 6, where 0 represents Sunday and 6 represents Saturday.

" } }, "DayOfMonth": { "target": "com.amazonaws.storagegateway#DayOfMonth", "traits": { - "smithy.api#documentation": "

The day of the month component of the maintenance start time represented as an ordinal\n number from 1 to 28, where 1 represents the first day of the month and 28 represents the\n last day of the month.

" + "smithy.api#documentation": "

The day of the month component of the maintenance start time represented as an ordinal\n number from 1 to 28, where 1 represents the first day of the month. It is not possible to\n set the maintenance schedule to start on days 29 through 31.

" + } + }, + "SoftwareUpdatePreferences": { + "target": "com.amazonaws.storagegateway#SoftwareUpdatePreferences", + "traits": { + "smithy.api#documentation": "

A set of variables indicating the software update preferences for the gateway.

\n

Includes AutomaticUpdatePolicy field with the following inputs:

\n

\n ALL_VERSIONS - Enables regular gateway maintenance updates.

\n

\n EMERGENCY_VERSIONS_ONLY - Disables regular gateway maintenance\n updates.
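To make the schedule rules above concrete, here is a hedged Soto (Swift) sketch that sets a monthly maintenance window and limits automatic updates to emergency versions; all values and the generated initializer labels are illustrative.

    import SotoStorageGateway

    // Hedged sketch: monthly maintenance window on day 3 at 02:30 (gateway time zone),
    // applying only emergency software updates automatically.
    func setMonthlyMaintenanceWindow(storageGateway: StorageGateway, gatewayARN: String) async throws {
        _ = try await storageGateway.updateMaintenanceStartTime(.init(
            dayOfMonth: 3,            // 1-28; use dayOfWeek instead for a weekly cadence
            gatewayARN: gatewayARN,
            hourOfDay: 2,
            minuteOfHour: 30,
            // Swift enum case name assumed from the EMERGENCY_VERSIONS_ONLY value.
            softwareUpdatePreferences: .init(automaticUpdatePolicy: .emergencyVersionsOnly)
        ))
    }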

" } } }, "traits": { - "smithy.api#documentation": "

A JSON object containing the following fields:

\n ", + "smithy.api#documentation": "

A JSON object containing the following fields:

\n ", "smithy.api#input": {} } }, @@ -11529,7 +11576,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the SMB security strategy on a file gateway. This action is only supported in\n file gateways.

\n \n

This API is called Security level in the User Guide.

\n

A higher security level can affect performance of the gateway.

\n
" + "smithy.api#documentation": "

Updates the SMB security strategy level for an Amazon S3 file gateway. This\n action is only supported for Amazon S3 file gateways.

\n \n

For information about configuring this setting using the Amazon Web Services console,\n see Setting a security level for your gateway in the Amazon S3\n File Gateway User Guide.

\n

A higher security strategy level can affect performance of the gateway.

\n
" } }, "com.amazonaws.storagegateway#UpdateSMBSecurityStrategyInput": { @@ -11544,7 +11591,7 @@ "SMBSecurityStrategy": { "target": "com.amazonaws.storagegateway#SMBSecurityStrategy", "traits": { - "smithy.api#documentation": "

Specifies the type of security strategy.

\n

ClientSpecified: if you use this option, requests are established based on what is\n negotiated by the client. This option is recommended when you want to maximize\n compatibility across different clients in your environment. Supported only in S3 File\n Gateway.

\n

MandatorySigning: if you use this option, file gateway only allows connections from\n SMBv2 or SMBv3 clients that have signing enabled. This option works with SMB clients on\n Microsoft Windows Vista, Windows Server 2008 or newer.

\n

MandatoryEncryption: if you use this option, file gateway only allows connections from\n SMBv3 clients that have encryption enabled. This option is highly recommended for\n environments that handle sensitive data. This option works with SMB clients on Microsoft\n Windows 8, Windows Server 2012 or newer.

", + "smithy.api#documentation": "

Specifies the type of security strategy.

\n

\n ClientSpecified: If you choose this option, requests are established based\n on what is negotiated by the client. This option is recommended when you want to maximize\n compatibility across different clients in your environment. Supported only for S3 File\n Gateway.

\n

\n MandatorySigning: If you choose this option, File Gateway only allows\n connections from SMBv2 or SMBv3 clients that have signing enabled. This option works with\n SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer.

\n

\n MandatoryEncryption: If you choose this option, File Gateway only allows\n connections from SMBv3 clients that have encryption enabled. This option is recommended for\n environments that handle sensitive data. This option works with SMB clients on Microsoft\n Windows 8, Windows Server 2012 or newer.

\n

\n MandatoryEncryptionNoAes128: If you choose this option, File Gateway only\n allows connections from SMBv3 clients that use 256-bit AES encryption algorithms. 128-bit\n algorithms are not allowed. This option is recommended for environments that handle\n sensitive data. It works with SMB clients on Microsoft Windows 8, Windows Server 2012, or\n later.
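A hedged Soto (Swift) sketch of selecting the new strategy level described above; the Swift enum case name for MandatoryEncryptionNoAes128 is assumed from the enum value in this model.

    import SotoStorageGateway

    // Hedged sketch: require SMBv3 with 256-bit AES only (128-bit AES clients are rejected).
    func requireAes256Only(storageGateway: StorageGateway, gatewayARN: String) async throws {
        _ = try await storageGateway.updateSMBSecurityStrategy(.init(
            gatewayARN: gatewayARN,
            smbSecurityStrategy: .mandatoryEncryptionNoAes128   // case name assumed from the model value
        ))
    }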

", "smithy.api#required": {} } } diff --git a/models/swf.json b/models/swf.json index a5cf27b1f9..03d54ccf9c 100644 --- a/models/swf.json +++ b/models/swf.json @@ -1847,6 +1847,96 @@ "smithy.api#error": "client" } }, + "com.amazonaws.swf#DeleteActivityType": { + "type": "operation", + "input": { + "target": "com.amazonaws.swf#DeleteActivityTypeInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.swf#OperationNotPermittedFault" + }, + { + "target": "com.amazonaws.swf#TypeNotDeprecatedFault" + }, + { + "target": "com.amazonaws.swf#UnknownResourceFault" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified activity type.

\n

Note: Prior to deletion, activity types must first be deprecated.

\n

\n After an activity type has been deleted, you cannot schedule new activities of that type. Activities that started before the type was deleted will continue to run.\n

\n

\n Access Control\n

\n

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

\n
    \n
  • \n

    Use a Resource element with the domain name to limit the action to\n only specified domains.

    \n
  • \n
  • \n

    Use an Action element to allow or deny permission to call this\n action.

    \n
  • \n
  • \n

    Constrain the following parameters by using a Condition element with\n the appropriate keys.

    \n
      \n
    • \n

      \n activityType.name: String constraint. The key is\n swf:activityType.name.

      \n
    • \n
    • \n

      \n activityType.version: String constraint. The key is\n swf:activityType.version.

      \n
    • \n
    \n
  • \n
\n

If the caller doesn't have sufficient permissions to invoke the action, or the\n parameter values fall outside the specified constraints, the action fails. The associated\n event attribute's cause parameter is set to OPERATION_NOT_PERMITTED.\n For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF\n Workflows in the Amazon SWF Developer Guide.

" + } + }, + "com.amazonaws.swf#DeleteActivityTypeInput": { + "type": "structure", + "members": { + "domain": { + "target": "com.amazonaws.swf#DomainName", + "traits": { + "smithy.api#documentation": "

The name of the domain in which the activity type is registered.

", + "smithy.api#required": {} + } + }, + "activityType": { + "target": "com.amazonaws.swf#ActivityType", + "traits": { + "smithy.api#documentation": "

The activity type to delete.
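For illustration, a minimal Soto (Swift) sketch of the new operation; the names are placeholders and it assumes the activity type has already been deprecated, as required above.

    import SotoSWF

    // Hedged sketch: delete a deprecated activity type. The call fails with
    // TypeNotDeprecatedFault if DeprecateActivityType has not been run first.
    func deleteDeprecatedActivityType(swf: SWF, domain: String) async throws {
        try await swf.deleteActivityType(.init(
            activityType: .init(name: "ProcessPayment", version: "1.0"),
            domain: domain
        ))
    }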

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.swf#DeleteWorkflowType": { + "type": "operation", + "input": { + "target": "com.amazonaws.swf#DeleteWorkflowTypeInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.swf#OperationNotPermittedFault" + }, + { + "target": "com.amazonaws.swf#TypeNotDeprecatedFault" + }, + { + "target": "com.amazonaws.swf#UnknownResourceFault" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified workflow type.

\n

Note: Prior to deletion, workflow types must first be deprecated.

\n

\n After a workflow type has been deleted, you cannot create new executions of that type. Executions that\n started before the type was deleted will continue to run.\n

\n

\n Access Control\n

\n

You can use IAM policies to control this action's access to Amazon SWF resources as follows:

\n
    \n
  • \n

    Use a Resource element with the domain name to limit the action to\n only specified domains.

    \n
  • \n
  • \n

    Use an Action element to allow or deny permission to call this\n action.

    \n
  • \n
  • \n

    Constrain the following parameters by using a Condition element with\n the appropriate keys.

    \n
      \n
    • \n

      \n workflowType.name: String constraint. The key is\n swf:workflowType.name.

      \n
    • \n
    • \n

      \n workflowType.version: String constraint. The key is\n swf:workflowType.version.

      \n
    • \n
    \n
  • \n
\n

If the caller doesn't have sufficient permissions to invoke the action, or the\n parameter values fall outside the specified constraints, the action fails. The associated\n event attribute's cause parameter is set to OPERATION_NOT_PERMITTED.\n For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF\n Workflows in the Amazon SWF Developer Guide.

" + } + }, + "com.amazonaws.swf#DeleteWorkflowTypeInput": { + "type": "structure", + "members": { + "domain": { + "target": "com.amazonaws.swf#DomainName", + "traits": { + "smithy.api#documentation": "

The name of the domain in which the workflow type is registered.

", + "smithy.api#required": {} + } + }, + "workflowType": { + "target": "com.amazonaws.swf#WorkflowType", + "traits": { + "smithy.api#documentation": "

The workflow type to delete.
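The workflow-type counterpart follows the same deprecate-first rule; another hedged Soto (Swift) sketch with placeholder names.

    import SotoSWF

    // Hedged sketch: delete a deprecated workflow type.
    func deleteDeprecatedWorkflowType(swf: SWF, domain: String) async throws {
        try await swf.deleteWorkflowType(.init(
            domain: domain,
            workflowType: .init(name: "OrderFulfillment", version: "2.0")
        ))
    }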

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.swf#DeprecateActivityType": { "type": "operation", "input": { @@ -1867,7 +1957,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deprecates the specified activity type. After an activity type has\n been deprecated, you cannot create new tasks of that activity type. Tasks of this type that\n were scheduled before the type was deprecated continue to run.

\n \n

This operation is eventually consistent. The results are best effort and may not\n exactly reflect recent updates and changes.

\n
\n

\n Access Control\n

\n

You can use IAM policies to control this action's access to Amazon SWF resources as\n follows:

\n
    \n
  • \n

    Use a Resource element with the domain name to limit the action to\n only specified domains.

    \n
  • \n
  • \n

    Use an Action element to allow or deny permission to call this\n action.

    \n
  • \n
  • \n

    Constrain the following parameters by using a Condition element with\n the appropriate keys.

    \n
      \n
    • \n

      \n activityType.name: String constraint. The key is\n swf:activityType.name.

      \n
    • \n
    • \n

      \n activityType.version: String constraint. The key is\n swf:activityType.version.

      \n
    • \n
    \n
  • \n
\n

If the caller doesn't have sufficient permissions to invoke the action, or the\n parameter values fall outside the specified constraints, the action fails. The associated\n event attribute's cause parameter is set to OPERATION_NOT_PERMITTED.\n For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF\n Workflows in the Amazon SWF Developer Guide.

" + "smithy.api#documentation": "

Deprecates the specified activity type. After an activity type has\n been deprecated, you cannot create new tasks of that activity type. Tasks of this type that\n were scheduled before the type was deprecated continue to run.

\n

\n Access Control\n

\n

You can use IAM policies to control this action's access to Amazon SWF resources as\n follows:

\n
    \n
  • \n

    Use a Resource element with the domain name to limit the action to\n only specified domains.

    \n
  • \n
  • \n

    Use an Action element to allow or deny permission to call this\n action.

    \n
  • \n
  • \n

    Constrain the following parameters by using a Condition element with\n the appropriate keys.

    \n
      \n
    • \n

      \n activityType.name: String constraint. The key is\n swf:activityType.name.

      \n
    • \n
    • \n

      \n activityType.version: String constraint. The key is\n swf:activityType.version.

      \n
    • \n
    \n
  • \n
\n

If the caller doesn't have sufficient permissions to invoke the action, or the\n parameter values fall outside the specified constraints, the action fails. The associated\n event attribute's cause parameter is set to OPERATION_NOT_PERMITTED.\n For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF\n Workflows in the Amazon SWF Developer Guide.

" } }, "com.amazonaws.swf#DeprecateActivityTypeInput": { @@ -5580,6 +5670,12 @@ { "target": "com.amazonaws.swf#CountPendingDecisionTasks" }, + { + "target": "com.amazonaws.swf#DeleteActivityType" + }, + { + "target": "com.amazonaws.swf#DeleteWorkflowType" + }, { "target": "com.amazonaws.swf#DeprecateActivityType" }, @@ -5739,7 +5835,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5782,7 +5877,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -5795,7 +5891,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5809,7 +5904,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5832,7 +5926,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5867,7 +5960,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -5878,14 +5970,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -5899,14 +5993,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -5915,18 +6007,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -5935,7 +6026,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -5955,14 +6047,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -5976,7 +6070,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -5996,7 +6089,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -6007,14 +6099,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -6025,9 +6119,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -7577,6 +7673,18 @@ "smithy.api#error": "client" } }, + "com.amazonaws.swf#TypeNotDeprecatedFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.swf#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Returned when the resource type has not been deprecated.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.swf#UndeprecateActivityType": { "type": "operation", "input": { diff --git a/models/taxsettings.json b/models/taxsettings.json new file mode 100644 index 0000000000..35348647de --- /dev/null +++ b/models/taxsettings.json @@ -0,0 +1,3122 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.taxsettings#AccountDetails": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

The unique account identifier for the account.

" + } + }, + "taxRegistration": { + "target": "com.amazonaws.taxsettings#TaxRegistrationWithJurisdiction", + "traits": { + "smithy.api#documentation": "

Your TRN information. Instead of the full legal address, the TRN information here includes\n jurisdiction details (for example, country code and state/region/province if applicable).

" + } + }, + "taxInheritanceDetails": { + "target": "com.amazonaws.taxsettings#TaxInheritanceDetails", + "traits": { + "smithy.api#documentation": "

\n Tax inheritance information associated with the account.\n

" + } + }, + "accountMetaData": { + "target": "com.amazonaws.taxsettings#AccountMetaData", + "traits": { + "smithy.api#documentation": "

\n The metadata associated with the account.\n 

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object with your accountId and TRN information.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.taxsettings#AccountDetailsList": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#AccountDetails" + } + }, + "com.amazonaws.taxsettings#AccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 12 + }, + "smithy.api#pattern": "^\\d+$" + } + }, + "com.amazonaws.taxsettings#AccountIds": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#AccountId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.taxsettings#AccountMetaData": { + "type": "structure", + "members": { + "accountName": { + "target": "com.amazonaws.taxsettings#AccountName", + "traits": { + "smithy.api#documentation": "

\n The Amazon Web Services account name.\n 

" + } + }, + "seller": { + "target": "com.amazonaws.taxsettings#Seller", + "traits": { + "smithy.api#documentation": "

\n Seller information associated with the account.\n

" + } + }, + "address": { + "target": "com.amazonaws.taxsettings#Address" + }, + "addressType": { + "target": "com.amazonaws.taxsettings#AddressRoleType", + "traits": { + "smithy.api#documentation": "

\n The type of address associated with the legal profile.\n

" + } + }, + "addressRoleMap": { + "target": "com.amazonaws.taxsettings#AddressRoleMap", + "traits": { + "smithy.api#documentation": "

\n Address roles associated with the account containing country code information.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n The metadata associated with the account.\n 

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.taxsettings#AccountName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#AdditionalInfoRequest": { + "type": "structure", + "members": { + "malaysiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#MalaysiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Malaysia.

" + } + }, + "israelAdditionalInfo": { + "target": "com.amazonaws.taxsettings#IsraelAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Israel.

" + } + }, + "estoniaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#EstoniaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Estonia.

" + } + }, + "canadaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#CanadaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Canada.

" + } + }, + "spainAdditionalInfo": { + "target": "com.amazonaws.taxsettings#SpainAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Spain.

" + } + }, + "kenyaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#KenyaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Kenya.

" + } + }, + "southKoreaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#SouthKoreaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in South Korea.

" + } + }, + "turkeyAdditionalInfo": { + "target": "com.amazonaws.taxsettings#TurkeyAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Turkey.

" + } + }, + "georgiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#GeorgiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information to specify for a TRN in Georgia.\n

" + } + }, + "italyAdditionalInfo": { + "target": "com.amazonaws.taxsettings#ItalyAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information to specify for a TRN in Italy.\n

" + } + }, + "romaniaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#RomaniaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Romania.

" + } + }, + "ukraineAdditionalInfo": { + "target": "com.amazonaws.taxsettings#UkraineAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Ukraine.\n

" + } + }, + "polandAdditionalInfo": { + "target": "com.amazonaws.taxsettings#PolandAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Poland.\n

" + } + }, + "saudiArabiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#SaudiArabiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Saudi Arabia.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your tax registration number (TRN). Depending\n on the TRN for a specific country, you might need to specify this information when you set\n your TRN.

\n

You can only specify one of the following parameters and the value can't be empty.

\n \n

The parameter that you specify must match the country for the TRN, if available. For\n example, if you set a TRN in Canada for specific provinces, you must also specify the\n canadaAdditionalInfo parameter.
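As a concrete reading of the one-parameter rule above, here is a hedged Soto (Swift) sketch of an AdditionalInfoRequest for a Canadian TRN; the identifier is a placeholder and the generated initializer labels are assumed from the member names in this model.

    import SotoTaxSettings

    // Hedged sketch: additional info for a TRN registered in British Columbia.
    // Only canadaAdditionalInfo is set; mixing in another country's block is not allowed.
    let additionalInfo = TaxSettings.AdditionalInfoRequest(
        canadaAdditionalInfo: .init(
            isResellerAccount: true,                // must be true when a PST ID is supplied for BC/SK/MB
            provincialSalesTaxId: "PST-1234-5678"   // placeholder provincial sales tax ID
        )
    )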

\n
" + } + }, + "com.amazonaws.taxsettings#AdditionalInfoResponse": { + "type": "structure", + "members": { + "malaysiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#MalaysiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Malaysia.

" + } + }, + "israelAdditionalInfo": { + "target": "com.amazonaws.taxsettings#IsraelAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Israel.

" + } + }, + "estoniaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#EstoniaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Estonia.

" + } + }, + "canadaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#CanadaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Canada.

" + } + }, + "brazilAdditionalInfo": { + "target": "com.amazonaws.taxsettings#BrazilAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Brazil. The Tax Settings API\n returns this information in your response when any additional information is present with your\n TRN in Brazil.

" + } + }, + "spainAdditionalInfo": { + "target": "com.amazonaws.taxsettings#SpainAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Spain.

" + } + }, + "kenyaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#KenyaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Kenya.

" + } + }, + "southKoreaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#SouthKoreaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in South Korea.

" + } + }, + "turkeyAdditionalInfo": { + "target": "com.amazonaws.taxsettings#TurkeyAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Turkey.

" + } + }, + "georgiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#GeorgiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Georgia.\n

" + } + }, + "italyAdditionalInfo": { + "target": "com.amazonaws.taxsettings#ItalyAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Italy.\n

" + } + }, + "romaniaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#RomaniaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Romania.

" + } + }, + "ukraineAdditionalInfo": { + "target": "com.amazonaws.taxsettings#UkraineAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Ukraine.\n

" + } + }, + "polandAdditionalInfo": { + "target": "com.amazonaws.taxsettings#PolandAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Poland. \n

" + } + }, + "saudiArabiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#SaudiArabiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Saudi Arabia.\n

" + } + }, + "indiaAdditionalInfo": { + "target": "com.amazonaws.taxsettings#IndiaAdditionalInfo", + "traits": { + "smithy.api#documentation": "

\n Additional tax information in India. \n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN. The Tax Settings API returns\n country-specific information in the response when any additional information is present with\n your TRN for the following countries.

" + } + }, + "com.amazonaws.taxsettings#Address": { + "type": "structure", + "members": { + "addressLine1": { + "target": "com.amazonaws.taxsettings#AddressLine1", + "traits": { + "smithy.api#documentation": "

The first line of the address.

", + "smithy.api#required": {} + } + }, + "addressLine2": { + "target": "com.amazonaws.taxsettings#AddressLine2", + "traits": { + "smithy.api#documentation": "

The second line of the address, if applicable.

" + } + }, + "addressLine3": { + "target": "com.amazonaws.taxsettings#AddressLine3", + "traits": { + "smithy.api#documentation": "

The third line of the address, if applicable. Currently, the Tax Settings API accepts the\n addressLine3 parameter only for Saudi Arabia. When you specify a TRN in Saudi\n Arabia, you must enter the addressLine3 and specify the building number for the\n address. For example, you might enter 1234.

" + } + }, + "districtOrCounty": { + "target": "com.amazonaws.taxsettings#District", + "traits": { + "smithy.api#documentation": "

The district or county in which the address is located.

\n \n

For addresses in Brazil, this parameter uses the name of the neighborhood. When you set\n a TRN in Brazil, use districtOrCounty for the neighborhood name.

\n
" + } + }, + "city": { + "target": "com.amazonaws.taxsettings#City", + "traits": { + "smithy.api#documentation": "

The city that the address is in.

", + "smithy.api#required": {} + } + }, + "stateOrRegion": { + "target": "com.amazonaws.taxsettings#State", + "traits": { + "smithy.api#documentation": "

The state, region, or province in which the address is located.

\n

If this is required for tax settings, use the same name as shown on the Tax Settings page.

" + } + }, + "postalCode": { + "target": "com.amazonaws.taxsettings#PostalCode", + "traits": { + "smithy.api#documentation": "

The postal code associated with the address.

", + "smithy.api#required": {} + } + }, + "countryCode": { + "target": "com.amazonaws.taxsettings#CountryCode", + "traits": { + "smithy.api#documentation": "

The country code for the country that the address is in.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the address associated with the TRN information.
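A hedged Soto (Swift) sketch of the Address shape above, using the Saudi Arabia rule that addressLine3 carries the building number; every value is a placeholder and the generated initializer labels are assumed.

    import SotoTaxSettings

    // Hedged sketch: legal address for a Saudi Arabia TRN.
    let legalAddress = TaxSettings.Address(
        addressLine1: "7044 King Fahd Road",
        addressLine3: "1234",                 // building number, required for Saudi Arabia
        city: "Riyadh",
        countryCode: "SA",                    // two-letter country code
        postalCode: "12271",
        stateOrRegion: "Riyadh Province"      // use the same name shown on the Tax Settings page
    )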

" + } + }, + "com.amazonaws.taxsettings#AddressLine1": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 180 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#AddressLine2": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 60 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#AddressLine3": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 60 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#AddressRoleMap": { + "type": "map", + "key": { + "target": "com.amazonaws.taxsettings#AddressRoleType" + }, + "value": { + "target": "com.amazonaws.taxsettings#Jurisdiction" + } + }, + "com.amazonaws.taxsettings#AddressRoleType": { + "type": "enum", + "members": { + "TAX_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TaxAddress" + } + }, + "BILLING_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BillingAddress" + } + }, + "CONTACT_ADDRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ContactAddress" + } + } + } + }, + "com.amazonaws.taxsettings#BatchDeleteTaxRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#ConflictException" + }, + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants delete permission" + }, + "smithy.api#documentation": "

Deletes tax registration for multiple accounts in batch. This can be used to delete tax\n registrations for up to five accounts in one batch.

\n \n

This API operation can't be used to delete your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost Management console instead.

\n
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/BatchDeleteTaxRegistration" + } + } + }, + "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationError": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

The unique account identifier for the account whose tax registration couldn't be deleted\n during the BatchDeleteTaxRegistration operation.

", + "smithy.api#required": {} + } + }, + "message": { + "target": "com.amazonaws.taxsettings#ErrorMessage", + "traits": { + "smithy.api#documentation": "

The error message for an individual failure in the\n BatchDeleteTaxRegistration operation.

", + "smithy.api#required": {} + } + }, + "code": { + "target": "com.amazonaws.taxsettings#ErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for an individual failure in the BatchDeleteTaxRegistration operation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The error object for representing failures in the BatchDeleteTaxRegistration\n operation.

" + } + }, + "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationErrors": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationError" + } + }, + "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.taxsettings#AccountIds", + "traits": { + "smithy.api#documentation": "

List of unique account identifiers.
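A minimal Soto (Swift) sketch of the batch delete call described above; account IDs are placeholders and the generated module/type names are assumed to follow Soto's usual generation.

    import SotoTaxSettings

    // Hedged sketch: remove the TRN from up to five member accounts in one call.
    func deleteTaxRegistrations(taxSettings: TaxSettings) async throws {
        let response = try await taxSettings.batchDeleteTaxRegistration(
            .init(accountIds: ["111111111111", "222222222222"])
        )
        // Per-account failures are reported rather than failing the whole batch.
        for failure in response.errors {
            print("Could not delete TRN for \(failure.accountId): \(failure.message)")
        }
    }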

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationResponse": { + "type": "structure", + "members": { + "errors": { + "target": "com.amazonaws.taxsettings#BatchDeleteTaxRegistrationErrors", + "traits": { + "smithy.api#documentation": "

The list of errors for the accounts whose TRN information could not be deleted.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#BatchPutTaxRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#BatchPutTaxRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#BatchPutTaxRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#ConflictException" + }, + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants store permission" + }, + "smithy.api#documentation": "

Adds or updates tax registration for multiple accounts in batch. This can be used to add\n or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.

\n

To call this API operation for specific countries, see the following country-specific\n requirements.

\n

\n Bangladesh\n

\n
    \n
  • \n

    You must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.

    \n
  • \n
\n

\n Brazil\n

\n
    \n
  • \n

    You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.

    \n
  • \n
  • \n

    For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.

    \n
  • \n
\n

\n Georgia\n

\n
    \n
  • \n

    The valid personType values are Physical Person and Business.

    \n
  • \n
\n

\n Kenya\n

\n
    \n
  • \n

    You must specify the personType in the kenyaAdditionalInfo\n field of the additionalTaxInformation object.

    \n
  • \n
  • \n

    If the personType is Physical Person, you must specify the\n tax registration certificate document in the taxRegistrationDocuments field\n of the VerificationDetails object.

    \n
  • \n
\n

\n Malaysia\n

\n
    \n
  • \n

    If you use this operation to set a tax registration number (TRN) in Malaysia, only\n resellers with a valid sales and service tax (SST) number are required to provide tax\n registration information.

    \n
  • \n
  • \n

    By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as\n self-declaring that you're an authorized business reseller registered with the Royal\n Malaysia Customs Department (RMCD) and have a valid SST number.

    \n
  • \n
  • \n

    Amazon Web Services reserves the right to seek additional information and/or take other actions to\n support your self-declaration as appropriate.

    \n
  • \n
  • \n

    If you're not a reseller of Amazon Web Services, we don't recommend that you use\n this operation to set the TRN in Malaysia.

    \n
  • \n
  • \n

    Only use this API operation to upload the TRNs for accounts through which you're\n reselling Amazon Web Services.

    \n
  • \n
  • \n

    Amazon Web Services is currently registered under the following service tax codes. You must include\n at least one of the service tax codes in the service tax code strings to declare yourself\n as an authorized registered business reseller.

    \n

    Taxable service and service tax codes:

    \n

    Consultancy - 9907061674

    \n

    Training or coaching service - 9907071685

    \n

    IT service - 9907101676

    \n

    Digital services and electronic medium - 9907121690

    \n
  • \n
\n

\n Nepal\n

\n
    \n
  • \n

The valid sector values are Business and Individual.

    \n
  • \n
\n

\n Saudi Arabia\n

\n
    \n
  • \n

    For address, you must specify addressLine3.

    \n
  • \n
\n

\n South Korea\n

\n
    \n
  • \n

    You must specify the certifiedEmailId and legalName in the\n TaxRegistrationEntry object. Use Korean characters for\n legalName.

    \n
  • \n
  • \n

    You must specify the businessRepresentativeName,\n itemOfBusiness, and lineOfBusiness in the\n southKoreaAdditionalInfo field of the additionalTaxInformation\n object. Use Korean characters for these fields.

    \n
  • \n
  • \n

    You must specify the tax registration certificate document in the\n taxRegistrationDocuments field of the VerificationDetails\n object.

    \n
  • \n
  • \n

For the address object, use Korean characters for addressLine1, addressLine2,\n city, postalCode, and stateOrRegion.

    \n
  • \n
\n

\n Spain\n

\n
    \n
  • \n

    You must specify the registrationType in the\n spainAdditionalInfo field of the additionalTaxInformation\n object.

    \n
  • \n
  • \n

    If the registrationType is Local, you must specify the tax\n registration certificate document in the taxRegistrationDocuments field of\n the VerificationDetails object.

    \n
  • \n
\n

\n Turkey\n

\n
    \n
  • \n

    You must specify the sector in the taxRegistrationEntry object.

    \n
  • \n
  • \n

    If your sector is Business, Individual, or\n Government:

    \n
      \n
    • \n

      Specify the taxOffice. If your\n sector is Individual, don't enter this value.

      \n
    • \n
    • \n

      (Optional) Specify the kepEmailId. If your\n sector is Individual, don't enter this value.

      \n
    • \n
    • \n

      \n Note: In the Tax Settings page of the Billing console, Government appears as Public institutions\n

      \n
    • \n
    \n
  • \n
  • \n

    If your sector is Business and you're subject to KDV tax,\n you must specify your industry in the industries field.

    \n
  • \n
  • \n

    For address, you must specify districtOrCounty.

    \n
  • \n
\n

\n Ukraine\n

\n
    \n
  • \n

The valid sector values are Business and Individual.

    \n
  • \n
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/BatchPutTaxRegistration" + } + } + }, + "com.amazonaws.taxsettings#BatchPutTaxRegistrationError": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

The unique account identifier for the account that the tax registration couldn't be\n added, or updated during the BatchPutTaxRegistration operation.

", + "smithy.api#required": {} + } + }, + "message": { + "target": "com.amazonaws.taxsettings#ErrorMessage", + "traits": { + "smithy.api#documentation": "

The error message for an individual failure in the BatchPutTaxRegistration\n operation.

", + "smithy.api#required": {} + } + }, + "code": { + "target": "com.amazonaws.taxsettings#ErrorCode", + "traits": { + "smithy.api#documentation": "

The error code for an individual failure in the BatchPutTaxRegistration\n operation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The error object for representing failures in the BatchPutTaxRegistration\n operation.

" + } + }, + "com.amazonaws.taxsettings#BatchPutTaxRegistrationErrors": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#BatchPutTaxRegistrationError" + } + }, + "com.amazonaws.taxsettings#BatchPutTaxRegistrationRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.taxsettings#AccountIds", + "traits": { + "smithy.api#documentation": "

List of unique account identifiers.

", + "smithy.api#required": {} + } + }, + "taxRegistrationEntry": { + "target": "com.amazonaws.taxsettings#TaxRegistrationEntry", + "traits": { + "smithy.api#documentation": "

Your TRN information that will be stored for the accounts specified in\n accountIds.
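To show how the entry and account list fit together, a hedged Soto (Swift) sketch follows. The TaxRegistrationEntry member names used here (registrationId, registrationType, legalName, legalAddress) are assumptions not fully shown in this excerpt; adjust them to the generated shape, and treat every value as a placeholder.

    import SotoTaxSettings

    // Hedged sketch: set the same VAT registration on two accounts.
    func putTaxRegistrations(taxSettings: TaxSettings, legalAddress: TaxSettings.Address) async throws {
        let entry = TaxSettings.TaxRegistrationEntry(
            legalAddress: legalAddress,
            legalName: "Example Corp",
            registrationId: "GB123456789",   // placeholder TRN
            registrationType: .vat           // enum case assumed from the VAT value
        )
        let response = try await taxSettings.batchPutTaxRegistration(
            .init(accountIds: ["111111111111", "222222222222"], taxRegistrationEntry: entry)
        )
        // Status is Verified, Pending, or Rejected depending on validation of the TRN.
        print("TRN status after processing:", String(describing: response.status))
    }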

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#BatchPutTaxRegistrationResponse": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.taxsettings#TaxRegistrationStatus", + "traits": { + "smithy.api#documentation": "

The status of your TRN stored in the system after processing. Based on the validation\n occurring on the TRN, the status can be Verified, Pending or\n Rejected.

" + } + }, + "errors": { + "target": "com.amazonaws.taxsettings#BatchPutTaxRegistrationErrors", + "traits": { + "smithy.api#documentation": "

The list of errors for the accounts whose TRN information could not be added or updated.\n 

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#Boolean": { + "type": "boolean" + }, + "com.amazonaws.taxsettings#BrazilAdditionalInfo": { + "type": "structure", + "members": { + "ccmCode": { + "target": "com.amazonaws.taxsettings#CcmCode", + "traits": { + "smithy.api#documentation": "

The Cadastro de Contribuintes Mobiliários (CCM) code for your TRN in Brazil. This only applies for a CNPJ tax type for the São Paulo municipality.

" + } + }, + "legalNatureCode": { + "target": "com.amazonaws.taxsettings#LegalNatureCode", + "traits": { + "smithy.api#documentation": "

Legal nature of business, based on your TRN in Brazil. This only applies for a CNPJ tax\n type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Brazil.

" + } + }, + "com.amazonaws.taxsettings#BusinessRepresentativeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#pattern": "^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$" + } + }, + "com.amazonaws.taxsettings#CanadaAdditionalInfo": { + "type": "structure", + "members": { + "provincialSalesTaxId": { + "target": "com.amazonaws.taxsettings#CanadaProvincialSalesTaxIdString", + "traits": { + "smithy.api#documentation": "

The provincial sales tax ID for your TRN in Canada. This parameter can represent the\n following:

\n
    \n
  • \n

    Provincial sales tax ID number for British Columbia and Saskatchewan provinces

    \n
  • \n
  • \n

    Manitoba retail sales tax ID number for Manitoba province

    \n
  • \n
  • \n

    Quebec sales tax ID number for Quebec province

    \n
  • \n
\n

The Tax Settings API only accepts this parameter if the TRN is specified for the previous\n provinces. For other provinces, the Tax Settings API doesn't accept this parameter.

" + } + }, + "canadaQuebecSalesTaxNumber": { + "target": "com.amazonaws.taxsettings#CanadaQuebecSalesTaxNumberString", + "traits": { + "smithy.api#documentation": "

\n The Quebec Sales Tax ID number. Leave blank if you do not have a Quebec Sales Tax ID number.\n

" + } + }, + "canadaRetailSalesTaxNumber": { + "target": "com.amazonaws.taxsettings#CanadaRetailSalesTaxNumberString", + "traits": { + "smithy.api#documentation": "

\n Manitoba Retail Sales Tax ID number. Customers purchasing Amazon Web Services for resale in Manitoba must provide a valid Retail Sales Tax ID number for Manitoba. Leave this blank if you do not have a Retail Sales Tax ID number in Manitoba or are not purchasing Amazon Web Services for resale.\n

" + } + }, + "isResellerAccount": { + "target": "com.amazonaws.taxsettings#Boolean", + "traits": { + "smithy.api#documentation": "

The value for this parameter must be true if the\n provincialSalesTaxId value is provided for a TRN in British Columbia,\n Saskatchewan, or Manitoba provinces.

\n

To claim a provincial sales tax (PST) and retail sales tax (RST) reseller exemption, you\n must confirm that purchases from this account were made for resale. Otherwise, remove the PST\n or RST number from the provincialSalesTaxId parameter from your request.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Canada.

" + } + }, + "com.amazonaws.taxsettings#CanadaProvincialSalesTaxIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 7, + "max": 16 + }, + "smithy.api#pattern": "^([0-9A-Z/-]+)$" + } + }, + "com.amazonaws.taxsettings#CanadaQuebecSalesTaxNumberString": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9]{10})(TQ[0-9]{4})?$" + } + }, + "com.amazonaws.taxsettings#CanadaRetailSalesTaxNumberString": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9]{6}-[0-9]{1})$" + } + }, + "com.amazonaws.taxsettings#CcmCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1024 + }, + "smithy.api#pattern": "^\\d+$" + } + }, + "com.amazonaws.taxsettings#CertifiedEmailId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,20}$" + } + }, + "com.amazonaws.taxsettings#CigNumber": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9A-Z]{1,15})$" + } + }, + "com.amazonaws.taxsettings#City": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.taxsettings#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "errorCode": { + "target": "com.amazonaws.taxsettings#ErrorCode", + "traits": { + "smithy.api#documentation": "

409

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The exception thrown when the input creates a conflict with the given state.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.taxsettings#CountryCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 2 + }, + "smithy.api#pattern": "^[a-zA-Z]+$" + } + }, + "com.amazonaws.taxsettings#CupNumber": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9A-Z]{1,15})$" + } + }, + "com.amazonaws.taxsettings#DateOfBirth": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 10, + "max": 10 + }, + "smithy.api#pattern": "^(\\d{4}-(0[0-9]|1[0-2])-([0-2][0-9]|3[0-1]))$" + } + }, + "com.amazonaws.taxsettings#DeleteTaxRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#DeleteTaxRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#DeleteTaxRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#ConflictException" + }, + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants delete permission" + }, + "smithy.api#documentation": "

Deletes tax registration for a single account.

This API operation can't be used to delete your tax registration in Brazil. Use the Payment preferences page in the Billing and Cost Management console instead.
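For orientation, a hedged Soto sketch of this call is shown below. It assumes `taxSettings` is an already-configured Soto TaxSettings client (see the client-setup sketch near the service definition later in this model); the generated method and parameter labels follow Soto's usual conventions and are not taken from this patch.

// Sketch: deletes the TRN for a member account; omit accountId to target the caller's account.
// Note the Brazil restriction described above.
_ = try await taxSettings.deleteTaxRegistration(
    .init(accountId: "111122223333")
)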
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/DeleteTaxRegistration" + } + } + }, + "com.amazonaws.taxsettings#DeleteTaxRegistrationRequest": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

Unique account identifier for the TRN information that needs to be deleted. If this isn't\n passed, the account ID corresponding to the credentials of the API caller will be used for\n this parameter.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#DeleteTaxRegistrationResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#DestinationFilePath": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#DestinationS3Location": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.taxsettings#S3BucketName", + "traits": { + "smithy.api#documentation": "

The name of your Amazon S3 bucket that you specify to download your tax documents to.

", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.taxsettings#S3Prefix", + "traits": { + "smithy.api#documentation": "

The Amazon S3 object prefix that you specify for your tax document file.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The location of the Amazon S3 bucket that you specify to download your tax documents to.

" + } + }, + "com.amazonaws.taxsettings#District": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#ErrorCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + }, + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#ErrorMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^[\\s\\S]*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.taxsettings#EstoniaAdditionalInfo": { + "type": "structure", + "members": { + "registryCommercialCode": { + "target": "com.amazonaws.taxsettings#RegistryCommercialCode", + "traits": { + "smithy.api#documentation": "

Registry commercial code (RCC) for your TRN in Estonia. This value is an eight-digit numeric string, such as 12345678.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Estonia.

" + } + }, + "com.amazonaws.taxsettings#FieldName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#GeorgiaAdditionalInfo": { + "type": "structure", + "members": { + "personType": { + "target": "com.amazonaws.taxsettings#PersonType", + "traits": { + "smithy.api#documentation": "

\n The legal person or physical person assigned to this TRN in Georgia.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Georgia.\n

" + } + }, + "com.amazonaws.taxsettings#GetTaxRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#GetTaxRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#GetTaxRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants retrieve permission" + }, + "smithy.api#documentation": "

Retrieves tax registration for a single account.
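As a hedged illustration only (the generated Swift surface isn't shown in this part of the patch), fetching a single account's TRN via the Soto-generated client might look roughly like this; method and member names are assumptions based on Soto's conventions.

// Sketch: assumes `taxSettings` is a configured Soto TaxSettings client
let output = try await taxSettings.getTaxRegistration(
    .init(accountId: "111122223333")
)
print(output.taxRegistration?.legalName ?? "no TRN on file")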

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/GetTaxRegistration" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.taxsettings#GetTaxRegistrationDocument": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#GetTaxRegistrationDocumentRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#GetTaxRegistrationDocumentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants retrieve permission" + }, + "smithy.api#documentation": "

Downloads your tax documents to the Amazon S3 bucket that you specify in your request.
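A hedged sketch of the download flow follows, assuming the Soto-generated client and that `metadata` was taken from a prior GetTaxRegistration or ListTaxRegistrations response; shape and parameter labels follow Soto's conventions and are assumptions, and the bucket name is a placeholder.

// Sketch: `metadata` is a TaxSettings.TaxDocumentMetadata from an earlier response
let download = try await taxSettings.getTaxRegistrationDocument(
    .init(
        destinationS3Location: .init(bucket: "my-tax-documents-bucket", prefix: "trn/"),
        taxDocumentMetadata: metadata
    )
)
print(download.destinationFilePath ?? "")  // S3 path the document was written to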

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/GetTaxRegistrationDocument" + } + } + }, + "com.amazonaws.taxsettings#GetTaxRegistrationDocumentRequest": { + "type": "structure", + "members": { + "destinationS3Location": { + "target": "com.amazonaws.taxsettings#DestinationS3Location", + "traits": { + "smithy.api#documentation": "

The Amazon S3 bucket that you specify to download your tax documents to.

", + "smithy.api#required": {} + } + }, + "taxDocumentMetadata": { + "target": "com.amazonaws.taxsettings#TaxDocumentMetadata", + "traits": { + "smithy.api#documentation": "

The metadata for your tax document.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#GetTaxRegistrationDocumentResponse": { + "type": "structure", + "members": { + "destinationFilePath": { + "target": "com.amazonaws.taxsettings#DestinationFilePath", + "traits": { + "smithy.api#documentation": "

The file path of the Amazon S3 bucket where you want to download your tax document to.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#GetTaxRegistrationRequest": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

Your unique account identifier.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#GetTaxRegistrationResponse": { + "type": "structure", + "members": { + "taxRegistration": { + "target": "com.amazonaws.taxsettings#TaxRegistration", + "traits": { + "smithy.api#documentation": "

TRN information of the account mentioned in the request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#IndiaAdditionalInfo": { + "type": "structure", + "members": { + "pan": { + "target": "com.amazonaws.taxsettings#Pan", + "traits": { + "smithy.api#documentation": "

\n India pan information associated with the account.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Additional tax information in India.\n

" + } + }, + "com.amazonaws.taxsettings#IndividualRegistrationNumber": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9]{10})$" + } + }, + "com.amazonaws.taxsettings#Industries": { + "type": "enum", + "members": { + "CIRCULATING_ORG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CirculatingOrg" + } + }, + "PROFESSIONAL_ORG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ProfessionalOrg" + } + }, + "BANKS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Banks" + } + }, + "INSURANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Insurance" + } + }, + "PENSION_AND_BENEFIT_FUNDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PensionAndBenefitFunds" + } + }, + "DEVELOPMENT_AGENCIES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DevelopmentAgencies" + } + } + } + }, + "com.amazonaws.taxsettings#InheritanceObtainedReason": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.taxsettings#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "errorCode": { + "target": "com.amazonaws.taxsettings#ErrorCode", + "traits": { + "smithy.api#documentation": "

500

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The exception thrown when an unexpected error occurs when processing a request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.taxsettings#IsraelAdditionalInfo": { + "type": "structure", + "members": { + "dealerType": { + "target": "com.amazonaws.taxsettings#IsraelDealerType", + "traits": { + "smithy.api#documentation": "

Dealer type for your TRN in Israel. If you're not a local authorized dealer with an\n Israeli VAT ID, specify your tax identification number so that Amazon Web Services can send you\n a compliant tax invoice.

", + "smithy.api#required": {} + } + }, + "customerType": { + "target": "com.amazonaws.taxsettings#IsraelCustomerType", + "traits": { + "smithy.api#documentation": "

Customer type for your TRN in Israel. The value can be Business or Individual. Use Business for entities such as not-for-profit and financial institutions.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Israel.

" + } + }, + "com.amazonaws.taxsettings#IsraelCustomerType": { + "type": "enum", + "members": { + "BUSINESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Business" + } + }, + "INDIVIDUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Individual" + } + } + } + }, + "com.amazonaws.taxsettings#IsraelDealerType": { + "type": "enum", + "members": { + "AUTHORIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Authorized" + } + }, + "NON_AUTHORIZED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Non-authorized" + } + } + } + }, + "com.amazonaws.taxsettings#ItalyAdditionalInfo": { + "type": "structure", + "members": { + "sdiAccountId": { + "target": "com.amazonaws.taxsettings#SdiAccountId", + "traits": { + "smithy.api#documentation": "

\n Additional tax information to specify for a TRN in Italy. Use CodiceDestinatario to receive your invoices via web service (API) or FTP.\n

" + } + }, + "cigNumber": { + "target": "com.amazonaws.taxsettings#CigNumber", + "traits": { + "smithy.api#documentation": "

\n The tender procedure identification code.\n

" + } + }, + "cupNumber": { + "target": "com.amazonaws.taxsettings#CupNumber", + "traits": { + "smithy.api#documentation": "

\n Additional tax information to specify for a TRN in Italy. This is managed by the Interministerial Committee for Economic Planning (CIPE) which characterizes every public investment project (Individual Project Code).\n

" + } + }, + "taxCode": { + "target": "com.amazonaws.taxsettings#TaxCode", + "traits": { + "smithy.api#documentation": "

List of service tax codes for your TRN in Italy. You can use your customer tax code as part of a VAT Group.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Italy.\n

" + } + }, + "com.amazonaws.taxsettings#ItemOfBusiness": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$" + } + }, + "com.amazonaws.taxsettings#Jurisdiction": { + "type": "structure", + "members": { + "stateOrRegion": { + "target": "com.amazonaws.taxsettings#State", + "traits": { + "smithy.api#documentation": "

The state, region, or province associated with the country of the jurisdiction, if\n applicable.

" + } + }, + "countryCode": { + "target": "com.amazonaws.taxsettings#CountryCode", + "traits": { + "smithy.api#documentation": "

The country code of the jurisdiction.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The jurisdiction details of the TRN information of the customers. This doesn't contain the full legal address; it contains only the country code and state/region/province.

" + } + }, + "com.amazonaws.taxsettings#KenyaAdditionalInfo": { + "type": "structure", + "members": { + "personType": { + "target": "com.amazonaws.taxsettings#PersonType", + "traits": { + "smithy.api#documentation": "

The legal person or physical person assigned to this TRN in Kenya.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Kenya.

" + } + }, + "com.amazonaws.taxsettings#KepEmailId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#LegalName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#LegalNatureCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1024 + }, + "smithy.api#pattern": "^\\d+$" + } + }, + "com.amazonaws.taxsettings#LineOfBusiness": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[0-9\\u3130-\\u318F\\uAC00-\\uD7AF,.( )-\\\\s]*$" + } + }, + "com.amazonaws.taxsettings#ListTaxRegistrations": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#ListTaxRegistrationsRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#ListTaxRegistrationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants retrieve permission" + }, + "smithy.api#documentation": "

Retrieves the tax registrations of accounts listed in a consolidated billing family. This operation can retrieve the tax registrations of up to 100 accounts in one call (the default is 50).
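A minimal pagination sketch follows, assuming the Soto-generated client; Soto also generates paginator helpers, but a manual nextToken loop keeps the assumed surface small, and all names below follow Soto's conventions rather than this patch.

// Sketch: walks all pages of linked-account TRN information
var nextToken: String? = nil
repeat {
    let page = try await taxSettings.listTaxRegistrations(
        .init(maxResults: 50, nextToken: nextToken)
    )
    for account in page.accountDetails {
        print(account)  // account ID plus TRN information for the linked account
    }
    nextToken = page.nextToken
} while nextToken != nil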

", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/ListTaxRegistrations" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "accountDetails", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.taxsettings#ListTaxRegistrationsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.taxsettings#MaxResults", + "traits": { + "smithy.api#documentation": "

Number of accountDetails results you want in one response.

" + } + }, + "nextToken": { + "target": "com.amazonaws.taxsettings#PaginationTokenString", + "traits": { + "smithy.api#documentation": "

The token to retrieve the next set of results.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#ListTaxRegistrationsResponse": { + "type": "structure", + "members": { + "accountDetails": { + "target": "com.amazonaws.taxsettings#AccountDetailsList", + "traits": { + "smithy.api#documentation": "

The list of account details. This contains account IDs and TRN information for each of the linked accounts.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.taxsettings#PaginationTokenString", + "traits": { + "smithy.api#documentation": "

The token to retrieve the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#MalaysiaAdditionalInfo": { + "type": "structure", + "members": { + "serviceTaxCodes": { + "target": "com.amazonaws.taxsettings#MalaysiaServiceTaxCodesList", + "traits": { + "smithy.api#documentation": "

List of service tax codes for your TRN in Malaysia.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Malaysia.

" + } + }, + "com.amazonaws.taxsettings#MalaysiaServiceTaxCode": { + "type": "enum", + "members": { + "CONSULTANCY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Consultancy" + } + }, + "DIGITAL_SVC_ELECTRONIC_MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Digital Service And Electronic Medium" + } + }, + "IT_SERVICES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IT Services" + } + }, + "TRAINING_OR_COACHING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Training Or Coaching" + } + } + } + }, + "com.amazonaws.taxsettings#MalaysiaServiceTaxCodesList": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#MalaysiaServiceTaxCode" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4 + }, + "smithy.api#uniqueItems": {} + } + }, + "com.amazonaws.taxsettings#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.taxsettings#PaginationTokenString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2000 + }, + "smithy.api#pattern": "^[-A-Za-z0-9_+\\=\\/]+$" + } + }, + "com.amazonaws.taxsettings#Pan": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Z]{5}[0-9]{4}[A-Z]{1}$" + } + }, + "com.amazonaws.taxsettings#PersonType": { + "type": "enum", + "members": { + "LEGAL_PERSON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Legal Person" + } + }, + "PHYSICAL_PERSON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Physical Person" + } + }, + "BUSINESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Business" + } + } + } + }, + "com.amazonaws.taxsettings#PolandAdditionalInfo": { + "type": "structure", + "members": { + "individualRegistrationNumber": { + "target": "com.amazonaws.taxsettings#IndividualRegistrationNumber", + "traits": { + "smithy.api#documentation": "

\n The individual tax registration number (NIP). Individual NIP is valid for other taxes excluding VAT purposes.\n

" + } + }, + "isGroupVatEnabled": { + "target": "com.amazonaws.taxsettings#Boolean", + "traits": { + "smithy.api#documentation": "

\n True if your business is a member of a VAT group with a NIP active for VAT purposes. Otherwise, this is false.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Poland.\n

" + } + }, + "com.amazonaws.taxsettings#PostalCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#PutTaxRegistration": { + "type": "operation", + "input": { + "target": "com.amazonaws.taxsettings#PutTaxRegistrationRequest" + }, + "output": { + "target": "com.amazonaws.taxsettings#PutTaxRegistrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.taxsettings#ConflictException" + }, + { + "target": "com.amazonaws.taxsettings#InternalServerException" + }, + { + "target": "com.amazonaws.taxsettings#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "documentation": "Grants store permission" + }, + "smithy.api#documentation": "

Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first.

To call this API operation for specific countries, see the following country-specific requirements.

Bangladesh
  • You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Brazil
  • You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation.
  • For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address.

Georgia
  • The valid personType values are Physical Person and Business.

Kenya
  • You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object.
  • If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Malaysia
  • If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information.
  • By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number.
  • Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate.
  • If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia.
  • Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services.
  • Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller.
    Taxable service and service tax codes:
    Consultancy - 9907061674
    Training or coaching service - 9907071685
    IT service - 9907101676
    Digital services and electronic medium - 9907121690

Nepal
  • The valid sector values are Business and Individual.

Saudi Arabia
  • For address, you must specify addressLine3.

South Korea
  • You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName.
  • You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields.
  • You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.
  • For the address object, use Korean characters for addressLine1, addressLine2, city, postalCode, and stateOrRegion.

Spain
  • You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object.
  • If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object.

Turkey
  • You must specify the sector in the taxRegistrationEntry object.
  • If your sector is Business, Individual, or Government:
    • Specify the taxOffice. If your sector is Individual, don't enter this value.
    • (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value.
    • Note: In the Tax Settings page of the Billing console, Government appears as Public institutions.
  • If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field.
  • For address, you must specify districtOrCounty.

Ukraine
  • The valid sector values are Business and Individual.
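For orientation, a hedged Soto sketch of adding a TRN (an EU VAT registration here) is shown below. The member names come from this model, while the Swift initializer labels, enum case spellings, and Address fields are assumptions based on Soto's generation conventions; the identifiers and address values are placeholders.

// Sketch: assumes `taxSettings` is a configured Soto TaxSettings client
let entry = TaxSettings.TaxRegistrationEntry(
    legalAddress: .init(
        addressLine1: "123 Example Street",  // Address members assumed from the model's Address shape
        city: "Tallinn",
        countryCode: "EE",
        postalCode: "10111"
    ),
    legalName: "Example Company",
    registrationId: "EE123456789",
    registrationType: .vat,
    sector: .business
)
let put = try await taxSettings.putTaxRegistration(
    .init(accountId: "111122223333", taxRegistrationEntry: entry)
)
print(put.status ?? .pending)  // Verified, Pending, or Rejected after validation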
", + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/PutTaxRegistration" + } + } + }, + "com.amazonaws.taxsettings#PutTaxRegistrationRequest": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

Your unique account identifier.

" + } + }, + "taxRegistrationEntry": { + "target": "com.amazonaws.taxsettings#TaxRegistrationEntry", + "traits": { + "smithy.api#documentation": "

Your TRN information that will be stored to the account mentioned in\n accountId.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.taxsettings#PutTaxRegistrationResponse": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.taxsettings#TaxRegistrationStatus", + "traits": { + "smithy.api#documentation": "

The status of your TRN stored in the system after processing. Based on the validation performed on the TRN, the status can be Verified, Pending, or Rejected.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.taxsettings#RegistrationId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#RegistrationType": { + "type": "enum", + "members": { + "INTRA_EU": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Intra-EU" + } + }, + "LOCAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Local" + } + } + } + }, + "com.amazonaws.taxsettings#RegistryCommercialCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 8, + "max": 8 + }, + "smithy.api#pattern": "^\\d+$" + } + }, + "com.amazonaws.taxsettings#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.taxsettings#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "errorCode": { + "target": "com.amazonaws.taxsettings#ErrorCode", + "traits": { + "smithy.api#documentation": "

404

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The exception thrown when the input doesn't have a resource associated with it.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.taxsettings#RomaniaAdditionalInfo": { + "type": "structure", + "members": { + "taxRegistrationNumberType": { + "target": "com.amazonaws.taxsettings#TaxRegistrationNumberType", + "traits": { + "smithy.api#documentation": "

\n The tax registration number type. The value can be TaxRegistrationNumber or LocalRegistrationNumber.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information to specify for a TRN in Romania.\n

" + } + }, + "com.amazonaws.taxsettings#S3BucketName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^(?=^.{3,63}$)(?!^(\\d+\\.)+\\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])\\.)*([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])$)$" + } + }, + "com.amazonaws.taxsettings#S3Key": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^.*\\S.*$" + } + }, + "com.amazonaws.taxsettings#S3Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^.*\\S.*$" + } + }, + "com.amazonaws.taxsettings#SaudiArabiaAdditionalInfo": { + "type": "structure", + "members": { + "taxRegistrationNumberType": { + "target": "com.amazonaws.taxsettings#SaudiArabiaTaxRegistrationNumberType", + "traits": { + "smithy.api#documentation": "

\n The tax registration number type.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Saudi Arabia.\n

" + } + }, + "com.amazonaws.taxsettings#SaudiArabiaTaxRegistrationNumberType": { + "type": "enum", + "members": { + "TAX_REGISTRATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TaxRegistrationNumber" + } + }, + "TAX_IDENTIFICATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TaxIdentificationNumber" + } + }, + "COMMERCIAL_REGISTRATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CommercialRegistrationNumber" + } + } + } + }, + "com.amazonaws.taxsettings#SdiAccountId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9A-Z]{6,7}$" + } + }, + "com.amazonaws.taxsettings#SecondaryTaxId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9]{10})$" + } + }, + "com.amazonaws.taxsettings#Sector": { + "type": "enum", + "members": { + "BUSINESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Business" + } + }, + "INDIVIDUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Individual" + } + }, + "PUBLIC_INSTITUTIONS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Government" + } + } + } + }, + "com.amazonaws.taxsettings#Seller": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#SourceS3Location": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.taxsettings#S3BucketName", + "traits": { + "smithy.api#documentation": "

The name of the Amazon S3 bucket where your tax document is located.

", + "smithy.api#required": {} + } + }, + "key": { + "target": "com.amazonaws.taxsettings#S3Key", + "traits": { + "smithy.api#documentation": "

The object key of your tax document object in Amazon S3.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon S3 bucket in your account where your tax document is located.

" + } + }, + "com.amazonaws.taxsettings#SouthKoreaAdditionalInfo": { + "type": "structure", + "members": { + "businessRepresentativeName": { + "target": "com.amazonaws.taxsettings#BusinessRepresentativeName", + "traits": { + "smithy.api#documentation": "

The business legal name based on the most recently uploaded tax registration certificate.

", + "smithy.api#required": {} + } + }, + "lineOfBusiness": { + "target": "com.amazonaws.taxsettings#LineOfBusiness", + "traits": { + "smithy.api#documentation": "

Line of business based on the most recently uploaded tax registration certificate.

", + "smithy.api#required": {} + } + }, + "itemOfBusiness": { + "target": "com.amazonaws.taxsettings#ItemOfBusiness", + "traits": { + "smithy.api#documentation": "

Item of business based on the most recently uploaded tax registration certificate.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in South Korea.

" + } + }, + "com.amazonaws.taxsettings#SpainAdditionalInfo": { + "type": "structure", + "members": { + "registrationType": { + "target": "com.amazonaws.taxsettings#RegistrationType", + "traits": { + "smithy.api#documentation": "

The registration type in Spain.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Spain.

" + } + }, + "com.amazonaws.taxsettings#State": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "^(?!\\s*$)[\\s\\S]+$" + } + }, + "com.amazonaws.taxsettings#TaxCode": { + "type": "string", + "traits": { + "smithy.api#pattern": "^([0-9]{11}|[A-Z]{6}[0-9]{2}[A-Z][0-9]{2}[A-Z][0-9]{3}[A-Z])$" + } + }, + "com.amazonaws.taxsettings#TaxDocumentAccessToken": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#TaxDocumentMetadata": { + "type": "structure", + "members": { + "taxDocumentAccessToken": { + "target": "com.amazonaws.taxsettings#TaxDocumentAccessToken", + "traits": { + "smithy.api#documentation": "

The tax document access token, which contains information that the Tax Settings API uses to locate the tax document.

If you update your tax registration, the existing taxDocumentAccessToken won't be valid. To get the latest token, call the GetTaxRegistration or ListTaxRegistrations API operation. This token is valid for 24 hours.
", + "smithy.api#required": {} + } + }, + "taxDocumentName": { + "target": "com.amazonaws.taxsettings#TaxDocumentName", + "traits": { + "smithy.api#documentation": "

The name of your tax document.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The metadata for your tax document.

" + } + }, + "com.amazonaws.taxsettings#TaxDocumentMetadatas": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#TaxDocumentMetadata" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.taxsettings#TaxDocumentName": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#TaxInheritanceDetails": { + "type": "structure", + "members": { + "parentEntityId": { + "target": "com.amazonaws.taxsettings#AccountId", + "traits": { + "smithy.api#documentation": "

\n Tax inheritance parent account information associated with the account.\n

" + } + }, + "inheritanceObtainedReason": { + "target": "com.amazonaws.taxsettings#InheritanceObtainedReason", + "traits": { + "smithy.api#documentation": "

\n Tax inheritance reason information associated with the account.\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Tax inheritance information associated with the account.\n

" + } + }, + "com.amazonaws.taxsettings#TaxOffice": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.taxsettings#TaxRegistration": { + "type": "structure", + "members": { + "registrationId": { + "target": "com.amazonaws.taxsettings#RegistrationId", + "traits": { + "smithy.api#documentation": "

Your tax registration unique identifier.

", + "smithy.api#required": {} + } + }, + "registrationType": { + "target": "com.amazonaws.taxsettings#TaxRegistrationType", + "traits": { + "smithy.api#documentation": "

Type of your tax registration. This can be either VAT or GST.\n

", + "smithy.api#required": {} + } + }, + "legalName": { + "target": "com.amazonaws.taxsettings#LegalName", + "traits": { + "smithy.api#documentation": "

The legal name associated with your TRN registration.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.taxsettings#TaxRegistrationStatus", + "traits": { + "smithy.api#documentation": "

The status of your TRN. This can be either Verified, Pending,\n Deleted, or Rejected.

", + "smithy.api#required": {} + } + }, + "sector": { + "target": "com.amazonaws.taxsettings#Sector", + "traits": { + "smithy.api#documentation": "

The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G) customers, specify Government. Note that certain values might not be applicable for the request country. Refer to the country-specific information in this API document.

" + } + }, + "taxDocumentMetadatas": { + "target": "com.amazonaws.taxsettings#TaxDocumentMetadatas", + "traits": { + "smithy.api#documentation": "

The metadata for your tax document.

" + } + }, + "certifiedEmailId": { + "target": "com.amazonaws.taxsettings#CertifiedEmailId", + "traits": { + "smithy.api#documentation": "

The email address to receive VAT invoices.

" + } + }, + "additionalTaxInformation": { + "target": "com.amazonaws.taxsettings#AdditionalInfoResponse", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN.

" + } + }, + "legalAddress": { + "target": "com.amazonaws.taxsettings#Address", + "traits": { + "smithy.api#documentation": "

The legal address associated with your TRN registration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Your TRN information.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.taxsettings#TaxRegistrationDocument": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.taxsettings#SourceS3Location", + "traits": { + "smithy.api#documentation": "

The Amazon S3 location where your tax registration document is stored.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Tax registration document information.

" + } + }, + "com.amazonaws.taxsettings#TaxRegistrationDocuments": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#TaxRegistrationDocument" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + }, + "smithy.api#uniqueItems": {} + } + }, + "com.amazonaws.taxsettings#TaxRegistrationEntry": { + "type": "structure", + "members": { + "registrationId": { + "target": "com.amazonaws.taxsettings#RegistrationId", + "traits": { + "smithy.api#documentation": "

Your tax registration unique identifier.

", + "smithy.api#required": {} + } + }, + "registrationType": { + "target": "com.amazonaws.taxsettings#TaxRegistrationType", + "traits": { + "smithy.api#documentation": "

Your tax registration type. This can be either VAT or GST.\n

", + "smithy.api#required": {} + } + }, + "legalName": { + "target": "com.amazonaws.taxsettings#LegalName", + "traits": { + "smithy.api#documentation": "

The legal name associated with your TRN.

If you're setting a TRN in Brazil, you don't need to specify the legal name. For TRNs in other countries, you must specify the legal name.
" + } + }, + "legalAddress": { + "target": "com.amazonaws.taxsettings#Address", + "traits": { + "smithy.api#documentation": "

The legal address associated with your TRN.

If you're setting a TRN in Brazil for the CNPJ tax type, you don't need to specify the legal address.

For TRNs in other countries and for the CPF tax type in Brazil, you must specify the legal address.
" + } + }, + "sector": { + "target": "com.amazonaws.taxsettings#Sector", + "traits": { + "smithy.api#documentation": "

The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G) customers, specify Government. Note that certain values might not be applicable for the request country. Refer to the country-specific information in this API document.

" + } + }, + "additionalTaxInformation": { + "target": "com.amazonaws.taxsettings#AdditionalInfoRequest", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN. You only need to specify this\n parameter if Amazon Web Services collects any additional information for your country within\n AdditionalInfoRequest.

" + } + }, + "verificationDetails": { + "target": "com.amazonaws.taxsettings#VerificationDetails", + "traits": { + "smithy.api#documentation": "

Additional details needed to verify your TRN information in Brazil. You only need to specify this parameter when you set a TRN in Brazil that is the CPF tax type.

Don't specify this parameter to set a TRN in Brazil of the CNPJ tax type or to set a TRN for another country.
" + } + }, + "certifiedEmailId": { + "target": "com.amazonaws.taxsettings#CertifiedEmailId", + "traits": { + "smithy.api#documentation": "

The email address to receive VAT invoices.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The TRN information you provide when you add a new TRN or update an existing one.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.taxsettings#TaxRegistrationNumberType": { + "type": "enum", + "members": { + "TAX_REGISTRATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TaxRegistrationNumber" + } + }, + "LOCAL_REGISTRATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LocalRegistrationNumber" + } + } + } + }, + "com.amazonaws.taxsettings#TaxRegistrationStatus": { + "type": "enum", + "members": { + "VERIFIED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Verified" + } + }, + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Pending" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleted" + } + }, + "REJECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Rejected" + } + } + } + }, + "com.amazonaws.taxsettings#TaxRegistrationType": { + "type": "enum", + "members": { + "VAT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VAT" + } + }, + "GST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GST" + } + }, + "CPF": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CPF" + } + }, + "CNPJ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CNPJ" + } + }, + "SST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SST" + } + } + } + }, + "com.amazonaws.taxsettings#TaxRegistrationWithJurisdiction": { + "type": "structure", + "members": { + "registrationId": { + "target": "com.amazonaws.taxsettings#RegistrationId", + "traits": { + "smithy.api#documentation": "

Your tax registration unique identifier.

", + "smithy.api#required": {} + } + }, + "registrationType": { + "target": "com.amazonaws.taxsettings#TaxRegistrationType", + "traits": { + "smithy.api#documentation": "

The type of your tax registration. This can be either VAT or\n GST.

", + "smithy.api#required": {} + } + }, + "legalName": { + "target": "com.amazonaws.taxsettings#LegalName", + "traits": { + "smithy.api#documentation": "

The legal name associated with your TRN information.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.taxsettings#TaxRegistrationStatus", + "traits": { + "smithy.api#documentation": "

The status of your TRN. This can be either Verified, Pending,\n Deleted, or Rejected.

", + "smithy.api#required": {} + } + }, + "sector": { + "target": "com.amazonaws.taxsettings#Sector", + "traits": { + "smithy.api#documentation": "

The industry that describes your business. For business-to-business (B2B) customers, specify Business. For business-to-consumer (B2C) customers, specify Individual. For business-to-government (B2G) customers, specify Government. Note that certain values might not be applicable for the request country. Refer to the country-specific information in this API document.

" + } + }, + "taxDocumentMetadatas": { + "target": "com.amazonaws.taxsettings#TaxDocumentMetadatas", + "traits": { + "smithy.api#documentation": "

The metadata for your tax document.

" + } + }, + "certifiedEmailId": { + "target": "com.amazonaws.taxsettings#CertifiedEmailId", + "traits": { + "smithy.api#documentation": "

The email address to receive VAT invoices.

" + } + }, + "additionalTaxInformation": { + "target": "com.amazonaws.taxsettings#AdditionalInfoResponse", + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN.

" + } + }, + "jurisdiction": { + "target": "com.amazonaws.taxsettings#Jurisdiction", + "traits": { + "smithy.api#documentation": "

The jurisdiction associated with your TRN information.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Your TRN information with jurisdiction details. This doesn't contain the full legal\n address associated with the TRN information.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.taxsettings#TaxSettings": { + "type": "service", + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.taxsettings#BatchDeleteTaxRegistration" + }, + { + "target": "com.amazonaws.taxsettings#BatchPutTaxRegistration" + }, + { + "target": "com.amazonaws.taxsettings#DeleteTaxRegistration" + }, + { + "target": "com.amazonaws.taxsettings#GetTaxRegistration" + }, + { + "target": "com.amazonaws.taxsettings#GetTaxRegistrationDocument" + }, + { + "target": "com.amazonaws.taxsettings#ListTaxRegistrations" + }, + { + "target": "com.amazonaws.taxsettings#PutTaxRegistration" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "TaxSettings", + "arnNamespace": "tax", + "cloudTrailEventSource": "tax.amazonaws.com" + }, + "aws.auth#sigv4": { + "name": "tax" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

You can use the Tax Settings API to programmatically set, modify, and delete the tax registration number (TRN), associated business legal name, and address (collectively referred to as "TRN information"). You can also programmatically view TRN information and tax addresses ("tax profiles").

You can use this API to automate your TRN information settings instead of manually using the console.

Service Endpoint
  • https://tax.us-east-1.amazonaws.com
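Assuming this release generates a Soto module for this service in the usual per-service layout (the module name, initializer, and region handling below are assumptions, not taken from this patch), client setup against the endpoint above might look like the following sketch.

import SotoCore
import SotoTaxSettings  // assumed module name following Soto's per-service layout

// Soto 6.x-style client; Tax Settings is served from the us-east-1 endpoint listed above
let awsClient = AWSClient(httpClientProvider: .createNew)
defer { try? awsClient.syncShutdown() }
let taxSettings = TaxSettings(client: awsClient, region: .useast1)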
", + "smithy.api#title": "Tax Settings", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://tax-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://tax-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://tax.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://tax.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://tax.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://tax.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": 
{ + "url": "https://tax.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://tax.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://tax.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + 
{ + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.taxsettings#TurkeyAdditionalInfo": { + "type": "structure", + "members": { + "taxOffice": { + "target": "com.amazonaws.taxsettings#TaxOffice", + "traits": { + "smithy.api#documentation": "
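The test cases above fix how the resolver combines Region, UseFIPS, UseDualStack, and an explicit Endpoint override (FIPS or dual-stack combined with a custom endpoint is rejected). A minimal Swift sketch of the custom-endpoint case, assuming the Soto code generator will emit a SotoTaxSettings module with a TaxSettings service for this new model (module, type, and parameter names are illustrative until the code is regenerated):

import SotoCore
import SotoTaxSettings   // assumed module name once Soto regenerates from this model

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }

// Passing `endpoint:` bypasses the built-in resolver, mirroring the
// "custom endpoint with region set and fips disabled and dualstack disabled" case above.
let taxSettings = TaxSettings(client: client, endpoint: "https://example.com")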

The tax office where you're registered. You can enter this information as a string. The Tax Settings API will add this information to your invoice. This parameter is required for business-to-business (B2B) and business-to-government customers. It's not required for business-to-consumer (B2C) customers.

" + } + }, + "kepEmailId": { + "target": "com.amazonaws.taxsettings#KepEmailId", + "traits": { + "smithy.api#documentation": "

The Registered Electronic Mail (REM) that is used to send notarized communication. This parameter is optional for business-to-business (B2B) and business-to-government (B2G) customers. It's not required for business-to-consumer (B2C) customers.

" + } + }, + "secondaryTaxId": { + "target": "com.amazonaws.taxsettings#SecondaryTaxId", + "traits": { + "smithy.api#documentation": "

\n Secondary tax ID (“harcama birimi VKN”si). If one isn't provided, we will use your VKN as the secondary ID.\n

" + } + }, + "industries": { + "target": "com.amazonaws.taxsettings#Industries", + "traits": { + "smithy.api#documentation": "

The industry information that tells the Tax Settings API if you're subject to additional\n withholding taxes. This information is required for business-to-business (B2B) customers. This\n information is conditionally mandatory for B2B customers who are subject to KDV tax.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Additional tax information associated with your TRN in Turkey.

" + } + }, + "com.amazonaws.taxsettings#UkraineAdditionalInfo": { + "type": "structure", + "members": { + "ukraineTrnType": { + "target": "com.amazonaws.taxsettings#UkraineTrnType", + "traits": { + "smithy.api#documentation": "

\n The tax registration type.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

\n Additional tax information associated with your TRN in Ukraine.\n

" + } + }, + "com.amazonaws.taxsettings#UkraineTrnType": { + "type": "enum", + "members": { + "BUSINESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Business" + } + }, + "INDIVIDUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Individual" + } + } + } + }, + "com.amazonaws.taxsettings#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.taxsettings#ErrorMessage", + "traits": { + "smithy.api#required": {} + } + }, + "errorCode": { + "target": "com.amazonaws.taxsettings#ValidationExceptionErrorCode", + "traits": { + "smithy.api#documentation": "

400

", + "smithy.api#required": {} + } + }, + "fieldList": { + "target": "com.amazonaws.taxsettings#ValidationExceptionFieldList", + "traits": { + "smithy.api#documentation": "

400

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The exception when the input doesn't pass validation for at least one of the input\n parameters.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.taxsettings#ValidationExceptionErrorCode": { + "type": "enum", + "members": { + "MALFORMED_TOKEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MalformedToken" + } + }, + "EXPIRED_TOKEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ExpiredToken" + } + }, + "INVALID_TOKEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InvalidToken" + } + }, + "FIELD_VALIDATION_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FieldValidationFailed" + } + }, + "MISSING_INPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MissingInput" + } + } + } + }, + "com.amazonaws.taxsettings#ValidationExceptionField": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.taxsettings#FieldName", + "traits": { + "smithy.api#documentation": "

The name of the parameter that caused a ValidationException error.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The information about the specified parameter in the request that caused an error.

" + } + }, + "com.amazonaws.taxsettings#ValidationExceptionFieldList": { + "type": "list", + "member": { + "target": "com.amazonaws.taxsettings#ValidationExceptionField" + } + }, + "com.amazonaws.taxsettings#VerificationDetails": { + "type": "structure", + "members": { + "dateOfBirth": { + "target": "com.amazonaws.taxsettings#DateOfBirth", + "traits": { + "smithy.api#documentation": "

Date of birth to verify your submitted TRN. Use the YYYY-MM-DD format.

" + } + }, + "taxRegistrationDocuments": { + "target": "com.amazonaws.taxsettings#TaxRegistrationDocuments", + "traits": { + "smithy.api#documentation": "

The tax registration document, which is required for specific countries such as Bangladesh, Kenya, South Korea and Spain.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Required information to verify your TRN.

" + } + } + } +} \ No newline at end of file diff --git a/models/transfer.json b/models/transfer.json index 9a7d0b552f..9ae16fbdda 100644 --- a/models/transfer.json +++ b/models/transfer.json @@ -1,6 +1,26 @@ { "smithy": "2.0", "shapes": { + "com.amazonaws.necco.coral#CfnSshPublicKeys": { + "type": "list", + "member": { + "target": "com.amazonaws.transfer#SshPublicKeyBody" + }, + "traits": { + "smithy.api#documentation": "This represents the SSH User Public Keys for CloudFormation resource" + } + }, + "com.amazonaws.necco.coral#CfnUserProperties": { + "type": "structure", + "members": { + "SshPublicKeys": { + "target": "com.amazonaws.necco.coral#CfnSshPublicKeys" + } + }, + "traits": { + "smithy.api#documentation": "This represents the SSH User Public Keys for CloudFormation resource" + } + }, "com.amazonaws.transfer#AccessDeniedException": { "type": "structure", "members": { @@ -37,6 +57,46 @@ "smithy.api#pattern": "^a-([0-9a-f]{17})$" } }, + "com.amazonaws.transfer#AgreementResource": { + "type": "resource", + "identifiers": { + "ServerId": { + "target": "com.amazonaws.transfer#ServerId" + }, + "AgreementId": { + "target": "com.amazonaws.transfer#AgreementId" + } + }, + "create": { + "target": "com.amazonaws.transfer#CreateAgreement" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeAgreement" + }, + "update": { + "target": "com.amazonaws.transfer#UpdateAgreement" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteAgreement" + }, + "list": { + "target": "com.amazonaws.transfer#ListAgreements" + }, + "traits": { + "aws.api#arn": { + "template": "agreement/{ServerId}/{AgreementId}" + }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "Agreement", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedAgreement" + ] + } + } + }, "com.amazonaws.transfer#AgreementStatusType": { "type": "enum", "members": { @@ -238,6 +298,43 @@ "target": "com.amazonaws.transfer#CertificateId" } }, + "com.amazonaws.transfer#CertificateResource": { + "type": "resource", + "identifiers": { + "CertificateId": { + "target": "com.amazonaws.transfer#CertificateId" + } + }, + "create": { + "target": "com.amazonaws.transfer#ImportCertificate" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeCertificate" + }, + "update": { + "target": "com.amazonaws.transfer#UpdateCertificate" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteCertificate" + }, + "list": { + "target": "com.amazonaws.transfer#ListCertificates" + }, + "traits": { + "aws.api#arn": { + "template": "certificate/{CertificateId}" + }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "Certificate", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedCertificate" + ] + } + } + }, "com.amazonaws.transfer#CertificateStatusType": { "type": "enum", "members": { @@ -344,6 +441,43 @@ "smithy.api#pattern": "^c-([0-9a-f]{17})$" } }, + "com.amazonaws.transfer#ConnectorResource": { + "type": "resource", + "identifiers": { + "ConnectorId": { + "target": "com.amazonaws.transfer#ConnectorId" + } + }, + "create": { + "target": "com.amazonaws.transfer#CreateConnector" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeConnector" + }, + "update": { + "target": "com.amazonaws.transfer#UpdateConnector" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteConnector" + }, + "list": { + "target": "com.amazonaws.transfer#ListConnectors" + }, + "traits": { + "aws.api#arn": { + "template": "connector/{ConnectorId}" 
+ }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "Connector", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedConnector" + ] + } + } + }, "com.amazonaws.transfer#ConnectorSecurityPolicyName": { "type": "string", "traits": { @@ -522,6 +656,12 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource", + "iam:PassRole" + ] + }, "smithy.api#documentation": "

Creates an agreement. An agreement is a bilateral trading partner agreement, or partnership,\n between a Transfer Family server and an AS2 process. The agreement defines the file and message\n transfer relationship between the server and the AS2 process. To define an agreement, Transfer Family\n combines a server, local profile, partner profile, certificate, and other\n attributes.

\n

The partner is identified with the PartnerProfileId, and the AS2 process is identified with the LocalProfileId.
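A hedged Swift sketch of this call using Soto's generated Transfer client; member names follow the Smithy members lower-camel-cased, as Soto's generator normally does, and the ARN, bucket path, and IDs below are placeholders rather than values from this model:

import SotoCore
import SotoTransfer

let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let transfer = Transfer(client: client, region: .useast1)

// Combines a server, local profile, and partner profile into an AS2 agreement.
let agreementRequest = Transfer.CreateAgreementRequest(
    accessRole: "arn:aws:iam::111122223333:role/as2-access-role",    // placeholder ARN
    baseDirectory: "/amzn-s3-demo-bucket/as2-inbound",               // placeholder base directory
    localProfileId: "p-1111aaaa2222bbbb3",                           // placeholder profile IDs
    partnerProfileId: "p-3333cccc4444dddd5",
    serverId: "s-1234567890abcdef0"
)
let createdAgreement = try await transfer.createAgreement(agreementRequest)   // needs an async context
print(createdAgreement.agreementId)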

" } }, @@ -630,6 +770,12 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource", + "iam:PassRole" + ] + }, "smithy.api#documentation": "

Creates the connector, which captures the parameters for a connection for the\n AS2 or SFTP protocol. For AS2, the connector is required for sending files to an externally hosted AS2 server. For SFTP, the connector is required when sending files to an SFTP server or receiving files from an SFTP server.\n For more details about connectors, see Configure AS2 connectors and Create SFTP connectors.

\n \n

You must specify exactly one configuration object: either for AS2 (As2Config) or SFTP (SftpConfig).
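Following the rule that exactly one of As2Config or SftpConfig may be set, a sketch of the SFTP variant; Soto shape names are assumed as above, the trusted host key, secret ARN, and URL are placeholders, and the `client`/`transfer` values come from the CreateAgreement sketch earlier:

// Exactly one configuration object: here SftpConfig, so As2Config is left nil.
let connectorRequest = Transfer.CreateConnectorRequest(
    accessRole: "arn:aws:iam::111122223333:role/connector-access-role",   // placeholder ARN
    sftpConfig: Transfer.SftpConnectorConfig(
        trustedHostKeys: ["sftp.partner.example.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA..."],  // placeholder key
        userSecretId: "arn:aws:secretsmanager:us-east-1:111122223333:secret:sftp-user-abc123"   // placeholder secret
    ),
    url: "sftp://sftp.partner.example.com"
)
let createdConnector = try await transfer.createConnector(connectorRequest)
print(createdConnector.connectorId)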

\n
" } }, @@ -726,6 +872,11 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource" + ] + }, "smithy.api#documentation": "

Creates the local or partner profile to use for AS2 transfers.

" } }, @@ -810,6 +961,33 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "apigateway:GET", + "ds:AuthorizeApplication", + "ds:DescribeDirectories", + "ec2:AssociateAddress", + "ec2:CreateVpcEndpoint", + "ec2:DescribeAddresses", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcEndpoints", + "iam:PassRole", + "transfer:CreateServer", + "transfer:DescribeServer", + "transfer:StartServer", + "transfer:StopServer", + "transfer:UpdateServer", + "transfer:TagResource", + "logs:CreateLogDelivery", + "logs:GetLogDelivery", + "logs:UpdateLogDelivery", + "logs:DeleteLogDelivery", + "logs:ListLogDeliveries", + "logs:PutResourcePolicy", + "logs:DescribeResourcePolicies", + "logs:DescribeLogGroups" + ] + }, "smithy.api#documentation": "

Instantiates an auto-scaling virtual server based on the selected file transfer protocol\n in Amazon Web Services. When you make updates to your file transfer protocol-enabled server or when you work\n with users, use the service-generated ServerId property that is assigned to the\n newly created server.

" } }, @@ -843,6 +1021,7 @@ "HostKey": { "target": "com.amazonaws.transfer#HostKey", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want\n to rotate keys, or have a set of active keys that use different algorithms.

\n

Use the following command to generate an RSA 2048 bit key with no passphrase:

\n

\n ssh-keygen -t rsa -b 2048 -N \"\" -m PEM -f my-new-server-key.

\n

Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096.

\n

Use the following command to generate an ECDSA 256 bit key with no passphrase:

\n

\n ssh-keygen -t ecdsa -b 256 -N \"\" -m PEM -f my-new-server-key.

\n

Valid values for the -b option for ECDSA are 256, 384, and 521.

\n

Use the following command to generate an ED25519 key with no passphrase:

\n

\n ssh-keygen -t ed25519 -N \"\" -f my-new-server-key.

\n

For all of these commands, you can replace my-new-server-key with a string of your choice.

\n \n

If you aren't planning to migrate existing users from an existing SFTP-enabled\n server to a new server, don't update the host key. Accidentally changing a\n server's host key can be disruptive.

\n
\n

For more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide.

" } }, @@ -964,7 +1143,17 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a user and associates them with an existing file transfer protocol-enabled server.\n You can only create and associate users with servers that have the\n IdentityProviderType set to SERVICE_MANAGED. Using parameters for\n CreateUser, you can specify the user name, set the home directory, store the\n user's public key, and assign the user's Identity and Access Management (IAM)\n role. You can also optionally add a session policy, and assign metadata with tags that can\n be used to group and search for users.

" + "aws.iam#iamAction": { + "requiredActions": [ + "iam:PassRole", + "transfer:CreateUser", + "transfer:DescribeUser", + "transfer:ImportSshPublicKey", + "transfer:TagResource" + ] + }, + "smithy.api#documentation": "

Creates a user and associates them with an existing file transfer protocol-enabled server.\n You can only create and associate users with servers that have the\n IdentityProviderType set to SERVICE_MANAGED. Using parameters for\n CreateUser, you can specify the user name, set the home directory, store the\n user's public key, and assign the user's Identity and Access Management (IAM)\n role. You can also optionally add a session policy, and assign metadata with tags that can\n be used to group and search for users.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#CreateUserRequest": { @@ -1017,6 +1206,7 @@ "SshPublicKeyBody": { "target": "com.amazonaws.transfer#SshPublicKeyBody", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The public portion of the Secure Shell (SSH) key used to authenticate the user to the\n server.

\n

The three standard SSH public key format elements are <key type>,\n <body base64>, and an optional <comment>, with spaces\n between each element.

\n

Transfer Family accepts RSA, ECDSA, and ED25519 keys.

  • For RSA keys, the key type is ssh-rsa.
  • For ED25519 keys, the key type is ssh-ed25519.
  • For ECDSA keys, the key type is either ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, or ecdsa-sha2-nistp521, depending on the size of the key you generated.
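A sketch of passing one of the key types listed above when creating a SERVICE_MANAGED user; shape and member names are assumed as in the earlier sketches, the key, role ARN, and IDs are placeholders, and `transfer` is the service object from the CreateAgreement sketch:

let userRequest = Transfer.CreateUserRequest(
    homeDirectory: "/amzn-s3-demo-bucket/home/alice",                              // placeholder path
    role: "arn:aws:iam::111122223333:role/transfer-user-role",                     // placeholder ARN
    serverId: "s-1234567890abcdef0",
    sshPublicKeyBody: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... alice@example.com", // placeholder ED25519 key
    userName: "alice"
)
let createdUser = try await transfer.createUser(userRequest)
print("\(createdUser.userName) on \(createdUser.serverId)")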
" } }, @@ -1089,6 +1279,11 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource" + ] + }, "smithy.api#documentation": "

\n Allows you to create a workflow with specified steps and step details the workflow invokes after file transfer completes.\n After creating a workflow, you can associate the workflow created with any transfer servers by specifying the workflow-details field in CreateServer and UpdateServer operations.\n

" } }, @@ -1322,7 +1517,8 @@ } ], "traits": { - "smithy.api#documentation": "

Delete the agreement that's specified in the provided AgreementId.

" + "smithy.api#documentation": "

Delete the agreement that's specified in the provided AgreementId.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteAgreementRequest": { @@ -1370,7 +1566,8 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the certificate that's specified in the CertificateId\n parameter.

" + "smithy.api#documentation": "

Deletes the certificate that's specified in the CertificateId\n parameter.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteCertificateRequest": { @@ -1411,7 +1608,8 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the connector that's specified in the provided ConnectorId.

" + "smithy.api#documentation": "

Deletes the connector that's specified in the provided ConnectorId.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteConnectorRequest": { @@ -1503,7 +1701,8 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the profile that's specified in the ProfileId parameter.

" + "smithy.api#documentation": "

Deletes the profile that's specified in the ProfileId parameter.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteProfileRequest": { @@ -1547,7 +1746,23 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the file transfer protocol-enabled server that you specify.

\n

No response returns from this operation.

" + "aws.iam#iamAction": { + "requiredActions": [ + "ds:DescribeDirectories", + "ds:UnauthorizeApplication", + "ec2:DeleteVpcEndpoints", + "ec2:DescribeAddresses", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcEndpoints", + "ec2:DisassociateAddress", + "logs:GetLogDelivery", + "logs:DeleteLogDelivery", + "logs:ListLogDeliveries", + "transfer:DeleteServer" + ] + }, + "smithy.api#documentation": "

Deletes the file transfer protocol-enabled server that you specify.

\n

No response returns from this operation.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteServerRequest": { @@ -1666,7 +1881,13 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the user belonging to a file transfer protocol-enabled server you specify.

\n

No response returns from this operation.

\n \n

When you delete a user from a server, the user's information is lost.

\n
" + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:DeleteUser" + ] + }, + "smithy.api#documentation": "

Deletes the user belonging to a file transfer protocol-enabled server you specify.

\n

No response returns from this operation.

\n \n

When you delete a user from a server, the user's information is lost.

\n
", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteUserRequest": { @@ -1717,7 +1938,8 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified workflow.

" + "smithy.api#documentation": "

Deletes the specified workflow.

", + "smithy.api#idempotent": {} } }, "com.amazonaws.transfer#DeleteWorkflowRequest": { @@ -1861,6 +2083,7 @@ "Agreement": { "target": "com.amazonaws.transfer#DescribedAgreement", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The details for the specified agreement, returned as a DescribedAgreement\n object.

", "smithy.api#required": {} } @@ -1918,6 +2141,7 @@ "Certificate": { "target": "com.amazonaws.transfer#DescribedCertificate", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The details for the specified certificate, returned as an object.

", "smithy.api#required": {} } @@ -1975,6 +2199,7 @@ "Connector": { "target": "com.amazonaws.transfer#DescribedConnector", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The structure that contains the details of the connector.

", "smithy.api#required": {} } @@ -2167,6 +2392,7 @@ "Profile": { "target": "com.amazonaws.transfer#DescribedProfile", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The details of the specified profile, returned as an object.

", "smithy.api#required": {} } @@ -2256,6 +2482,12 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "ec2:DescribeVpcEndpoints", + "transfer:DescribeServer" + ] + }, "smithy.api#documentation": "

Describes a file transfer protocol-enabled server that you specify by passing the\n ServerId parameter.

\n

The response contains a description of a server's properties. When you set\n EndpointType to VPC, the response will contain the\n EndpointDetails.

", "smithy.api#readonly": {}, "smithy.waiters#waitable": { @@ -2335,6 +2567,7 @@ "Server": { "target": "com.amazonaws.transfer#DescribedServer", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

An array containing the properties of a server with the ServerID you\n specified.

", "smithy.api#required": {} } @@ -2367,6 +2600,11 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:DescribeUser" + ] + }, "smithy.api#documentation": "

Describes the user assigned to the specific file transfer protocol-enabled server, as\n identified by its ServerId property.

\n

The response from this call returns the properties of the user associated with the\n ServerId value that was specified.

", "smithy.api#readonly": {} } @@ -2406,6 +2644,7 @@ "User": { "target": "com.amazonaws.transfer#DescribedUser", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

An array containing the properties of the Transfer Family user for the ServerID value\n that you specified.

", "smithy.api#required": {} } @@ -2463,6 +2702,7 @@ "Workflow": { "target": "com.amazonaws.transfer#DescribedWorkflow", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The structure that contains the details of the workflow.

", "smithy.api#required": {} } @@ -2525,6 +2765,7 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { + "aws.cloudformation#cfnMutability": "read", "smithy.api#documentation": "

The unique Amazon Resource Name (ARN) for the agreement.

", "smithy.api#required": {} } @@ -2594,6 +2835,7 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { + "aws.cloudformation#cfnMutability": "read", "smithy.api#documentation": "

The unique Amazon Resource Name (ARN) for the certificate.

", "smithy.api#required": {} } @@ -2607,7 +2849,7 @@ "Usage": { "target": "com.amazonaws.transfer#CertificateUsageType", "traits": { - "smithy.api#documentation": "

Specifies whether this certificate is used for signing or encryption.

" + "smithy.api#documentation": "

Specifies how this certificate is used. It can be used in the following ways:

  • SIGNING: For signing AS2 messages
  • ENCRYPTION: For encrypting AS2 messages
  • TLS: For securing AS2 communications sent over HTTPS
" } }, "Status": { @@ -2968,6 +3210,8 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { + "aws.cloudformation#cfnAdditionalIdentifier": {}, + "aws.cloudformation#cfnMutability": "read", "smithy.api#documentation": "

Specifies the unique Amazon Resource Name (ARN) of the server.

", "smithy.api#required": {} } @@ -2975,42 +3219,49 @@ "Certificate": { "target": "com.amazonaws.transfer#Certificate", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the ARN of the Amazon Web Services Certificate Manager (ACM) certificate. Required when\n Protocols is set to FTPS.

" } }, "ProtocolDetails": { "target": "com.amazonaws.transfer#ProtocolDetails", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

The protocol settings that are configured for your server.

  • To indicate passive mode (for FTP and FTPS protocols), use the PassiveIp parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.
  • To ignore the error that is generated when the client attempts to use the SETSTAT command on a file that you are uploading to an Amazon S3 bucket, use the SetStatOption parameter. To have the Transfer Family server ignore the SETSTAT command and upload files without needing to make any changes to your SFTP client, set the value to ENABLE_NO_OP. If you set the SetStatOption parameter to ENABLE_NO_OP, Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a SETSTAT call.
  • To determine whether your Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the TlsSessionResumptionMode parameter.
  • As2Transports indicates the transport method for the AS2 messages. Currently, only HTTP is supported.
" } }, "Domain": { "target": "com.amazonaws.transfer#Domain", "traits": { - "smithy.api#documentation": "

Specifies the domain of the storage system that is used for file transfers.

" + "aws.cloudformation#cfnMutability": "create-and-read", + "smithy.api#documentation": "

Specifies the domain of the storage system that is used for file transfers. There are two domains\n available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The\n default value is S3.

" } }, "EndpointDetails": { "target": "com.amazonaws.transfer#EndpointDetails", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

The virtual private cloud (VPC) endpoint settings that are configured for your server.\n When you host your endpoint within your VPC, you can make your endpoint accessible only to resources\n within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over\n the internet. Your VPC's default security groups are automatically assigned to your\n endpoint.

" } }, "EndpointType": { "target": "com.amazonaws.transfer#EndpointType", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Defines the type of endpoint that your server is connected to. If your server is connected\n to a VPC endpoint, your server isn't accessible over the public internet.

" } }, "HostKeyFingerprint": { "target": "com.amazonaws.transfer#HostKeyFingerprint", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

Specifies the Base64-encoded SHA256 fingerprint of the server's host key. This value\n is equivalent to the output of the ssh-keygen -l -f my-new-server-key\n command.

" } }, "IdentityProviderDetails": { "target": "com.amazonaws.transfer#IdentityProviderDetails", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies information to call a customer-supplied authentication API. This field is not\n populated when the IdentityProviderType of a server is\n AWS_DIRECTORY_SERVICE or SERVICE_MANAGED.

" } }, @@ -3023,30 +3274,35 @@ "LoggingRole": { "target": "com.amazonaws.transfer#NullableRole", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn\n on Amazon CloudWatch logging for Amazon S3 or Amazon EFS events. When set, you can view user activity in\n your CloudWatch logs.

" } }, "PostAuthenticationLoginBanner": { "target": "com.amazonaws.transfer#PostAuthenticationLoginBanner", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.

\n \n

The SFTP protocol does not support post-authentication display banners.

\n
" } }, "PreAuthenticationLoginBanner": { "target": "com.amazonaws.transfer#PreAuthenticationLoginBanner", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies a string to display when users connect to a server. This string is displayed before the user authenticates.\n For example, the following banner displays details about using the system:

\n

\n This system is for the use of authorized users only. Individuals using this computer system without authority,\n or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by\n system personnel.\n

" } }, "Protocols": { "target": "com.amazonaws.transfer#Protocols", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the file transfer protocol or protocols over which your file transfer protocol\n client can connect to your server's endpoint. The available protocols are:

  • SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH
  • FTPS (File Transfer Protocol Secure): File transfer with TLS encryption
  • FTP (File Transfer Protocol): Unencrypted file transfer
  • AS2 (Applicability Statement 2): used for transporting structured business-to-business data

  • If you select FTPS, you must choose a certificate stored in Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.
  • If Protocol includes either FTP or FTPS, then the EndpointType must be VPC and the IdentityProviderType must be either AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
  • If Protocol includes FTP, then AddressAllocationIds cannot be associated.
  • If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and the IdentityProviderType can be set to any of the supported identity types: SERVICE_MANAGED, AWS_DIRECTORY_SERVICE, AWS_LAMBDA, or API_GATEWAY.
  • If Protocol includes AS2, then the EndpointType must be VPC, and domain must be Amazon S3.
" } }, "SecurityPolicyName": { "target": "com.amazonaws.transfer#SecurityPolicyName", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the name of the security policy for the server.

" } }, @@ -3059,42 +3315,49 @@ "State": { "target": "com.amazonaws.transfer#State", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The condition of the server that was described. A value of\n ONLINE indicates that the server can accept jobs and transfer files. A\n State value of OFFLINE means that the server cannot perform file\n transfer operations.

\n

The states of STARTING and STOPPING indicate that the server is\n in an intermediate state, either not fully able to respond, or not fully offline. The values\n of START_FAILED or STOP_FAILED can indicate an error\n condition.

" } }, "Tags": { "target": "com.amazonaws.transfer#Tags", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the key-value pairs that you can use to search for and group servers that were\n assigned to the server that was described.

" } }, "UserCount": { "target": "com.amazonaws.transfer#UserCount", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

Specifies the number of users that are assigned to a server you specified with the\n ServerId.

" } }, "WorkflowDetails": { "target": "com.amazonaws.transfer#WorkflowDetails", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.

\n

In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects\n while the file is still being uploaded.

" } }, "StructuredLogDestinations": { "target": "com.amazonaws.transfer#StructuredLogDestinations", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the log groups to which your server logs are sent.

\n

To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:

\n

\n arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*\n

\n

For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*\n

\n

If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty\n value for this parameter in an update-server call. For example:

\n

\n update-server --server-id s-1234567890abcdef0 --structured-log-destinations\n
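The CLI example above clears structured logging by passing an empty value; the equivalent with Soto's generated client is an empty array (shape names assumed as in the earlier sketches, the server ID is a placeholder, and `transfer` is the service object from the CreateAgreement sketch):

let clearLogging = Transfer.UpdateServerRequest(
    serverId: "s-1234567890abcdef0",
    structuredLogDestinations: []   // an empty list turns structured logging off
)
_ = try await transfer.updateServer(clearLogging)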

" } }, "S3StorageOptions": { "target": "com.amazonaws.transfer#S3StorageOptions", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.

\n

By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry\n Type to FILE if you want a mapping to have a file target.

" } }, "As2ServiceManagedEgressIpAddresses": { "target": "com.amazonaws.transfer#ServiceManagedEgressIpAddresses", "traits": { + "aws.cloudformation#cfnMutability": "read", "smithy.api#documentation": "

The list of egress IP addresses of this server. These IP addresses are only relevant\n for servers that use the AS2 protocol. They are used for sending asynchronous MDNs.

\n

These IP addresses are assigned automatically when you create an AS2 server. Additionally,\n if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well.

" } } @@ -3109,6 +3372,8 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { + "aws.cloudformation#cfnAdditionalIdentifier": {}, + "aws.cloudformation#cfnMutability": "read", "smithy.api#documentation": "

Specifies the unique Amazon Resource Name (ARN) for the user that was requested to be\n described.

", "smithy.api#required": {} } @@ -3116,48 +3381,56 @@ "HomeDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

The landing directory (folder) for a user when they log in to the server using the client.

\n

A HomeDirectory example is /bucket_name/home/mydirectory.

\n \n

The HomeDirectory parameter is only used if HomeDirectoryType is set to PATH.

\n
" } }, "HomeDirectoryMappings": { "target": "com.amazonaws.transfer#HomeDirectoryMappings", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should\n be visible to your user and how you want to make them visible. You must specify the\n Entry and Target pair, where Entry shows how the path\n is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you\n only specify a target, it is displayed as is. You also must ensure that your Identity and Access Management (IAM) \n role provides access to paths in Target. This value\n can be set only when HomeDirectoryType is set to\n LOGICAL.

\n

In most cases, you can use this value instead of the session policy to lock your user\n down to the designated home directory (\"chroot\"). To do this, you can set\n Entry to '/' and set Target to the HomeDirectory\n parameter value.

" } }, "HomeDirectoryType": { "target": "com.amazonaws.transfer#HomeDirectoryType", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

The type of landing directory (folder) that you want your users' home directory to be when they log in to the server.\n If you set it to PATH, the user will see the absolute Amazon S3 bucket or Amazon EFS path as is in their file transfer \n protocol clients. If you set it to LOGICAL, you need to provide mappings in the HomeDirectoryMappings for \n how you want to make Amazon S3 or Amazon EFS paths visible to your users.

\n \n

If HomeDirectoryType is LOGICAL, you must provide mappings,\n using the HomeDirectoryMappings parameter. If, on the other hand,\n HomeDirectoryType is PATH, you provide an absolute path\n using the HomeDirectory parameter. You cannot have both\n HomeDirectory and HomeDirectoryMappings in your\n template.

\n
" } }, "Policy": { "target": "com.amazonaws.transfer#Policy", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

A session policy for your user so that you can use the same Identity and Access Management (IAM) role across multiple users. This policy scopes down a user's\n access to portions of their Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName},\n ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}.

" } }, "PosixProfile": { "target": "com.amazonaws.transfer#PosixProfile", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the full POSIX identity, including user ID (Uid), group ID\n (Gid), and any secondary group IDs (SecondaryGids), that controls\n your users' access to your Amazon Elastic File System (Amazon EFS) file systems. The POSIX\n permissions that are set on files and directories in your file system determine the level of\n access your users get when transferring files into and out of your Amazon EFS file\n systems.

" } }, "Role": { "target": "com.amazonaws.transfer#Role", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 \n bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users \n when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust \n relationship that allows the server to access your resources when servicing your users' transfer requests.

" } }, "SshPublicKeys": { "target": "com.amazonaws.transfer#SshPublicKeys", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

Specifies the public key portion of the Secure Shell (SSH) keys stored for the described\n user.

" } }, "Tags": { "target": "com.amazonaws.transfer#Tags", "traits": { + "aws.cloudformation#cfnMutability": "full", "smithy.api#documentation": "

Specifies the key-value pairs for the user requested. Tag can be used to search for and\n group users for a variety of purposes.

" } }, @@ -3178,6 +3451,7 @@ "Arn": { "target": "com.amazonaws.transfer#Arn", "traits": { + "aws.cloudformation#cfnMutability": "read", "smithy.api#documentation": "

Specifies the unique Amazon Resource Name (ARN) for the workflow.

", "smithy.api#required": {} } @@ -3857,6 +4131,11 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource" + ] + }, "smithy.api#documentation": "

Imports the signing and encryption certificates that you need to create local (AS2)\n profiles and partner\n profiles.

" } }, @@ -3866,7 +4145,7 @@ "Usage": { "target": "com.amazonaws.transfer#CertificateUsageType", "traits": { - "smithy.api#documentation": "

Specifies whether this certificate is used for signing or encryption.

", + "smithy.api#documentation": "

Specifies how this certificate is used. It can be used in the following ways:

  • SIGNING: For signing AS2 messages
  • ENCRYPTION: For encrypting AS2 messages
  • TLS: For securing AS2 communications sent over HTTPS
", "smithy.api#required": {} } }, @@ -4865,6 +5144,11 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:ListServers" + ] + }, "smithy.api#documentation": "

Lists the file transfer protocol-enabled servers that are associated with your Amazon Web Services\n account.

", "smithy.api#paginated": { "inputToken": "NextToken", @@ -5028,6 +5312,11 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:ListUsers" + ] + }, "smithy.api#documentation": "

Lists the users for a file transfer protocol-enabled server that you specify by passing\n the ServerId parameter.

", "smithy.api#paginated": { "inputToken": "NextToken", @@ -5279,7 +5568,7 @@ "Usage": { "target": "com.amazonaws.transfer#CertificateUsageType", "traits": { - "smithy.api#documentation": "

Specifies whether this certificate is used for signing or encryption.

" + "smithy.api#documentation": "

Specifies how this certificate is used. It can be used in the following ways:

  • SIGNING: For signing AS2 messages
  • ENCRYPTION: For encrypting AS2 messages
  • TLS: For securing AS2 communications sent over HTTPS
" } }, "Status": { @@ -5495,7 +5784,7 @@ "Domain": { "target": "com.amazonaws.transfer#Domain", "traits": { - "smithy.api#documentation": "

Specifies the domain of the storage system that is used for file transfers.

" + "smithy.api#documentation": "

Specifies the domain of the storage system that is used for file transfers. There are two domains\n available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The\n default value is S3.

" } }, "IdentityProviderType": { @@ -5960,6 +6249,43 @@ "smithy.api#pattern": "^p-([0-9a-f]{17})$" } }, + "com.amazonaws.transfer#ProfileResource": { + "type": "resource", + "identifiers": { + "ProfileId": { + "target": "com.amazonaws.transfer#ProfileId" + } + }, + "create": { + "target": "com.amazonaws.transfer#CreateProfile" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeProfile" + }, + "update": { + "target": "com.amazonaws.transfer#UpdateProfile" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteProfile" + }, + "list": { + "target": "com.amazonaws.transfer#ListProfiles" + }, + "traits": { + "aws.api#arn": { + "template": "profile/{ProfileId}" + }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "Profile", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedProfile" + ] + } + } + }, "com.amazonaws.transfer#ProfileType": { "type": "enum", "members": { @@ -6487,6 +6813,43 @@ "smithy.api#pattern": "^s-([0-9a-f]{17})$" } }, + "com.amazonaws.transfer#ServerResource": { + "type": "resource", + "identifiers": { + "ServerId": { + "target": "com.amazonaws.transfer#ServerId" + } + }, + "create": { + "target": "com.amazonaws.transfer#CreateServer" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeServer" + }, + "update": { + "target": "com.amazonaws.transfer#UpdateServer" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteServer" + }, + "list": { + "target": "com.amazonaws.transfer#ListServers" + }, + "traits": { + "aws.api#arn": { + "template": "server/{ServerId}" + }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "Server", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedServer" + ] + } + } + }, "com.amazonaws.transfer#ServiceErrorMessage": { "type": "string" }, @@ -6500,6 +6863,9 @@ "type": "list", "member": { "target": "com.amazonaws.transfer#ServiceManagedEgressIpAddress" + }, + "traits": { + "smithy.api#documentation": "The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs. These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well." 
} }, "com.amazonaws.transfer#ServiceMetadata": { @@ -6721,7 +7087,8 @@ "smithy.api#length": { "min": 0, "max": 2048 - } + }, + "smithy.api#pattern": "^\\s*(ssh|ecdsa)-[a-z0-9-]+[ \\t]+(([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{1,3})?(={0,3})?)(\\s*|[ \\t]+[\\S \\t]*\\s*)$" } }, "com.amazonaws.transfer#SshPublicKeyCount": { @@ -7451,90 +7818,27 @@ { "target": "com.amazonaws.transfer#CreateAccess" }, - { - "target": "com.amazonaws.transfer#CreateAgreement" - }, - { - "target": "com.amazonaws.transfer#CreateConnector" - }, - { - "target": "com.amazonaws.transfer#CreateProfile" - }, - { - "target": "com.amazonaws.transfer#CreateServer" - }, - { - "target": "com.amazonaws.transfer#CreateUser" - }, - { - "target": "com.amazonaws.transfer#CreateWorkflow" - }, { "target": "com.amazonaws.transfer#DeleteAccess" }, - { - "target": "com.amazonaws.transfer#DeleteAgreement" - }, - { - "target": "com.amazonaws.transfer#DeleteCertificate" - }, - { - "target": "com.amazonaws.transfer#DeleteConnector" - }, { "target": "com.amazonaws.transfer#DeleteHostKey" }, - { - "target": "com.amazonaws.transfer#DeleteProfile" - }, - { - "target": "com.amazonaws.transfer#DeleteServer" - }, { "target": "com.amazonaws.transfer#DeleteSshPublicKey" }, - { - "target": "com.amazonaws.transfer#DeleteUser" - }, - { - "target": "com.amazonaws.transfer#DeleteWorkflow" - }, { "target": "com.amazonaws.transfer#DescribeAccess" }, - { - "target": "com.amazonaws.transfer#DescribeAgreement" - }, - { - "target": "com.amazonaws.transfer#DescribeCertificate" - }, - { - "target": "com.amazonaws.transfer#DescribeConnector" - }, { "target": "com.amazonaws.transfer#DescribeExecution" }, { "target": "com.amazonaws.transfer#DescribeHostKey" }, - { - "target": "com.amazonaws.transfer#DescribeProfile" - }, { "target": "com.amazonaws.transfer#DescribeSecurityPolicy" }, - { - "target": "com.amazonaws.transfer#DescribeServer" - }, - { - "target": "com.amazonaws.transfer#DescribeUser" - }, - { - "target": "com.amazonaws.transfer#DescribeWorkflow" - }, - { - "target": "com.amazonaws.transfer#ImportCertificate" - }, { "target": "com.amazonaws.transfer#ImportHostKey" }, @@ -7544,39 +7848,18 @@ { "target": "com.amazonaws.transfer#ListAccesses" }, - { - "target": "com.amazonaws.transfer#ListAgreements" - }, - { - "target": "com.amazonaws.transfer#ListCertificates" - }, - { - "target": "com.amazonaws.transfer#ListConnectors" - }, { "target": "com.amazonaws.transfer#ListExecutions" }, { "target": "com.amazonaws.transfer#ListHostKeys" }, - { - "target": "com.amazonaws.transfer#ListProfiles" - }, { "target": "com.amazonaws.transfer#ListSecurityPolicies" }, - { - "target": "com.amazonaws.transfer#ListServers" - }, { "target": "com.amazonaws.transfer#ListTagsForResource" }, - { - "target": "com.amazonaws.transfer#ListUsers" - }, - { - "target": "com.amazonaws.transfer#ListWorkflows" - }, { "target": "com.amazonaws.transfer#SendWorkflowStepState" }, @@ -7608,25 +7891,30 @@ "target": "com.amazonaws.transfer#UpdateAccess" }, { - "target": "com.amazonaws.transfer#UpdateAgreement" + "target": "com.amazonaws.transfer#UpdateHostKey" + } + ], + "resources": [ + { + "target": "com.amazonaws.transfer#AgreementResource" }, { - "target": "com.amazonaws.transfer#UpdateCertificate" + "target": "com.amazonaws.transfer#CertificateResource" }, { - "target": "com.amazonaws.transfer#UpdateConnector" + "target": "com.amazonaws.transfer#ConnectorResource" }, { - "target": "com.amazonaws.transfer#UpdateHostKey" + "target": "com.amazonaws.transfer#ProfileResource" }, { - "target": 
"com.amazonaws.transfer#UpdateProfile" + "target": "com.amazonaws.transfer#ServerResource" }, { - "target": "com.amazonaws.transfer#UpdateServer" + "target": "com.amazonaws.transfer#UserResource" }, { - "target": "com.amazonaws.transfer#UpdateUser" + "target": "com.amazonaws.transfer#WorkflowResource" } ], "traits": { @@ -7637,6 +7925,7 @@ "cloudTrailEventSource": "transfer.amazonaws.com", "endpointPrefix": "transfer" }, + "aws.api#tagEnabled": {}, "aws.auth#sigv4": { "name": "transfer" }, @@ -8810,6 +9099,13 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource", + "transfer:UnTagResource", + "iam:PassRole" + ] + }, "smithy.api#documentation": "

Updates some of the parameters for an existing agreement. Provide the\n AgreementId and the ServerId for the agreement that you want to\n update, along with the new values for the parameters to update.

" } }, @@ -8912,6 +9208,12 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource", + "transfer:UnTagResource" + ] + }, "smithy.api#documentation": "

Updates the active and inactive dates for a certificate.

" } }, @@ -8992,6 +9294,13 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource", + "transfer:UnTagResource", + "iam:PassRole" + ] + }, "smithy.api#documentation": "

Updates some of the parameters for an existing connector. Provide the\n ConnectorId for the connector that you want to update, along with the new\n values for the parameters to update.

" } }, @@ -9167,6 +9476,12 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "transfer:TagResource", + "transfer:UnTagResource" + ] + }, "smithy.api#documentation": "

Updates some of the parameters for an existing profile. Provide the ProfileId\n for the profile that you want to update, along with the new values for the parameters to\n update.

" } }, @@ -9241,6 +9556,34 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "apigateway:GET", + "ec2:AssociateAddress", + "ec2:DisassociateAddress", + "ec2:CreateVpcEndpoint", + "ec2:DeleteVpcEndpoints", + "ec2:DescribeAddresses", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcEndpoints", + "ec2:ModifyVpcEndpoint", + "iam:PassRole", + "transfer:DescribeServer", + "transfer:StartServer", + "transfer:StopServer", + "transfer:UpdateServer", + "transfer:TagResource", + "transfer:UnTagResource", + "logs:CreateLogDelivery", + "logs:GetLogDelivery", + "logs:UpdateLogDelivery", + "logs:DeleteLogDelivery", + "logs:ListLogDeliveries", + "logs:PutResourcePolicy", + "logs:DescribeResourcePolicies", + "logs:DescribeLogGroups" + ] + }, "smithy.api#documentation": "

Updates the file transfer protocol-enabled server's properties after that server has\n been created.

\n

The UpdateServer call returns the ServerId of the server you\n updated.

" } }, @@ -9274,6 +9617,7 @@ "HostKey": { "target": "com.amazonaws.transfer#HostKey", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want\n to rotate keys, or have a set of active keys that use different algorithms.

\n

Use the following command to generate an RSA 2048 bit key with no passphrase:

\n

\n ssh-keygen -t rsa -b 2048 -N \"\" -m PEM -f my-new-server-key.

\n

Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096.

\n

Use the following command to generate an ECDSA 256 bit key with no passphrase:

\n

\n ssh-keygen -t ecdsa -b 256 -N \"\" -m PEM -f my-new-server-key.

\n

Valid values for the -b option for ECDSA are 256, 384, and 521.

\n

Use the following command to generate an ED25519 key with no passphrase:

\n

\n ssh-keygen -t ed25519 -N \"\" -f my-new-server-key.

\n

For all of these commands, you can replace my-new-server-key with a string of your choice.

\n \n

If you aren't planning to migrate existing users from an existing SFTP-enabled\n server to a new server, don't update the host key. Accidentally changing a\n server's host key can be disruptive.

\n
\n

For more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide.

" } }, @@ -9384,6 +9728,16 @@ } ], "traits": { + "aws.iam#iamAction": { + "requiredActions": [ + "iam:PassRole", + "transfer:DeleteSshPublicKey", + "transfer:DescribeUser", + "transfer:ImportSshPublicKey", + "transfer:TagResource", + "transfer:UnTagResource" + ] + }, "smithy.api#documentation": "

Assigns new properties to a user. Parameters you pass modify any or all of the following:\n the home directory, role, and policy for the UserName and ServerId\n you specify.

\n

The response returns the ServerId and the UserName for the\n updated user.

\n

In the console, you can select Restricted when you create or update a\n user. This ensures that the user can't access anything outside of their home directory. The\n programmatic way to configure this behavior is to update the user. Set their\n HomeDirectoryType to LOGICAL, and specify\n HomeDirectoryMappings with Entry as root (/) and\n Target as their home directory.

\n

For example, if the user's home directory is /test/admin-user, the following\n command updates the user so that their configuration in the console shows the\n Restricted flag as selected.

\n

\n aws transfer update-user --server-id <server-id> --user-name admin-user --home-directory-type LOGICAL --home-directory-mappings \"[{\\\"Entry\\\":\\\"/\\\", \\\"Target\\\":\\\"/test/admin-user\\\"}]\"\n
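The same Restricted configuration expressed with Soto's generated client rather than the CLI; shape and member names are assumed as in the earlier sketches, IDs are placeholders, and `transfer` is the service object from the CreateAgreement sketch:

let restrictUser = Transfer.UpdateUserRequest(
    homeDirectoryMappings: [
        Transfer.HomeDirectoryMapEntry(entry: "/", target: "/test/admin-user")   // chroot-style mapping
    ],
    homeDirectoryType: .logical,
    serverId: "s-1234567890abcdef0",
    userName: "admin-user"
)
_ = try await transfer.updateUser(restrictUser)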

" } }, @@ -9528,6 +9882,48 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.transfer#UserResource": { + "type": "resource", + "identifiers": { + "ServerId": { + "target": "com.amazonaws.transfer#ServerId" + }, + "UserName": { + "target": "com.amazonaws.transfer#UserName" + } + }, + "put": { + "target": "com.amazonaws.transfer#CreateUser" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeUser" + }, + "update": { + "target": "com.amazonaws.transfer#UpdateUser" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteUser" + }, + "list": { + "target": "com.amazonaws.transfer#ListUsers" + }, + "traits": { + "aws.api#arn": { + "template": "user/{ServerId}/{UserName}" + }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "User", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedUser", + "com.amazonaws.necco.coral#CfnUserProperties" + ] + }, + "smithy.api#noReplace": {} + } + }, "com.amazonaws.transfer#VpcEndpointId": { "type": "string", "traits": { @@ -9603,6 +9999,40 @@ "smithy.api#pattern": "^w-([a-z0-9]{17})$" } }, + "com.amazonaws.transfer#WorkflowResource": { + "type": "resource", + "identifiers": { + "WorkflowId": { + "target": "com.amazonaws.transfer#WorkflowId" + } + }, + "create": { + "target": "com.amazonaws.transfer#CreateWorkflow" + }, + "read": { + "target": "com.amazonaws.transfer#DescribeWorkflow" + }, + "delete": { + "target": "com.amazonaws.transfer#DeleteWorkflow" + }, + "list": { + "target": "com.amazonaws.transfer#ListWorkflows" + }, + "traits": { + "aws.api#arn": { + "template": "workflow/{WorkflowId}" + }, + "aws.api#taggable": { + "property": "Tags" + }, + "aws.cloudformation#cfnResource": { + "name": "Workflow", + "additionalSchemas": [ + "com.amazonaws.transfer#DescribedWorkflow" + ] + } + } + }, "com.amazonaws.transfer#WorkflowStep": { "type": "structure", "members": { diff --git a/models/verifiedpermissions.json b/models/verifiedpermissions.json index 948569b762..dfcd3abd40 100644 --- a/models/verifiedpermissions.json +++ b/models/verifiedpermissions.json @@ -50,6 +50,12 @@ "smithy.api#documentation": "

Contains information about an action for a request for which an authorization decision\n is made.

\n

This data type is used as a request parameter to the IsAuthorized, BatchIsAuthorized, and IsAuthorizedWithToken\n operations.

\n

Example: { \"actionId\": \"<action name>\", \"actionType\": \"Action\"\n }\n

" } }, + "com.amazonaws.verifiedpermissions#ActionIdentifierList": { + "type": "list", + "member": { + "target": "com.amazonaws.verifiedpermissions#ActionIdentifier" + } + }, "com.amazonaws.verifiedpermissions#ActionType": { "type": "string", "traits": { @@ -105,6 +111,27 @@ "smithy.api#documentation": "

The value of an attribute.

\n

Contains information about the runtime context for a request for which an\n authorization decision is made.

\n

This data type is used as a member of the ContextDefinition structure\n which is used as a request parameter for the IsAuthorized, BatchIsAuthorized, and IsAuthorizedWithToken\n operations.

" } }, + "com.amazonaws.verifiedpermissions#Audience": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.verifiedpermissions#Audiences": { + "type": "list", + "member": { + "target": "com.amazonaws.verifiedpermissions#Audience" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, "com.amazonaws.verifiedpermissions#BatchIsAuthorized": { "type": "operation", "input": { @@ -423,6 +450,15 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.verifiedpermissions#Claim": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.verifiedpermissions#ClientId": { "type": "string", "traits": { @@ -458,7 +494,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of user groups and entities from an Amazon Cognito user pool identity\n source.

\n

This data type is part of a CognitoUserPoolConfiguration structure and is a request parameter in CreateIdentitySource.

" + "smithy.api#documentation": "

The type of entity that a policy store maps to groups from an Amazon Cognito user \n pool identity source.

\n

This data type is part of a CognitoUserPoolConfiguration structure and is a request parameter in CreateIdentitySource.

" } }, "com.amazonaws.verifiedpermissions#CognitoGroupConfigurationDetail": { @@ -472,7 +508,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of user groups and entities from an Amazon Cognito user pool identity\n source.

\n

This data type is part of an CognitoUserPoolConfigurationDetail structure and is a response parameter to\n GetIdentitySource.

" + "smithy.api#documentation": "

The type of entity that a policy store maps to groups from an Amazon Cognito user \n pool identity source.

\n

This data type is part of a CognitoUserPoolConfigurationDetail structure and is a response parameter to\n GetIdentitySource.

" } }, "com.amazonaws.verifiedpermissions#CognitoGroupConfigurationItem": { @@ -486,7 +522,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of user groups and entities from an Amazon Cognito user pool identity\n source.

\n

This data type is part of an CognitoUserPoolConfigurationItem structure and is a response parameter to\n ListIdentitySources.

" + "smithy.api#documentation": "

The type of entity that a policy store maps to groups from an Amazon Cognito user \n pool identity source.

\n

This data type is part of a CognitoUserPoolConfigurationItem structure and is a response parameter to\n ListIdentitySources.

" } }, "com.amazonaws.verifiedpermissions#CognitoUserPoolConfiguration": { @@ -508,12 +544,12 @@ "groupConfiguration": { "target": "com.amazonaws.verifiedpermissions#CognitoGroupConfiguration", "traits": { - "smithy.api#documentation": "

The configuration of the user groups from an Amazon Cognito user pool identity\n source.

" + "smithy.api#documentation": "

The type of entity that a policy store maps to groups from an Amazon Cognito user \n pool identity source.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used\n as an identity provider for Verified Permissions.

\n

This data type is used as a field that is part of an Configuration structure that is\n used as a parameter to CreateIdentitySource.

\n

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}\n

" + "smithy.api#documentation": "

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used\n as an identity provider for Verified Permissions.

\n

This data type is part of a Configuration structure that is\n used as a parameter to CreateIdentitySource.

\n

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}\n

" } }, "com.amazonaws.verifiedpermissions#CognitoUserPoolConfigurationDetail": { @@ -543,12 +579,12 @@ "groupConfiguration": { "target": "com.amazonaws.verifiedpermissions#CognitoGroupConfigurationDetail", "traits": { - "smithy.api#documentation": "

The configuration of the user groups from an Amazon Cognito user pool identity\n source.

" + "smithy.api#documentation": "

The type of entity that a policy store maps to groups from an Amazon Cognito user \n pool identity source.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used\n as an identity provider for Verified Permissions.

\n

This data type is used as a field that is part of an ConfigurationDetail structure that is\n part of the response to GetIdentitySource.

\n

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}\n

" + "smithy.api#documentation": "

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used\n as an identity provider for Verified Permissions.

\n

This data type is used as a field that is part of a ConfigurationDetail structure that is\n part of the response to GetIdentitySource.

\n

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}\n

" } }, "com.amazonaws.verifiedpermissions#CognitoUserPoolConfigurationItem": { @@ -578,12 +614,12 @@ "groupConfiguration": { "target": "com.amazonaws.verifiedpermissions#CognitoGroupConfigurationItem", "traits": { - "smithy.api#documentation": "

The configuration of the user groups from an Amazon Cognito user pool identity\n source.

" + "smithy.api#documentation": "

The type of entity that a policy store maps to groups from an Amazon Cognito user \n pool identity source.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used\n as an identity provider for Verified Permissions.

\n

This data type is used as a field that is part of the ConfigurationItem structure that is\n part of the response to ListIdentitySources.

\n

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"]}\n

" + "smithy.api#documentation": "

The configuration for an identity source that represents a connection to an Amazon Cognito user pool used\n as an identity provider for Verified Permissions.

\n

This data type is used as a field that is part of the ConfigurationItem structure that is\n part of the response to ListIdentitySources.

\n

Example:\"CognitoUserPoolConfiguration\":{\"UserPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"ClientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}\n

" } }, "com.amazonaws.verifiedpermissions#Configuration": { @@ -594,10 +630,16 @@ "traits": { "smithy.api#documentation": "

Contains configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of\n authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of an Amazon Cognito user pool\n and one or more application client IDs.

\n

Example:\n \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}\n

" } + }, + "openIdConnectConfiguration": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

Example:\"configuration\":{\"openIdConnectConfiguration\":{\"issuer\":\"https://auth.example.com\",\"tokenSelection\":{\"accessTokenOnly\":{\"audiences\":[\"https://myapp.example.com\",\"https://myapp2.example.com\"],\"principalIdClaim\":\"sub\"}},\"entityIdPrefix\":\"MyOIDCProvider\",\"groupConfiguration\":{\"groupClaim\":\"groups\",\"groupEntityType\":\"MyCorp::UserGroup\"}}}\n
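As a usage illustration only, the JSON example above corresponds roughly to the following Soto (Swift) sketch; the shape, member, and enum-case names assume the generated SotoVerifiedPermissions module, and the policy store ID is an illustrative placeholder.

import SotoVerifiedPermissions

// Sketch only: mirrors the openIdConnectConfiguration example above.
let awsClient = AWSClient(httpClientProvider: .createNew)
defer { try? awsClient.syncShutdown() }
let avp = VerifiedPermissions(client: awsClient, region: .useast1)

// Accept access tokens for the listed audiences, take the principal from the
// "sub" claim, and map the "groups" claim to the MyCorp::UserGroup entity type.
let oidcConfiguration = VerifiedPermissions.OpenIdConnectConfiguration(
    entityIdPrefix: "MyOIDCProvider",
    groupConfiguration: .init(groupClaim: "groups", groupEntityType: "MyCorp::UserGroup"),
    issuer: "https://auth.example.com",
    tokenSelection: .accessTokenOnly(.init(
        audiences: ["https://myapp.example.com", "https://myapp2.example.com"],
        principalIdClaim: "sub"
    ))
)
let created = try await avp.createIdentitySource(.init(
    configuration: .openIdConnectConfiguration(oidcConfiguration),
    policyStoreId: "PSEXAMPLEabcdefg111111",   // illustrative policy store ID
    principalEntityType: "MyCorp::User"
))
print(created.identitySourceId)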

" + } } }, "traits": { - "smithy.api#documentation": "

Contains configuration information used when creating a new identity source.

\n \n

At this time, the only valid member of this structure is a Amazon Cognito user pool\n configuration.

\n

You must specify a userPoolArn, and optionally, a\n ClientId.

\n
\n

This data type is used as a request parameter for the CreateIdentitySource\n operation.

" + "smithy.api#documentation": "

Contains configuration information used when creating a new identity source.

\n

This data type is used as a request parameter for the CreateIdentitySource\n operation.

" } }, "com.amazonaws.verifiedpermissions#ConfigurationDetail": { @@ -606,7 +648,13 @@ "cognitoUserPoolConfiguration": { "target": "com.amazonaws.verifiedpermissions#CognitoUserPoolConfigurationDetail", "traits": { - "smithy.api#documentation": "

Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of\n authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool\n and one or more application client IDs.

\n

Example:\n \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}\n

" + "smithy.api#documentation": "

Contains configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of\n authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of an Amazon Cognito user pool,\n the policy store entity that you want to assign to user groups,\n and one or more application client IDs.

\n

Example:\n \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}\n

" + } + }, + "openIdConnectConfiguration": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectConfigurationDetail", + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

Example:\"configuration\":{\"openIdConnectConfiguration\":{\"issuer\":\"https://auth.example.com\",\"tokenSelection\":{\"accessTokenOnly\":{\"audiences\":[\"https://myapp.example.com\",\"https://myapp2.example.com\"],\"principalIdClaim\":\"sub\"}},\"entityIdPrefix\":\"MyOIDCProvider\",\"groupConfiguration\":{\"groupClaim\":\"groups\",\"groupEntityType\":\"MyCorp::UserGroup\"}}}\n

" } } }, @@ -620,7 +668,13 @@ "cognitoUserPoolConfiguration": { "target": "com.amazonaws.verifiedpermissions#CognitoUserPoolConfigurationItem", "traits": { - "smithy.api#documentation": "

Contains configuration details of a Amazon Cognito user pool that Verified Permissions can use as a source of\n authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of a Amazon Cognito user pool\n and one or more application client IDs.

\n

Example:\n \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}\n

" + "smithy.api#documentation": "

Contains configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of\n authenticated identities as entities. It specifies the Amazon Resource Name (ARN) of an Amazon Cognito user pool,\n the policy store entity that you want to assign to user groups,\n and one or more application client IDs.

\n

Example:\n \"configuration\":{\"cognitoUserPoolConfiguration\":{\"userPoolArn\":\"arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5\",\"clientIds\":\n [\"a1b2c3d4e5f6g7h8i9j0kalbmc\"],\"groupConfiguration\": {\"groupEntityType\": \"MyCorp::Group\"}}}\n

" + } + }, + "openIdConnectConfiguration": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectConfigurationItem", + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

Example:\"configuration\":{\"openIdConnectConfiguration\":{\"issuer\":\"https://auth.example.com\",\"tokenSelection\":{\"accessTokenOnly\":{\"audiences\":[\"https://myapp.example.com\",\"https://myapp2.example.com\"],\"principalIdClaim\":\"sub\"}},\"entityIdPrefix\":\"MyOIDCProvider\",\"groupConfiguration\":{\"groupClaim\":\"groups\",\"groupEntityType\":\"MyCorp::UserGroup\"}}}\n

" } } }, @@ -672,6 +726,9 @@ }, "value": { "target": "com.amazonaws.verifiedpermissions#AttributeValue" + }, + "traits": { + "smithy.api#sensitive": {} } }, "com.amazonaws.verifiedpermissions#CreateIdentitySource": { @@ -697,7 +754,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to create a reference to an external identity provider (IdP) that is compatible with OpenID Connect (OIDC) authentication protocol, such as Amazon Cognito" }, - "smithy.api#documentation": "

Creates a reference to an Amazon Cognito user pool as an external identity provider (IdP).\n

\n

After you create an identity source, you can use the identities provided by the IdP as proxies\n for the principal in authorization queries that use the IsAuthorizedWithToken\n operation. These identities take the form of tokens that contain claims about the user,\n such as IDs, attributes and group memberships. Amazon Cognito provides both identity tokens and\n access tokens, and Verified Permissions can use either or both. Any combination of identity and access\n tokens results in the same Cedar principal. Verified Permissions automatically translates the\n information about the identities into the standard Cedar attributes that can be\n evaluated by your policies. Because the Amazon Cognito identity and access tokens can contain\n different information, the tokens you choose to use determine which principal attributes\n are available to access when evaluating Cedar policies.

\n \n

If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

\n
\n \n

To reference a user from this identity source in your Cedar policies, use the following\n syntax.

\n

\n IdentityType::\"<CognitoUserPoolIdentifier>|<CognitoClientId>\n

\n

Where IdentityType is the string that you provide to the\n PrincipalEntityType parameter for this operation. The\n CognitoUserPoolId and CognitoClientId are defined by\n the Amazon Cognito user pool.

\n
\n \n

Verified Permissions is \n eventually consistent\n . It can take a few seconds for a new or changed element to propagate through\n the service and be visible in the results of other Verified Permissions operations.

\n
", + "smithy.api#documentation": "

Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect\n (OIDC) identity provider (IdP).\n

\n

After you create an identity source, you can use the identities provided by the IdP as proxies\n for the principal in authorization queries that use the IsAuthorizedWithToken or\n BatchIsAuthorizedWithToken API operations. These identities take the form\n of tokens that contain claims about the user, such as IDs, attributes and group\n memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions\n derives information about your user and session from token claims. Access tokens provide\n action context to your policies, and ID tokens provide principal\n Attributes.

\n \n

Tokens from an identity source user continue to be usable until they expire. \n Token revocation and resource deletion have no effect on the validity of a token in your policy store.

\n
\n \n

To reference a user from this identity source in your Cedar policies, refer to the\n following syntax examples.

\n
    \n
  • \n

    Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user\n principal attribute], for example\n MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111.

    \n
  • \n
  • \n

    OpenID Connect (OIDC) provider: Namespace::[Entity\n type]::[principalIdClaim]|[user principal attribute], for example\n MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222.

    \n
  • \n
\n
\n \n

Verified Permissions is \n eventually consistent\n . It can take a few seconds for a new or changed element to propagate through\n the service and be visible in the results of other Verified Permissions operations.

\n
", "smithy.api#idempotent": {} } }, @@ -721,7 +778,7 @@ "configuration": { "target": "com.amazonaws.verifiedpermissions#Configuration", "traits": { - "smithy.api#documentation": "

Specifies the details required to communicate with the identity provider (IdP)\n associated with this identity source.

\n \n

At this time, the only valid member of this structure is a Amazon Cognito user pool\n configuration.

\n

You must specify a UserPoolArn, and optionally, a\n ClientId.

\n
", + "smithy.api#documentation": "

Specifies the details required to communicate with the identity provider (IdP)\n associated with this identity source.

", "smithy.api#required": {} } }, @@ -864,6 +921,12 @@ "smithy.api#documentation": "

The resource specified in the new policy's scope. This response element isn't present\n when the resource isn't specified in the policy content.

" } }, + "actions": { + "target": "com.amazonaws.verifiedpermissions#ActionIdentifierList", + "traits": { + "smithy.api#documentation": "

The action that a policy permits or forbids. For example, \n{\"actions\": [{\"actionId\": \"ViewPhoto\", \"actionType\": \"PhotoFlash::Action\"}, {\"actionId\": \"SharePhoto\", \n\"actionType\": \"PhotoFlash::Action\"}]}.

" + } + }, "createdDate": { "target": "com.amazonaws.verifiedpermissions#TimestampFormat", "traits": { @@ -877,6 +940,12 @@ "smithy.api#documentation": "

The date and time the policy was last updated.

", "smithy.api#required": {} } + }, + "effect": { + "target": "com.amazonaws.verifiedpermissions#PolicyEffect", + "traits": { + "smithy.api#documentation": "

The effect of the decision that a policy returns to an authorization \nrequest. For example, \"effect\": \"Permit\".

" + } } }, "traits": { @@ -1351,6 +1420,16 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.verifiedpermissions#EntityIdPrefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.verifiedpermissions#EntityIdentifier": { "type": "structure", "members": { @@ -1392,7 +1471,7 @@ "parents": { "target": "com.amazonaws.verifiedpermissions#ParentList", "traits": { - "smithy.api#documentation": "

The parents in the hierarchy that contains the entity.

" + "smithy.api#documentation": "

The parent entities in the hierarchy that contains the entity. A principal or resource\n entity can be defined with at most 99 transitive parents per\n authorization request.

\n

A transitive parent is an entity in the hierarchy of entities including all direct\n parents, and parents of parents. For example, a user can be a member of 91 groups if one\n of those groups is a member of eight groups, for a total of 100: one entity, 91 entity\n parents, and eight parents of parents.

" } } }, @@ -1519,7 +1598,6 @@ "details": { "target": "com.amazonaws.verifiedpermissions#IdentitySourceDetails", "traits": { - "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#deprecated": { "message": "This attribute has been replaced by configuration.cognitoUserPoolConfiguration" }, @@ -1637,15 +1715,24 @@ "principal": { "target": "com.amazonaws.verifiedpermissions#EntityIdentifier", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The principal specified in the policy's scope. This element isn't included in the\n response when Principal isn't present in the policy content.

" } }, "resource": { "target": "com.amazonaws.verifiedpermissions#EntityIdentifier", "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, "smithy.api#documentation": "

The resource specified in the policy's scope. This element isn't included in the\n response when Resource isn't present in the policy content.

" } }, + "actions": { + "target": "com.amazonaws.verifiedpermissions#ActionIdentifierList", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The action that a policy permits or forbids. For example, \n{\"actions\": [{\"actionId\": \"ViewPhoto\", \"actionType\": \"PhotoFlash::Action\"}, {\"actionId\": \"SharePhoto\", \n\"actionType\": \"PhotoFlash::Action\"}]}.

" + } + }, "definition": { "target": "com.amazonaws.verifiedpermissions#PolicyDefinitionDetail", "traits": { @@ -1669,6 +1756,13 @@ "smithy.api#documentation": "

The date and time that the policy was last updated.

", "smithy.api#required": {} } + }, + "effect": { + "target": "com.amazonaws.verifiedpermissions#PolicyEffect", + "traits": { + "aws.cloudformation#cfnExcludeProperty": {}, + "smithy.api#documentation": "

The effect of the decision that a policy returns to an authorization \nrequest. For example, \"effect\": \"Permit\".

" + } } }, "traits": { @@ -2309,7 +2403,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to make an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source" }, - "smithy.api#documentation": "

Makes an authorization decision about a service request described in the parameters.\n The principal in this request comes from an external identity source in the form of an identity\n token formatted as a JSON web\n token (JWT). The information in the parameters can also define additional\n context that Verified Permissions can include in the evaluation. The request is evaluated against all\n matching policies in the specified policy store. The result of the decision is either\n Allow or Deny, along with a list of the policies that\n resulted in the decision.

\n

At this time, Verified Permissions accepts tokens from only Amazon Cognito.

\n

Verified Permissions validates each token that is specified in a request by checking its expiration\n date and its signature.

\n \n

If you delete a Amazon Cognito user pool or user, tokens from that deleted pool or that deleted user continue to be usable until they expire.

\n
", + "smithy.api#documentation": "

Makes an authorization decision about a service request described in the parameters.\n The principal in this request comes from an external identity source in the form of an identity\n token formatted as a JSON web\n token (JWT). The information in the parameters can also define additional\n context that Verified Permissions can include in the evaluation. The request is evaluated against all\n matching policies in the specified policy store. The result of the decision is either\n Allow or Deny, along with a list of the policies that\n resulted in the decision.

\n

At this time, Verified Permissions accepts tokens from only Amazon Cognito.

\n

Verified Permissions validates each token that is specified in a request by checking its expiration\n date and its signature.

\n \n

Tokens from an identity source user continue to be usable until they expire. \n Token revocation and resource deletion have no effect on the validity of a token in your policy store.
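As an illustration of the call shape only, a request with the Soto (Swift) client might look like the following sketch; member names assume the generated SotoVerifiedPermissions module, and the token value, entity names, and policy store ID are illustrative placeholders.

import SotoVerifiedPermissions

// Sketch only: member names assume the Soto-generated VerifiedPermissions shapes.
let awsClient = AWSClient(httpClientProvider: .createNew)
defer { try? awsClient.syncShutdown() }
let avp = VerifiedPermissions(client: awsClient, region: .useast1)

let response = try await avp.isAuthorizedWithToken(.init(
    accessToken: "<JWT access token>",          // illustrative; pass identityToken instead for ID tokens
    action: .init(actionId: "ViewPhoto", actionType: "PhotoFlash::Action"),
    policyStoreId: "PSEXAMPLEabcdefg111111",    // illustrative policy store ID
    resource: .init(entityId: "VacationPhoto94.jpg", entityType: "PhotoFlash::Photo")
))
print(response.decision, response.determiningPolicies)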

\n
", "smithy.api#readonly": {} } }, @@ -2455,7 +2549,7 @@ "maxResults": { "target": "com.amazonaws.verifiedpermissions#ListIdentitySourcesMaxResults", "traits": { - "smithy.api#documentation": "

Specifies the total number of results that you want included in each\n response. If additional items exist beyond the number you specify, the \n NextToken response element is returned with a value (not null). Include the\n specified value as the NextToken request parameter in the next call to the\n operation to get the next set of results. Note that the service might return fewer\n results than the maximum even when there are more results available. You should check \n NextToken after every operation to ensure that you receive all of the\n results.

\n

If you do not specify this parameter, the operation defaults to 10 identity sources per response.\n You can specify a maximum of 200 identity sources per response.

" + "smithy.api#documentation": "

Specifies the total number of results that you want included in each\n response. If additional items exist beyond the number you specify, the \n NextToken response element is returned with a value (not null). Include the\n specified value as the NextToken request parameter in the next call to the\n operation to get the next set of results. Note that the service might return fewer\n results than the maximum even when there are more results available. You should check \n NextToken after every operation to ensure that you receive all of the\n results.

\n

If you do not specify this parameter, the operation defaults to 10 identity sources per response.\n You can specify a maximum of 50 identity sources per response.

" } }, "filters": { @@ -2758,6 +2852,360 @@ "smithy.api#pattern": "^[A-Za-z0-9-_=+/\\.]*$" } }, + "com.amazonaws.verifiedpermissions#OpenIdConnectAccessTokenConfiguration": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC access tokens. For example,\n sub.

" + } + }, + "audiences": { + "target": "com.amazonaws.verifiedpermissions#Audiences", + "traits": { + "smithy.api#documentation": "

The access token aud claim values that you want to accept in your policy\n store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling access token\n claims. Contains the claim that you want to identify as the principal in an authorization\n request, and the values of the aud claim, or audiences, that you want to\n accept.

\n

This data type is part of a OpenIdConnectTokenSelection structure, which is a parameter of CreateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectAccessTokenConfigurationDetail": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC access tokens. For example,\n sub.

" + } + }, + "audiences": { + "target": "com.amazonaws.verifiedpermissions#Audiences", + "traits": { + "smithy.api#documentation": "

The access token aud claim values that you want to accept in your policy\n store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling access token\n claims. Contains the claim that you want to identify as the principal in an authorization\n request, and the values of the aud claim, or audiences, that you want to\n accept.

\n

This data type is part of a OpenIdConnectTokenSelectionDetail structure, which is a parameter of GetIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectAccessTokenConfigurationItem": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC access tokens. For example,\n sub.

" + } + }, + "audiences": { + "target": "com.amazonaws.verifiedpermissions#Audiences", + "traits": { + "smithy.api#documentation": "

The access token aud claim values that you want to accept in your policy\n store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling access token\n claims. Contains the claim that you want to identify as the principal in an authorization\n request, and the values of the aud claim, or audiences, that you want to\n accept.

\n

This data type is part of a OpenIdConnectTokenSelectionItem structure, which is a parameter of ListIdentitySources.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectConfiguration": { + "type": "structure", + "members": { + "issuer": { + "target": "com.amazonaws.verifiedpermissions#Issuer", + "traits": { + "smithy.api#documentation": "

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery\n endpoint at the path .well-known/openid-configuration.

", + "smithy.api#required": {} + } + }, + "entityIdPrefix": { + "target": "com.amazonaws.verifiedpermissions#EntityIdPrefix", + "traits": { + "smithy.api#documentation": "

A descriptive string that you want to prefix to user entities from your OIDC identity\n provider. For example, if you set an entityIdPrefix of\n MyOIDCProvider, you can reference principals in your policies in the format\n MyCorp::User::MyOIDCProvider|Carlos.

" + } + }, + "groupConfiguration": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectGroupConfiguration", + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

" + } + }, + "tokenSelection": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectTokenSelection", + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

This data type is part of a Configuration structure, which is a\n parameter to CreateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectConfigurationDetail": { + "type": "structure", + "members": { + "issuer": { + "target": "com.amazonaws.verifiedpermissions#Issuer", + "traits": { + "smithy.api#documentation": "

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery\n endpoint at the path .well-known/openid-configuration.

", + "smithy.api#required": {} + } + }, + "entityIdPrefix": { + "target": "com.amazonaws.verifiedpermissions#EntityIdPrefix", + "traits": { + "smithy.api#documentation": "

A descriptive string that you want to prefix to user entities from your OIDC identity\n provider. For example, if you set an entityIdPrefix of\n MyOIDCProvider, you can reference principals in your policies in the format\n MyCorp::User::MyOIDCProvider|Carlos.

" + } + }, + "groupConfiguration": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectGroupConfigurationDetail", + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

" + } + }, + "tokenSelection": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectTokenSelectionDetail", + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

This data type is part of a ConfigurationDetail structure,\n which is a parameter to GetIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectConfigurationItem": { + "type": "structure", + "members": { + "issuer": { + "target": "com.amazonaws.verifiedpermissions#Issuer", + "traits": { + "smithy.api#documentation": "

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery\n endpoint at the path .well-known/openid-configuration.

", + "smithy.api#required": {} + } + }, + "entityIdPrefix": { + "target": "com.amazonaws.verifiedpermissions#EntityIdPrefix", + "traits": { + "smithy.api#documentation": "

A descriptive string that you want to prefix to user entities from your OIDC identity\n provider. For example, if you set an entityIdPrefix of\n MyOIDCProvider, you can reference principals in your policies in the format\n MyCorp::User::MyOIDCProvider|Carlos.

" + } + }, + "groupConfiguration": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectGroupConfigurationItem", + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

" + } + }, + "tokenSelection": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectTokenSelectionItem", + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

This data type is part of a ConfigurationItem structure,\n which is a parameter to ListIdentitySources.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectGroupConfiguration": { + "type": "structure", + "members": { + "groupClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#documentation": "

The token claim that you want Verified Permissions to interpret as group membership. For example,\n groups.

", + "smithy.api#required": {} + } + }, + "groupEntityType": { + "target": "com.amazonaws.verifiedpermissions#GroupEntityType", + "traits": { + "smithy.api#documentation": "

The policy store entity type that you want to map your users' group claim to. For example,\n MyCorp::UserGroup. A group entity type is an entity that can have a user\n entity type as a member.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

\n

This data type is part of a OpenIdConnectConfiguration structure, which is a parameter of CreateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectGroupConfigurationDetail": { + "type": "structure", + "members": { + "groupClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#documentation": "

The token claim that you want Verified Permissions to interpret as group membership. For example,\n groups.

", + "smithy.api#required": {} + } + }, + "groupEntityType": { + "target": "com.amazonaws.verifiedpermissions#GroupEntityType", + "traits": { + "smithy.api#documentation": "

The policy store entity type that you want to map your users' group claim to. For example,\n MyCorp::UserGroup. A group entity type is an entity that can have a user\n entity type as a member.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

\n

This data type is part of a OpenIdConnectConfigurationDetail structure, which is a parameter of GetIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectGroupConfigurationItem": { + "type": "structure", + "members": { + "groupClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#documentation": "

The token claim that you want Verified Permissions to interpret as group membership. For example,\n groups.

", + "smithy.api#required": {} + } + }, + "groupEntityType": { + "target": "com.amazonaws.verifiedpermissions#GroupEntityType", + "traits": { + "smithy.api#documentation": "

The policy store entity type that you want to map your users' group claim to. For example,\n MyCorp::UserGroup. A group entity type is an entity that can have a user\n entity type as a member.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

\n

This data type is part of a OpenIdConnectConfigurationItem structure, which is a parameter of ListIdentitySources.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectIdentityTokenConfiguration": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC identity (ID) tokens. For example,\n sub.

" + } + }, + "clientIds": { + "target": "com.amazonaws.verifiedpermissions#ClientIds", + "traits": { + "smithy.api#documentation": "

The ID token audience, or client ID, claim values that you want to accept in your policy\n store from an OIDC identity provider. For example, 1example23456789,\n 2example10111213.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID)\n token claims. Contains the claim that you want to identify as the principal in an\n authorization request, and the values of the aud claim, or audiences, that\n you want to accept.

\n

This data type is part of a OpenIdConnectTokenSelection structure, which is a parameter of CreateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectIdentityTokenConfigurationDetail": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC identity (ID) tokens. For example,\n sub.

" + } + }, + "clientIds": { + "target": "com.amazonaws.verifiedpermissions#ClientIds", + "traits": { + "smithy.api#documentation": "

The ID token audience, or client ID, claim values that you want to accept in your policy\n store from an OIDC identity provider. For example, 1example23456789,\n 2example10111213.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID)\n token claims. Contains the claim that you want to identify as the principal in an\n authorization request, and the values of the aud claim, or audiences, that\n you want to accept.

\n

This data type is part of a OpenIdConnectTokenSelectionDetail structure, which is a parameter of GetIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectIdentityTokenConfigurationItem": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC identity (ID) tokens. For example,\n sub.

" + } + }, + "clientIds": { + "target": "com.amazonaws.verifiedpermissions#ClientIds", + "traits": { + "smithy.api#documentation": "

The ID token audience, or client ID, claim values that you want to accept in your policy\n store from an OIDC identity provider. For example, 1example23456789,\n 2example10111213.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID)\n token claims. Contains the claim that you want to identify as the principal in an\n authorization request, and the values of the aud claim, or audiences, that\n you want to accept.

\n

This data type is part of a OpenIdConnectTokenSelectionItem structure, which is a parameter of ListIdentitySources.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectTokenSelection": { + "type": "union", + "members": { + "accessTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectAccessTokenConfiguration", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing access tokens. Contains allowed audience claims,\n for example https://auth.example.com, and the claim that you want to map to the\n principal, for example sub.

" + } + }, + "identityTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectIdentityTokenConfiguration", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID\n claims, for example 1example23456789, and the claim that you want to map to\n the principal, for example sub.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

\n

This data type is part of a OpenIdConnectConfiguration structure, which is a parameter of CreateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectTokenSelectionDetail": { + "type": "union", + "members": { + "accessTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectAccessTokenConfigurationDetail", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing access tokens. Contains allowed audience claims,\n for example https://auth.example.com, and the claim that you want to map to the\n principal, for example sub.

" + } + }, + "identityTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectIdentityTokenConfigurationDetail", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID\n claims, for example 1example23456789, and the claim that you want to map to\n the principal, for example sub.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

\n

This data type is part of a OpenIdConnectConfigurationDetail structure, which is a parameter of GetIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#OpenIdConnectTokenSelectionItem": { + "type": "union", + "members": { + "accessTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectAccessTokenConfigurationItem", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing access tokens. Contains allowed audience claims,\n for example https://auth.example.com, and the claim that you want to map to the\n principal, for example sub.

" + } + }, + "identityTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#OpenIdConnectIdentityTokenConfigurationItem", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID\n claims, for example 1example23456789, and the claim that you want to map to\n the principal, for example sub.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

\n

This data type is part of a OpenIdConnectConfigurationItem structure, which is a parameter of ListIdentitySources.

" + } + }, "com.amazonaws.verifiedpermissions#OpenIdIssuer": { "type": "enum", "members": { @@ -2775,9 +3223,6 @@ "target": "com.amazonaws.verifiedpermissions#EntityIdentifier" }, "traits": { - "smithy.api#length": { - "max": 100 - }, "smithy.api#uniqueItems": {} } }, @@ -2871,6 +3316,23 @@ "smithy.api#documentation": "

A structure that describes a PolicyDefinition. It will\n always have either a StaticPolicy or a TemplateLinkedPolicy\n element.

\n

This data type is used as a response parameter for the CreatePolicy and ListPolicies\n operations.

" } }, + "com.amazonaws.verifiedpermissions#PolicyEffect": { + "type": "enum", + "members": { + "PERMIT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Permit" + } + }, + "FORBID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Forbid" + } + } + } + }, "com.amazonaws.verifiedpermissions#PolicyFilter": { "type": "structure", "members": { @@ -2949,6 +3411,12 @@ "smithy.api#documentation": "

The resource associated with the policy.

" } }, + "actions": { + "target": "com.amazonaws.verifiedpermissions#ActionIdentifierList", + "traits": { + "smithy.api#documentation": "

The action that a policy permits or forbids. For example, \n{\"actions\": [{\"actionId\": \"ViewPhoto\", \"actionType\": \"PhotoFlash::Action\"}, {\"actionId\": \"SharePhoto\", \n\"actionType\": \"PhotoFlash::Action\"}]}.

" + } + }, "definition": { "target": "com.amazonaws.verifiedpermissions#PolicyDefinitionItem", "traits": { @@ -2969,6 +3437,12 @@ "smithy.api#documentation": "

The date and time the policy was most recently updated.

", "smithy.api#required": {} } + }, + "effect": { + "target": "com.amazonaws.verifiedpermissions#PolicyEffect", + "traits": { + "smithy.api#documentation": "

The effect of the decision that a policy returns to an authorization \nrequest. For example, \"effect\": \"Permit\".

" + } } }, "traits": { @@ -3731,7 +4205,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of user groups and entities from an Amazon Cognito user pool identity\n source.

" + "smithy.api#documentation": "

The user group entities from an Amazon Cognito user pool identity\n source.

" } }, "com.amazonaws.verifiedpermissions#UpdateCognitoUserPoolConfiguration": { @@ -3769,10 +4243,16 @@ "traits": { "smithy.api#documentation": "

Contains configuration details of an Amazon Cognito user pool.

" } + }, + "openIdConnectConfiguration": { + "target": "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectConfiguration", + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

" + } } }, "traits": { - "smithy.api#documentation": "

Contains an updated configuration to replace the configuration in an existing\n identity source.

\n \n

At this time, the only valid member of this structure is a Amazon Cognito user pool\n configuration.

\n

You must specify a userPoolArn, and optionally, a\n ClientId.

\n
" + "smithy.api#documentation": "

Contains an update to replace the configuration in an existing\n identity source.

" } }, "com.amazonaws.verifiedpermissions#UpdateIdentitySource": { @@ -3795,7 +4275,7 @@ "aws.iam#iamAction": { "documentation": "Grants permission to update the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type" }, - "smithy.api#documentation": "

Updates the specified identity source to use a new identity provider (IdP) source, or to change\n the mapping of identities from the IdP to a different principal entity type.

\n \n

Verified Permissions is \n eventually consistent\n . It can take a few seconds for a new or changed element to propagate through\n the service and be visible in the results of other Verified Permissions operations.

\n
", + "smithy.api#documentation": "

Updates the specified identity source to use a new identity provider (IdP), or to change\n the mapping of identities from the IdP to a different principal entity type.

\n \n

Verified Permissions is \n eventually consistent\n . It can take a few seconds for a new or changed element to propagate through\n the service and be visible in the results of other Verified Permissions operations.

\n
", "smithy.api#idempotent": {} } }, @@ -3871,6 +4351,124 @@ "smithy.api#output": {} } }, + "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectAccessTokenConfiguration": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC access tokens. For example,\n sub.

" + } + }, + "audiences": { + "target": "com.amazonaws.verifiedpermissions#Audiences", + "traits": { + "smithy.api#documentation": "

The access token aud claim values that you want to accept in your policy\n store. For example, https://myapp.example.com, https://myapp2.example.com.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling access token\n claims. Contains the claim that you want to identify as the principal in an authorization\n request, and the values of the aud claim, or audiences, that you want to\n accept.

\n

This data type is part of a UpdateOpenIdConnectTokenSelection structure, which is a parameter to UpdateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectConfiguration": { + "type": "structure", + "members": { + "issuer": { + "target": "com.amazonaws.verifiedpermissions#Issuer", + "traits": { + "smithy.api#documentation": "

The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery\n endpoint at the path .well-known/openid-configuration.

", + "smithy.api#required": {} + } + }, + "entityIdPrefix": { + "target": "com.amazonaws.verifiedpermissions#EntityIdPrefix", + "traits": { + "smithy.api#documentation": "

A descriptive string that you want to prefix to user entities from your OIDC identity\n provider. For example, if you set an entityIdPrefix of\n MyOIDCProvider, you can reference principals in your policies in the format\n MyCorp::User::MyOIDCProvider|Carlos.

" + } + }, + "groupConfiguration": { + "target": "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectGroupConfiguration", + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

" + } + }, + "tokenSelection": { + "target": "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectTokenSelection", + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains configuration details of an OpenID Connect (OIDC) identity provider, or\n identity source, that Verified Permissions can use to generate entities from authenticated identities. It\n specifies the issuer URL, token type that you want to use, and policy store entity\n details.

\n

This data type is part of a UpdateConfiguration structure,\n which is a parameter to UpdateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectGroupConfiguration": { + "type": "structure", + "members": { + "groupClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#documentation": "

The token claim that you want Verified Permissions to interpret as group membership. For example,\n groups.

", + "smithy.api#required": {} + } + }, + "groupEntityType": { + "target": "com.amazonaws.verifiedpermissions#GroupEntityType", + "traits": { + "smithy.api#documentation": "

The policy store entity type that you want to map your users' group claim to. For example,\n MyCorp::UserGroup. A group entity type is an entity that can have a user\n entity type as a member.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The claim in OIDC identity provider tokens that indicates a user's group membership, and\n the entity type that you want to map it to. For example, this object can map the contents\n of a groups claim to MyCorp::UserGroup.

\n

This data type is part of a UpdateOpenIdConnectConfiguration structure, which is a parameter to UpdateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectIdentityTokenConfiguration": { + "type": "structure", + "members": { + "principalIdClaim": { + "target": "com.amazonaws.verifiedpermissions#Claim", + "traits": { + "smithy.api#default": "sub", + "smithy.api#documentation": "

The claim that determines the principal in OIDC identity (ID) tokens. For example,\n sub.

" + } + }, + "clientIds": { + "target": "com.amazonaws.verifiedpermissions#ClientIds", + "traits": { + "smithy.api#documentation": "

The ID token audience, or client ID, claim values that you want to accept in your policy\n store from an OIDC identity provider. For example, 1example23456789,\n 2example10111213.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of an OpenID Connect (OIDC) identity source for handling identity (ID)\n token claims. Contains the claim that you want to identify as the principal in an\n authorization request, and the values of the aud claim, or audiences, that\n you want to accept.

\n

This data type is part of a UpdateOpenIdConnectTokenSelection structure, which is a parameter to UpdateIdentitySource.

" + } + }, + "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectTokenSelection": { + "type": "union", + "members": { + "accessTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectAccessTokenConfiguration", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing access tokens. Contains allowed audience claims,\n for example https://auth.example.com, and the claim that you want to map to the\n principal, for example sub.

" + } + }, + "identityTokenOnly": { + "target": "com.amazonaws.verifiedpermissions#UpdateOpenIdConnectIdentityTokenConfiguration", + "traits": { + "smithy.api#documentation": "

The OIDC configuration for processing identity (ID) tokens. Contains allowed client ID\n claims, for example 1example23456789, and the claim that you want to map to\n the principal, for example sub.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The token type that you want to process from your OIDC identity provider. Your policy\n store can process either identity (ID) or access tokens from a given OIDC identity\n source.

\n

This data type is part of a UpdateOpenIdConnectConfiguration structure, which is a parameter to UpdateIdentitySource.

" + } + }, "com.amazonaws.verifiedpermissions#UpdatePolicy": { "type": "operation", "input": { @@ -3978,6 +4576,12 @@ "smithy.api#documentation": "

The resource specified in the policy's scope. This element isn't included in the\n response when Resource isn't present in the policy content.

" } }, + "actions": { + "target": "com.amazonaws.verifiedpermissions#ActionIdentifierList", + "traits": { + "smithy.api#documentation": "

The action that a policy permits or forbids. For example, \n{\"actions\": [{\"actionId\": \"ViewPhoto\", \"actionType\": \"PhotoFlash::Action\"}, {\"actionId\": \"SharePhoto\", \n\"actionType\": \"PhotoFlash::Action\"}]}.

" + } + }, "createdDate": { "target": "com.amazonaws.verifiedpermissions#TimestampFormat", "traits": { @@ -3991,6 +4595,12 @@ "smithy.api#documentation": "

The date and time that the policy was most recently updated.

", "smithy.api#required": {} } + }, + "effect": { + "target": "com.amazonaws.verifiedpermissions#PolicyEffect", + "traits": { + "smithy.api#documentation": "

The effect of the decision that a policy returns to an authorization \nrequest. For example, \"effect\": \"Permit\".

" + } } }, "traits": { diff --git a/models/vpc-lattice.json b/models/vpc-lattice.json index 374eafd7fb..771bd32f11 100644 --- a/models/vpc-lattice.json +++ b/models/vpc-lattice.json @@ -229,7 +229,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the listener rules in a batch. You can use this operation to change the priority of\n listener rules. This can be useful when bulk updating or swapping rule priority.

", + "smithy.api#documentation": "

Updates the listener rules in a batch. You can use this operation to change the priority of\n listener rules. This can be useful when bulk updating or swapping rule priority.

\n

\n Required permissions:\n vpc-lattice:UpdateRule\n

\n

For more information, see How Amazon VPC Lattice works with\n IAM in the Amazon VPC Lattice User Guide.

", "smithy.api#http": { "code": 200, "method": "PATCH", @@ -364,7 +364,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables access logs to be sent to Amazon CloudWatch, Amazon S3, and Amazon Kinesis Data Firehose. The service network owner\n can use the access logs to audit the services in the network. The service network owner will only\n see access logs from clients and services that are associated with their service network. Access\n log entries represent traffic originated from VPCs associated with that network. For more\n information, see Access logs in the\n Amazon VPC Lattice User Guide.

", + "smithy.api#documentation": "

Enables access logs to be sent to Amazon CloudWatch, Amazon S3, and Amazon Kinesis Data Firehose. The service network owner\n can use the access logs to audit the services in the network. The service network owner can only\n see access logs from clients and services that are associated with their service network. Access\n log entries represent traffic originated from VPCs associated with that network. For more\n information, see Access logs in the\n Amazon VPC Lattice User Guide.
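As a hedged sketch, a CreateAccessLogSubscription request that sends a service network's access logs to a CloudWatch Logs group might look like the following; the resourceIdentifier and destinationArn member names are assumed from the request shape elsewhere in this model, and both identifiers are placeholders.

```json
{
  "resourceIdentifier": "sn-0123456789abcdef0",
  "destinationArn": "arn:aws:logs:us-west-2:123456789012:log-group:/lattice/access-logs"
}
```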

", "smithy.api#http": { "code": 201, "method": "POST", @@ -507,20 +507,20 @@ "protocol": { "target": "com.amazonaws.vpclattice#ListenerProtocol", "traits": { - "smithy.api#documentation": "

The listener protocol HTTP or HTTPS.

", + "smithy.api#documentation": "

The listener protocol.

", "smithy.api#required": {} } }, "port": { "target": "com.amazonaws.vpclattice#Port", "traits": { - "smithy.api#documentation": "

The listener port. You can specify a value from 1 to 65535. For\n HTTP, the default is 80. For HTTPS, the default is 443.

" + "smithy.api#documentation": "

The listener port. You can specify a value from 1 to 65535. For\n HTTP, the default is 80. For HTTPS, the default is 443.

" } }, "defaultAction": { "target": "com.amazonaws.vpclattice#RuleAction", "traits": { - "smithy.api#documentation": "

The action for the default rule. Each listener has a default rule. Each rule consists of a\n priority, one or more actions, and one or more conditions. The default rule is the rule that's\n used if no other rules match. Each rule must include exactly one of the following types of\n actions: forward or fixed-response, and it must be the last action to\n be performed.

", + "smithy.api#documentation": "

The action for the default rule. Each listener has a default rule. The default rule is used \n if no other rules match.

", "smithy.api#required": {} } }, @@ -731,7 +731,7 @@ "action": { "target": "com.amazonaws.vpclattice#RuleAction", "traits": { - "smithy.api#documentation": "

The rule action. Each rule must include exactly one of the following types of actions:\n forward or fixed-response, and it must be the last action to be\n performed.

" + "smithy.api#documentation": "

The rule action.

" } } } @@ -910,7 +910,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates a service with a service network.

\n

You can't use this operation if the service and service network are already associated or if\n there is a disassociation or deletion in progress. If the association fails, you can retry the\n operation by deleting the association and recreating it.

\n

You cannot associate a service and service network that are shared with a caller. The caller\n must own either the service or the service network.

\n

As a result of this operation, the association is created in the service network account and\n the association owner account.

", + "smithy.api#documentation": "

Associates a service with a service network. For more information, see Manage service associations in the Amazon VPC Lattice User Guide.

\n

You can't use this operation if the service and service network are already associated or if\n there is a disassociation or deletion in progress. If the association fails, you can retry the\n operation by deleting the association and recreating it.

\n

You cannot associate a service and service network that are shared with a caller. The caller\n must own either the service or the service network.

\n

As a result of this operation, the association is created in the service network account and\n the association owner account.

", "smithy.api#http": { "code": 200, "uri": "/servicenetworkserviceassociations", @@ -963,7 +963,7 @@ "status": { "target": "com.amazonaws.vpclattice#ServiceNetworkServiceAssociationStatus", "traits": { - "smithy.api#documentation": "

The operation's status.

" + "smithy.api#documentation": "

The association status.

" } }, "arn": { @@ -1024,7 +1024,7 @@ } ], "traits": { - "smithy.api#documentation": "

Associates a VPC with a service network. When you associate a VPC with the service network,\n it enables all the resources within that VPC to be clients and communicate with other services in\n the service network. For more information, see Manage VPC associations in the Amazon VPC Lattice User Guide.

\n

You can't use this operation if there is a disassociation in progress. If the association\n fails, retry by deleting the association and recreating it.

\n

As a result of this operation, the association gets created in the service network account\n and the VPC owner account.

\n

Once a security group is added to the VPC association it cannot be removed. You can add or\n update the security groups being used for the VPC association once a security group is attached.\n To remove all security groups you must reassociate the VPC.

", + "smithy.api#documentation": "

Associates a VPC with a service network. When you associate a VPC with the service network,\n it enables all the resources within that VPC to be clients and communicate with other services in\n the service network. For more information, see Manage VPC associations in the Amazon VPC Lattice User Guide.

\n

You can't use this operation if there is a disassociation in progress. If the association\n fails, retry by deleting the association and recreating it.

\n

As a result of this operation, the association gets created in the service network account\n and the VPC owner account.

\n

If you add a security group to the service network and VPC association, the association must\n continue to always have at least one security group. You can add or edit security groups at any\n time. However, to remove all security groups, you must first delete the association and recreate\n it without security groups.

", "smithy.api#http": { "code": 200, "uri": "/servicenetworkvpcassociations", @@ -1087,7 +1087,7 @@ "status": { "target": "com.amazonaws.vpclattice#ServiceNetworkVpcAssociationStatus", "traits": { - "smithy.api#documentation": "

The operation's status.

" + "smithy.api#documentation": "

The association status.

" } }, "arn": { @@ -1189,7 +1189,7 @@ "status": { "target": "com.amazonaws.vpclattice#ServiceStatus", "traits": { - "smithy.api#documentation": "

The status. If the status is CREATE_FAILED, you will have to delete and\n recreate the service.

" + "smithy.api#documentation": "

The status. If the status is CREATE_FAILED, you must delete and\n recreate the service.

" } }, "authType": { @@ -1267,7 +1267,7 @@ "config": { "target": "com.amazonaws.vpclattice#TargetGroupConfig", "traits": { - "smithy.api#documentation": "

The target group configuration. If type is set to LAMBDA, this\n parameter doesn't apply.

" + "smithy.api#documentation": "

The target group configuration.

" } }, "clientToken": { @@ -1315,13 +1315,13 @@ "config": { "target": "com.amazonaws.vpclattice#TargetGroupConfig", "traits": { - "smithy.api#documentation": "

The target group configuration. If type is set to LAMBDA, this\n parameter doesn't apply.

" + "smithy.api#documentation": "

The target group configuration.

" } }, "status": { "target": "com.amazonaws.vpclattice#TargetGroupStatus", "traits": { - "smithy.api#documentation": "

The operation's status. You can retry the operation if the status is\n CREATE_FAILED. However, if you retry it while the status is\n CREATE_IN_PROGRESS, there is no change in the status.

" + "smithy.api#documentation": "

The status. You can retry the operation if the status is CREATE_FAILED. \n However, if you retry it while the status is CREATE_IN_PROGRESS, there is \n no change in the status.

" } } } @@ -1404,7 +1404,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified auth policy. If an auth is set to Amazon Web Services_IAM\n and the auth policy is deleted, all requests will be denied by default. If you are trying to\n remove the auth policy completely, you must set the auth_type to NONE. If auth is\n enabled on the resource, but no auth policy is set, all requests will be denied.

", + "smithy.api#documentation": "

Deletes the specified auth policy. If an auth is set to AWS_IAM and the auth\n policy is deleted, all requests are denied. If you are trying to remove the auth\n policy completely, you must set the auth type to NONE. If auth is enabled on the\n resource, but no auth policy is set, all requests are denied.

", "smithy.api#http": { "code": 204, "method": "DELETE", @@ -1738,7 +1738,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the association between a specified service and the specific service network. This\n request will fail if an association is still in progress.

", + "smithy.api#documentation": "

Deletes the association between a specified service and the specific service network. This\n operation fails if an association is still in progress.

", "smithy.api#http": { "code": 200, "uri": "/servicenetworkserviceassociations/{serviceNetworkServiceAssociationIdentifier}", @@ -1772,7 +1772,7 @@ "status": { "target": "com.amazonaws.vpclattice#ServiceNetworkServiceAssociationStatus", "traits": { - "smithy.api#documentation": "

The operation's status. You can retry the operation if the status is\n DELETE_FAILED. However, if you retry it when the status is\n DELETE_IN_PROGRESS, there is no change in the status.

" + "smithy.api#documentation": "

The status. You can retry the operation if the status is DELETE_FAILED. \n However, if you retry it when the status is DELETE_IN_PROGRESS, there is no \n change in the status.

" } }, "arn": { @@ -1846,7 +1846,7 @@ "status": { "target": "com.amazonaws.vpclattice#ServiceNetworkVpcAssociationStatus", "traits": { - "smithy.api#documentation": "

The status. You can retry the operation if the status is DELETE_FAILED.\n However, if you retry it when the status is DELETE_IN_PROGRESS, there is no change\n in the status.

" + "smithy.api#documentation": "

The status. You can retry the operation if the status is DELETE_FAILED.\n However, if you retry it while the status is DELETE_IN_PROGRESS, there is no change\n in the status.

" } }, "arn": { @@ -2087,7 +2087,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about an action that returns a custom HTTP response.

" + "smithy.api#documentation": "

Describes an action that returns a custom HTTP response.

" } }, "com.amazonaws.vpclattice#ForwardAction": { @@ -2096,7 +2096,7 @@ "targetGroups": { "target": "com.amazonaws.vpclattice#WeightedTargetGroupList", "traits": { - "smithy.api#documentation": "

The target groups. Traffic matching the rule is forwarded to the specified target groups.\n With forward actions, you can assign a weight that controls the prioritization and selection of\n each target group. This means that requests are distributed to individual target groups based on\n their weights. For example, if two target groups have the same weight, each target group receives\n half of the traffic.

\n

The default value is 1. This means that if only one target group is provided, there is no\n need to set the weight; 100% of traffic will go to that target group.

", + "smithy.api#documentation": "

The target groups. Traffic matching the rule is forwarded to the specified target groups.\n With forward actions, you can assign a weight that controls the prioritization and selection of\n each target group. This means that requests are distributed to individual target groups based on\n their weights. For example, if two target groups have the same weight, each target group receives\n half of the traffic.

\n

The default value is 1. This means that if only one target group is provided, there is no\n need to set the weight; 100% of the traffic goes to that target group.
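For example, a forward action that splits traffic evenly between two target groups could be written as the sketch below (each weight is 10, so each group receives half of the requests); the targetGroupIdentifier values are placeholders and the WeightedTargetGroup member names are assumed from this model.

```json
{
  "forward": {
    "targetGroups": [
      { "targetGroupIdentifier": "tg-0123456789abcdef0", "weight": 10 },
      { "targetGroupIdentifier": "tg-0abcdef1234567890", "weight": 10 }
    ]
  }
}
```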

", "smithy.api#required": {} } } @@ -2267,7 +2267,7 @@ "state": { "target": "com.amazonaws.vpclattice#AuthPolicyState", "traits": { - "smithy.api#documentation": "

The state of the auth policy. The auth policy is only active when the auth type is set to\n Amazon Web Services_IAM. If you provide a policy, then authentication and\n authorization decisions are made based on this policy and the client's IAM policy. If the auth\n type is NONE, then any auth policy you provide will remain inactive. For more\n information, see Create a service\n network in the Amazon VPC Lattice User Guide.

" + "smithy.api#documentation": "

The state of the auth policy. The auth policy is only active when the auth type is set to\n AWS_IAM. If you provide a policy, then authentication and authorization decisions\n are made based on this policy and the client's IAM policy. If the auth type is NONE,\n then any auth policy that you provide remains inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide.

" } }, "createdAt": { @@ -2431,7 +2431,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about the resource policy. The resource policy is an IAM policy\n created by AWS RAM on behalf of the resource owner when they share a resource.

", + "smithy.api#documentation": "

Retrieves information about the resource policy. The resource policy is an IAM policy\n created on behalf of the resource owner when they share a resource.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -2446,7 +2446,7 @@ "resourceArn": { "target": "com.amazonaws.vpclattice#ResourceArn", "traits": { - "smithy.api#documentation": "

An IAM policy.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service network or service.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2459,7 +2459,7 @@ "policy": { "target": "com.amazonaws.vpclattice#PolicyString", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the service network or service.

" + "smithy.api#documentation": "

An IAM policy.

" } } } @@ -2849,7 +2849,7 @@ "customDomainName": { "target": "com.amazonaws.vpclattice#ServiceCustomDomainName", "traits": { - "smithy.api#documentation": "

The custom domain name of the service.

" + "smithy.api#documentation": "

The custom domain name of the service.

" } }, "failureMessage": { @@ -3226,7 +3226,7 @@ "caseSensitive": { "target": "com.amazonaws.vpclattice#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether the match is case sensitive. Defaults to false.

" + "smithy.api#documentation": "

Indicates whether the match is case sensitive.

" } } }, @@ -3288,24 +3288,24 @@ "exact": { "target": "com.amazonaws.vpclattice#HeaderMatchExact", "traits": { - "smithy.api#documentation": "

Specifies an exact type match.

" + "smithy.api#documentation": "

An exact type match.

" } }, "prefix": { "target": "com.amazonaws.vpclattice#HeaderMatchPrefix", "traits": { - "smithy.api#documentation": "

Specifies a prefix type match. Matches the value with the prefix.

" + "smithy.api#documentation": "

A prefix type match. Matches the value with the prefix.

" } }, "contains": { "target": "com.amazonaws.vpclattice#HeaderMatchContains", "traits": { - "smithy.api#documentation": "

Specifies a contains type match.

" + "smithy.api#documentation": "

A contains type match.

" } } }, "traits": { - "smithy.api#documentation": "

Describes a header match type. Only one can be provided.

" + "smithy.api#documentation": "

Describes a header match type.
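An illustrative sketch of a listener rule condition that uses a prefix header match follows; only one of exact, prefix, or contains may be provided. The httpMatch/headerMatches nesting, the header name, and the values are assumptions for illustration, not taken from this hunk.

```json
{
  "match": {
    "httpMatch": {
      "headerMatches": [
        {
          "name": "x-api-version",
          "caseSensitive": false,
          "match": { "prefix": "v2" }
        }
      ]
    }
  }
}
```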

" } }, "com.amazonaws.vpclattice#HealthCheckConfig": { @@ -3368,12 +3368,12 @@ "matcher": { "target": "com.amazonaws.vpclattice#Matcher", "traits": { - "smithy.api#documentation": "

The codes to use when checking for a successful response from a target. These are called\n Success codes in the console.

" + "smithy.api#documentation": "

The codes to use when checking for a successful response from a target.

" } } }, "traits": { - "smithy.api#documentation": "

The health check configuration of a target group. Health check configurations aren't used\n for LAMBDA and ALB target groups.

" + "smithy.api#documentation": "

Describes the health check configuration of a target group. Health check configurations aren't used\n for target groups of type LAMBDA or ALB.
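A minimal sketch of a health check configuration for an HTTP target group is shown below; the member names are assumed from the HealthCheckConfig shape in this model, the matcher's httpCode range is an assumption about the Matcher shape, and all values are placeholders.

```json
{
  "healthCheck": {
    "enabled": true,
    "protocol": "HTTP",
    "protocolVersion": "HTTP1",
    "path": "/health",
    "healthCheckIntervalSeconds": 30,
    "healthCheckTimeoutSeconds": 5,
    "healthyThresholdCount": 3,
    "unhealthyThresholdCount": 2,
    "matcher": { "httpCode": "200-299" }
  }
}
```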

" } }, "com.amazonaws.vpclattice#HealthCheckIntervalSeconds": { @@ -3621,7 +3621,7 @@ "items": { "target": "com.amazonaws.vpclattice#AccessLogSubscriptionList", "traits": { - "smithy.api#documentation": "

The access log subscriptions.

", + "smithy.api#documentation": "

Information about the access log subscriptions.

", "smithy.api#required": {} } }, @@ -3836,7 +3836,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the associations between the service network and the service. You can filter the list\n either by service or service network. You must provide either the service network identifier or\n the service identifier.

\n

Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a\n service network is associated with a VPC or when a service is associated with a service network.\n If the association is for a resource that is shared with another account, the association will\n include the local account ID as the prefix in the ARN for each account the resource is shared\n with.

", + "smithy.api#documentation": "

Lists the associations between the service network and the service. You can filter the list\n either by service or service network. You must provide either the service network identifier or\n the service identifier.

\n

Every association in Amazon VPC Lattice is given a unique Amazon Resource Name (ARN), such as when a\n service network is associated with a VPC or when a service is associated with a service network.\n If the association is for a resource that is shared with another account, the association\n includes the local account ID as the prefix in the ARN for each account the resource is shared\n with.

", "smithy.api#http": { "code": 200, "uri": "/servicenetworkserviceassociations", @@ -4129,7 +4129,7 @@ "items": { "target": "com.amazonaws.vpclattice#ServiceList", "traits": { - "smithy.api#documentation": "

The services.

" + "smithy.api#documentation": "

Information about the services.

" } }, "nextToken": { @@ -4190,7 +4190,7 @@ "tags": { "target": "com.amazonaws.vpclattice#TagMap", "traits": { - "smithy.api#documentation": "

The tags.

" + "smithy.api#documentation": "

Information about the tags.

" } } } @@ -4253,7 +4253,7 @@ "vpcIdentifier": { "target": "com.amazonaws.vpclattice#VpcId", "traits": { - "smithy.api#documentation": "

The ID or Amazon Resource Name (ARN) of the service.

", + "smithy.api#documentation": "

The ID or Amazon Resource Name (ARN) of the VPC.

", "smithy.api#httpQuery": "vpcIdentifier" } }, @@ -4309,7 +4309,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the targets for the target group. By default, all targets are included. You can use\n this API to check the health status of targets. You can also filter the results by target.

", + "smithy.api#documentation": "

Lists the targets for the target group. By default, all targets are included. You can use\n this API to check the health status of targets. You can also filter the results by target.

", "smithy.api#http": { "code": 200, "uri": "/targetgroups/{targetGroupIdentifier}/listtargets", @@ -4352,7 +4352,7 @@ "targets": { "target": "com.amazonaws.vpclattice#TargetList", "traits": { - "smithy.api#documentation": "

The targets to list.

", + "smithy.api#documentation": "

The targets.

", "smithy.api#length": { "min": 0, "max": 20 @@ -4458,6 +4458,11 @@ "name": "HTTPS", "value": "HTTPS", "documentation": "Indicates HTTPS protocol" + }, + { + "name": "TLS_PASSTHROUGH", + "value": "TLS_PASSTHROUGH", + "documentation": "Indicates TLS_PASSTHROUGH protocol" } ] } @@ -4529,7 +4534,7 @@ } }, "traits": { - "smithy.api#documentation": "

The codes to use when checking for a successful response from a target for health\n checks.

" + "smithy.api#documentation": "

Describes the codes to use when checking for a successful response from a target for health\n checks.

" } }, "com.amazonaws.vpclattice#MaxResults": { @@ -4672,7 +4677,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4691,7 +4695,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -4719,13 +4722,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -4738,7 +4742,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4752,7 +4755,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4775,7 +4777,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4810,11 +4811,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4825,16 +4824,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -4848,14 +4850,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -4864,15 +4864,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4883,16 +4882,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -4906,7 +4908,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -4926,11 +4927,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4941,20 +4940,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -4965,18 +4966,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] }, @@ -5318,7 +5323,7 @@ "caseSensitive": { "target": "com.amazonaws.vpclattice#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether the match is case sensitive. Defaults to false.

" + "smithy.api#documentation": "

Indicates whether the match is case sensitive.

" } } }, @@ -5411,7 +5416,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the auth policy.

", + "smithy.api#documentation": "

Creates or updates the auth policy. The policy string in JSON must not contain newlines or\n blank lines.

\n

For more information, see Auth policies \n in the Amazon VPC Lattice User Guide.
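A hedged example of a PutAuthPolicy request body follows; the resourceIdentifier is carried in the request URI, and the policy is a single-line JSON string with no newlines or blank lines, per the note above. The vpc-lattice-svcs:Invoke action, the wildcard principal, and the resource are illustrative assumptions.

```json
{
  "policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"vpc-lattice-svcs:Invoke\",\"Resource\":\"*\"}]}"
}
```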

", "smithy.api#http": { "code": 200, "method": "PUT", @@ -5433,7 +5438,7 @@ "policy": { "target": "com.amazonaws.vpclattice#AuthPolicyString", "traits": { - "smithy.api#documentation": "

The auth policy.

", + "smithy.api#documentation": "

The auth policy. The policy string in JSON must not contain newlines or blank lines.

", "smithy.api#required": {} } } @@ -5445,13 +5450,13 @@ "policy": { "target": "com.amazonaws.vpclattice#AuthPolicyString", "traits": { - "smithy.api#documentation": "

The auth policy.

" + "smithy.api#documentation": "

The auth policy. The policy string in JSON must not contain newlines or blank lines.

" } }, "state": { "target": "com.amazonaws.vpclattice#AuthPolicyState", "traits": { - "smithy.api#documentation": "

The state of the auth policy. The auth policy is only active when the auth type is set to\n Amazon Web Services_IAM. If you provide a policy, then authentication and\n authorization decisions are made based on this policy and the client's IAM policy. If the Auth\n type is NONE, then, any auth policy you provide will remain inactive. For more\n information, see Create a service\n network in the Amazon VPC Lattice User Guide.

" + "smithy.api#documentation": "

The state of the auth policy. The auth policy is only active when the auth type is set to\n AWS_IAM. If you provide a policy, then authentication and authorization decisions\n are made based on this policy and the client's IAM policy. If the Auth type is NONE,\n then, any auth policy that you provide remains inactive. For more information, see Create a service network in the Amazon VPC Lattice User Guide.

" } } } @@ -5505,7 +5510,7 @@ "policy": { "target": "com.amazonaws.vpclattice#PolicyString", "traits": { - "smithy.api#documentation": "

An IAM policy.

", + "smithy.api#documentation": "

An IAM policy. The policy string in JSON must not contain newlines or blank\n lines.

", "smithy.api#required": {} } } @@ -5697,12 +5702,12 @@ "fixedResponse": { "target": "com.amazonaws.vpclattice#FixedResponseAction", "traits": { - "smithy.api#documentation": "

Describes the rule action that returns a custom HTTP response.

" + "smithy.api#documentation": "

The fixed response action. The rule returns a custom HTTP response.

" } } }, "traits": { - "smithy.api#documentation": "

Describes the action for a rule. Each rule must include exactly one of the following types\n of actions: forward or fixed-response, and it must be the last action\n to be performed.

" + "smithy.api#documentation": "

Describes the action for a rule.

" } }, "com.amazonaws.vpclattice#RuleArn": { @@ -5792,13 +5797,13 @@ "isDefault": { "target": "com.amazonaws.vpclattice#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether this is the default rule. Listener rules are created when you create a\n listener. Each listener has a default rule for checking connection requests.\n

" + "smithy.api#documentation": "

Indicates whether this is the default listener rule.

" } }, "priority": { "target": "com.amazonaws.vpclattice#RulePriority", "traits": { - "smithy.api#documentation": "

The priority of the rule.

" + "smithy.api#documentation": "

The priority of the rule.

" } }, "createdAt": { @@ -5854,7 +5859,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents an object when updating a rule.

" + "smithy.api#documentation": "

Describes a rule update.

" } }, "com.amazonaws.vpclattice#RuleUpdateFailure": { @@ -5943,7 +5948,7 @@ "action": { "target": "com.amazonaws.vpclattice#RuleAction", "traits": { - "smithy.api#documentation": "

The action for the default rule.

" + "smithy.api#documentation": "

The action for the rule.

" } } }, @@ -6276,13 +6281,13 @@ "dnsEntry": { "target": "com.amazonaws.vpclattice#DnsEntry", "traits": { - "smithy.api#documentation": "

DNS information about the service.

" + "smithy.api#documentation": "

The DNS information.

" } }, "customDomainName": { "target": "com.amazonaws.vpclattice#ServiceCustomDomainName", "traits": { - "smithy.api#documentation": "

The custom domain name of the service.

" + "smithy.api#documentation": "

The custom domain name of the service.

" } } }, @@ -6620,13 +6625,13 @@ "dnsEntry": { "target": "com.amazonaws.vpclattice#DnsEntry", "traits": { - "smithy.api#documentation": "

DNS information about the service.

" + "smithy.api#documentation": "

The DNS information.

" } }, "customDomainName": { "target": "com.amazonaws.vpclattice#ServiceCustomDomainName", "traits": { - "smithy.api#documentation": "

The custom domain name of the service.

" + "smithy.api#documentation": "

The custom domain name of the service.

" } }, "status": { @@ -6753,7 +6758,7 @@ "id": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the target. If the target type of the target group is INSTANCE, this\n is an instance ID. If the target type is IP , this is an IP address. If the target\n type is LAMBDA, this is the ARN of the Lambda function. If the target type is\n ALB, this is the ARN of the Application Load Balancer.

", + "smithy.api#documentation": "

The ID of the target. If the target group type is INSTANCE, this is\n an instance ID. If the target group type is IP, this is an IP address. If the target\n group type is LAMBDA, this is the ARN of a Lambda function. If the target group type \n is ALB, this is the ARN of an Application Load Balancer.
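For example, a RegisterTargets request body for a target group of type INSTANCE might look like the sketch below; the instance IDs and port are placeholders, and for an IP, LAMBDA, or ALB target group the id values would instead be IP addresses or ARNs as described above.

```json
{
  "targets": [
    { "id": "i-0123456789abcdef0", "port": 8080 },
    { "id": "i-0fedcba9876543210", "port": 8080 }
  ]
}
```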

", "smithy.api#length": { "min": 1, "max": 200 @@ -6764,7 +6769,7 @@ "port": { "target": "com.amazonaws.vpclattice#Port", "traits": { - "smithy.api#documentation": "

The port on which the target is listening. For HTTP, the default is 80. For\n HTTPS, the default is 443.

" + "smithy.api#documentation": "

The port on which the target is listening. For HTTP, the default is 80. For\n HTTPS, the default is 443.

" } } }, @@ -6778,7 +6783,7 @@ "id": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the target. If the target type of the target group is INSTANCE, this\n is an instance ID. If the target type is IP , this is an IP address. If the target\n type is LAMBDA, this is the ARN of the Lambda function. If the target type is\n ALB, this is the ARN of the Application Load Balancer.

" + "smithy.api#documentation": "

The ID of the target. If the target group type is INSTANCE, this is\n an instance ID. If the target group type is IP, this is an IP address. If the target\n group type is LAMBDA, this is the ARN of a Lambda function. If the target group type is\n ALB, this is the ARN of an Application Load Balancer.

" } }, "port": { @@ -6860,48 +6865,48 @@ "port": { "target": "com.amazonaws.vpclattice#Port", "traits": { - "smithy.api#documentation": "

The port on which the targets are listening. For HTTP, the default is 80. For\n HTTPS, the default is 443\n

" + "smithy.api#documentation": "

The port on which the targets are listening. For HTTP, the default is 80. For\n HTTPS, the default is 443. Not supported if the target group type is LAMBDA.

" } }, "protocol": { "target": "com.amazonaws.vpclattice#TargetGroupProtocol", "traits": { - "smithy.api#documentation": "

The protocol to use for routing traffic to the targets. Default is the protocol of a target\n group.

" + "smithy.api#documentation": "

The protocol to use for routing traffic to the targets. The default is the protocol of the target\n group. Not supported if the target group type is LAMBDA.

" } }, "protocolVersion": { "target": "com.amazonaws.vpclattice#TargetGroupProtocolVersion", "traits": { - "smithy.api#documentation": "

The protocol version. Default value is HTTP1.

" + "smithy.api#documentation": "

The protocol version. The default is HTTP1.\n Not supported if the target group type is LAMBDA.

" } }, "ipAddressType": { "target": "com.amazonaws.vpclattice#IpAddressType", "traits": { - "smithy.api#documentation": "

The type of IP address used for the target group. The possible values are ipv4 and ipv6.\n This is an optional parameter. If not specified, the IP address type defaults to ipv4.

" + "smithy.api#documentation": "

The type of IP address used for the target group. Supported only if the target group\n type is IP. The default is IPV4.

" } }, "vpcIdentifier": { "target": "com.amazonaws.vpclattice#VpcId", "traits": { - "smithy.api#documentation": "

The ID of the VPC.

" + "smithy.api#documentation": "

The ID of the VPC. Not supported if the target group type is LAMBDA.

" } }, "healthCheck": { "target": "com.amazonaws.vpclattice#HealthCheckConfig", "traits": { - "smithy.api#documentation": "

The health check configuration.

" + "smithy.api#documentation": "

The health check configuration. Not supported if the target group type is\n LAMBDA or ALB.

" } }, "lambdaEventStructureVersion": { "target": "com.amazonaws.vpclattice#LambdaEventStructureVersion", "traits": { - "smithy.api#documentation": "Lambda event structure version" + "smithy.api#documentation": "

The version of the event structure that your Lambda function receives. \n Supported only if the target group type is LAMBDA. The default is V1.

" } } }, "traits": { - "smithy.api#documentation": "

Describes the configuration of a target group. Lambda functions don't support target group\n configuration.

" + "smithy.api#documentation": "

Describes the configuration of a target group.

\n

For more information, see Target groups in the\n Amazon VPC Lattice User Guide.
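A hedged sketch of a CreateTargetGroup request for an IP target group, using the config members described above; the name, VPC ID, and the CreateTargetGroup wrapper members are illustrative assumptions rather than definitions from this hunk.

```json
{
  "name": "payments-ip-targets",
  "type": "IP",
  "config": {
    "port": 443,
    "protocol": "HTTPS",
    "protocolVersion": "HTTP1",
    "ipAddressType": "IPV4",
    "vpcIdentifier": "vpc-0123456789abcdef0"
  }
}
```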

" } }, "com.amazonaws.vpclattice#TargetGroupId": { @@ -6953,6 +6958,11 @@ "name": "HTTPS", "value": "HTTPS", "documentation": "Indicates HTTPS protocol" + }, + { + "name": "TCP", + "value": "TCP", + "documentation": "Indicates TCP protocol" } ] } @@ -7059,7 +7069,7 @@ "ipAddressType": { "target": "com.amazonaws.vpclattice#IpAddressType", "traits": { - "smithy.api#documentation": "

The type of IP address used for the target group. The possible values are ipv4 and ipv6.\n This is an optional parameter. If not specified, the IP address type defaults to ipv4.

" + "smithy.api#documentation": "

The type of IP address used for the target group. The possible values are IPV4\n and IPV6. This is an optional parameter. If not specified, the default is\n IPV4.

" } }, "vpcIdentifier": { @@ -7083,18 +7093,18 @@ "serviceArns": { "target": "com.amazonaws.vpclattice#ServiceArnList", "traits": { - "smithy.api#documentation": "

The list of Amazon Resource Names (ARNs) of the service.

" + "smithy.api#documentation": "

The Amazon Resource Names (ARNs) of the service.

" } }, "lambdaEventStructureVersion": { "target": "com.amazonaws.vpclattice#LambdaEventStructureVersion", "traits": { - "smithy.api#documentation": "Lambda event structure version" + "smithy.api#documentation": "

The version of the event structure that your Lambda function receives.\n Supported only if the target group type is LAMBDA.

" } } }, "traits": { - "smithy.api#documentation": "

Summary information about a target group.

" + "smithy.api#documentation": "

Summary information about a target group.

\n

For more information, see Target groups in the\n Amazon VPC Lattice User Guide.

" } }, "com.amazonaws.vpclattice#TargetGroupType": { @@ -7182,7 +7192,7 @@ "id": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The ID of the target. If the target type of the target group is INSTANCE, this\n is an instance ID. If the target type is IP , this is an IP address. If the target\n type is LAMBDA, this is the ARN of the Lambda function. If the target type is\n ALB, this is the ARN of the Application Load Balancer.

" + "smithy.api#documentation": "

The ID of the target. If the target group type is INSTANCE, this is\n an instance ID. If the target group type is IP, this is an IP address. If the target\n group type is LAMBDA, this is the ARN of a Lambda function. If the target type is\n ALB, this is the ARN of an Application Load Balancer.

" } }, "port": { @@ -7194,7 +7204,7 @@ "status": { "target": "com.amazonaws.vpclattice#TargetStatus", "traits": { - "smithy.api#documentation": "

The status of the target.

• Draining: The target is being deregistered. No new connections will be sent to this target while current connections are being drained. Default draining time is 5 minutes.
• Unavailable: Health checks are unavailable for the target group.
• Healthy: The target is healthy.
• Unhealthy: The target is unhealthy.
• Initial: Initial health checks on the target are being performed.
• Unused: Target group is not used in a service.
" + "smithy.api#documentation": "

The status of the target.

• DRAINING: The target is being deregistered. No new connections are sent to this target while current connections are being drained. The default draining time is 5 minutes.
• UNAVAILABLE: Health checks are unavailable for the target group.
• HEALTHY: The target is healthy.
• UNHEALTHY: The target is unhealthy.
• INITIAL: Initial health checks on the target are being performed.
• UNUSED: Target group is not used in a service.
" } }, "reasonCode": { @@ -7825,7 +7835,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the service network and VPC association. Once you add a security group, it cannot be\n removed.

", + "smithy.api#documentation": "

Updates the service network and VPC association. If you add a security group to the service\n network and VPC association, the association must continue to always have at least one security\n group. You can add or edit security groups at any time. However, to remove all security groups,\n you must first delete the association and recreate it without security groups.

", "smithy.api#http": { "code": 200, "uri": "/servicenetworkvpcassociations/{serviceNetworkVpcAssociationIdentifier}", @@ -7848,7 +7858,7 @@ "securityGroupIds": { "target": "com.amazonaws.vpclattice#SecurityGroupList", "traits": { - "smithy.api#documentation": "

The IDs of the security groups. Once you add a security group, it cannot be removed.

", + "smithy.api#documentation": "

The IDs of the security groups.

", "smithy.api#length": { "min": 1, "max": 5 @@ -7907,7 +7917,7 @@ "certificateArn": { "target": "com.amazonaws.vpclattice#CertificateArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the certificate.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the certificate.

" } }, "authType": { @@ -7948,7 +7958,7 @@ "certificateArn": { "target": "com.amazonaws.vpclattice#CertificateArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the certificate.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the certificate.

" } }, "authType": { @@ -8100,7 +8110,7 @@ "message": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Additional details about why the validation failed.

", + "smithy.api#documentation": "

Additional information about why the validation failed.

", "smithy.api#required": {} } } @@ -8161,7 +8171,7 @@ "weight": { "target": "com.amazonaws.vpclattice#TargetGroupWeight", "traits": { - "smithy.api#documentation": "

Only required if you specify multiple target groups for a forward action. The \"weight\"\n determines how requests are distributed to the target group. For example, if you specify two\n target groups, each with a weight of 10, each target group receives half the requests. If you\n specify two target groups, one with a weight of 10 and the other with a weight of 20, the target\n group with a weight of 20 receives twice as many requests as the other target group. If there's\n only one target group specified, then the default value is 100.

" + "smithy.api#documentation": "

Only required if you specify multiple target groups for a forward action. The weight\n determines how requests are distributed to the target group. For example, if you specify two\n target groups, each with a weight of 10, each target group receives half the requests. If you\n specify two target groups, one with a weight of 10 and the other with a weight of 20, the target\n group with a weight of 20 receives twice as many requests as the other target group. If there's\n only one target group specified, then the default value is 100.

" } } }, @@ -8177,7 +8187,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 2 + "max": 10 } } } diff --git a/models/waf.json b/models/waf.json index be68ad1a60..197779e7fc 100644 --- a/models/waf.json +++ b/models/waf.json @@ -7266,6 +7266,21 @@ ] } } + ], + "smithy.test#smokeTests": [ + { + "id": "ListRulesSuccess", + "params": { + "Limit": 20 + }, + "vendorParams": { + "region": "us-east-1" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } ] } }, diff --git a/models/wafv2.json b/models/wafv2.json index fcf56f5884..a1d41b158c 100644 --- a/models/wafv2.json +++ b/models/wafv2.json @@ -1901,7 +1901,7 @@ "SearchString": { "target": "com.amazonaws.wafv2#SearchString", "traits": { - "smithy.api#documentation": "

A string value that you want WAF to search for. WAF searches only in the part of\n web requests that you designate for inspection in FieldToMatch. The\n maximum length of the value is 200 bytes.

\n

Valid values depend on the component that you specify for inspection in\n FieldToMatch:

\n
    \n
  • \n

    \n Method: The HTTP method that you want WAF to search for. This\n indicates the type of operation specified in the request.

    \n
  • \n
  • \n

    \n UriPath: The value that you want WAF to search for in the URI path,\n for example, /images/daily-ad.jpg.

    \n
  • \n
  • \n

    \n JA3Fingerprint: Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to \n EXACTLY.

    \n

    You can obtain the JA3 fingerprint for client requests from the web ACL logs. \n\t\t\t\t\t\tIf WAF is able to calculate the fingerprint, it includes it in the logs. \n\t\t\t\t\t\tFor information about the logging fields, \nsee Log fields in the WAF Developer Guide.

    \n
  • \n
  • \n

    \n HeaderOrder: The list of header names to match for. WAF creates a \n string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.

    \n
  • \n
\n

If SearchString includes alphabetic characters A-Z and a-z, note that the\n value is case sensitive.

\n

\n If you're using the WAF API\n

\n

Specify a base64-encoded version of the value. The maximum length of the value before\n you base64-encode it is 200 bytes.

\n

For example, suppose the value of Type is HEADER and the value\n of Data is User-Agent. If you want to search the\n User-Agent header for the value BadBot, you base64-encode\n BadBot using MIME base64-encoding and include the resulting value,\n QmFkQm90, in the value of SearchString.

\n

\n If you're using the CLI or one of the Amazon Web Services SDKs\n

\n

The value that you want WAF to search for. The SDK automatically base64 encodes the\n value.

", + "smithy.api#documentation": "

A string value that you want WAF to search for. WAF searches only in the part of\n web requests that you designate for inspection in FieldToMatch. The\n maximum length of the value is 200 bytes.

\n

Valid values depend on the component that you specify for inspection in\n FieldToMatch:

\n
    \n
  • \n

    \n Method: The HTTP method that you want WAF to search for. This\n indicates the type of operation specified in the request.

    \n
  • \n
  • \n

    \n UriPath: The value that you want WAF to search for in the URI path,\n for example, /images/daily-ad.jpg.

    \n
  • \n
  • \n

    \n JA3Fingerprint: Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to \n EXACTLY.

    \n

    You can obtain the JA3 fingerprint for client requests from the web ACL logs. \n\t\t\t\t\t\tIf WAF is able to calculate the fingerprint, it includes it in the logs. \n\t\t\t\t\t\tFor information about the logging fields, \nsee Log fields in the WAF Developer Guide.

    \n
  • \n
  • \n

    \n HeaderOrder: The list of header names to match for. WAF creates a \n string that contains the ordered list of header names, from the headers in the web request, and then matches against that string.

    \n
  • \n
\n

If SearchString includes alphabetic characters A-Z and a-z, note that the\n value is case sensitive.

\n

\n If you're using the WAF API\n

\n

Specify a base64-encoded version of the value. The maximum length of the value before\n you base64-encode it is 200 bytes.

\n

For example, suppose the value of Type is HEADER and the value\n of Data is User-Agent. If you want to search the\n User-Agent header for the value BadBot, you base64-encode\n BadBot using MIME base64-encoding and include the resulting value,\n QmFkQm90, in the value of SearchString.

\n

\n If you're using the CLI or one of the Amazon Web Services SDKs\n

\n

The value that you want WAF to search for. The SDK automatically base64 encodes the\n value.

", "smithy.api#required": {} } }, @@ -4739,6 +4739,18 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration.

", "smithy.api#required": {} } + }, + "LogType": { + "target": "com.amazonaws.wafv2#LogType", + "traits": { + "smithy.api#documentation": "

Used to distinguish between various logging options. Currently, there is one option.

\n

Default: WAF_LOGS\n

" + } + }, + "LogScope": { + "target": "com.amazonaws.wafv2#LogScope", + "traits": { + "smithy.api#documentation": "

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

\n

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see \n Collecting data from Amazon Web Services services\n in the Amazon Security Lake user guide.

\n

Default: CUSTOMER\n
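Putting the new members together, a DeleteLoggingConfiguration request that targets the default, customer-managed WAF logs might look like the sketch below; the ResourceArn, LogType, and LogScope member names come from this model, and the web ACL ARN is a placeholder.

```json
{
  "ResourceArn": "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-acl/11111111-2222-3333-4444-555555555555",
  "LogType": "WAF_LOGS",
  "LogScope": "CUSTOMER"
}
```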

" + } } }, "traits": { @@ -5554,12 +5566,12 @@ "JA3Fingerprint": { "target": "com.amazonaws.wafv2#JA3Fingerprint", "traits": { - "smithy.api#documentation": "

Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each\n\t\t\t\t\t\trequest that has enough TLS Client Hello information for the calculation. Almost \n all web requests include this information.

\n \n

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to \n EXACTLY.

\n
\n

You can obtain the JA3 fingerprint for client requests from the web ACL logs. \n\t\t\t\t\t\tIf WAF is able to calculate the fingerprint, it includes it in the logs. \n\t\t\t\t\t\tFor information about the logging fields, \nsee Log fields in the WAF Developer Guide.

\n

Provide the JA3 fingerprint string from the logs in your string match statement\n\t\t\t\t\t\t\tspecification, to match with any future requests that have the same TLS configuration.

" + "smithy.api#documentation": "

Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each\n\t\t\t\t\t\trequest that has enough TLS Client Hello information for the calculation. Almost \n all web requests include this information.

\n \n

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to \n EXACTLY.

\n
\n

You can obtain the JA3 fingerprint for client requests from the web ACL logs. \n\t\t\t\t\t\tIf WAF is able to calculate the fingerprint, it includes it in the logs. \n\t\t\t\t\t\tFor information about the logging fields, \nsee Log fields in the WAF Developer Guide.

\n

Provide the JA3 fingerprint string from the logs in your string match statement\n\t\t\t\t\t\t\tspecification, to match with any future requests that have the same TLS configuration.
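As a sketch, a byte match statement keyed on the JA3 fingerprint could look like the following; the 32-character hash is a placeholder you would copy from the web ACL logs, the FallbackBehavior member is an assumption about the JA3Fingerprint shape, and note that raw API callers base64-encode SearchString while the CLI and SDKs do so automatically, per the SearchString documentation above.

```json
{
  "ByteMatchStatement": {
    "FieldToMatch": { "JA3Fingerprint": { "FallbackBehavior": "NO_MATCH" } },
    "PositionalConstraint": "EXACTLY",
    "SearchString": "e7d705a3286e19ea42f587b344ee6865",
    "TextTransformations": [ { "Priority": 0, "Type": "NONE" } ]
  }
}
```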

" } } }, "traits": { - "smithy.api#documentation": "

Specifies a web request component to be used in a rule match statement or in a logging configuration.

\n
    \n
  • \n

    In a rule statement, this is the part of the web request that you want WAF to inspect. Include the single\n FieldToMatch type that you want to inspect, with additional specifications\n as needed, according to the type. You specify a single request component in\n FieldToMatch for each rule statement that requires it. To inspect more than\n one component of the web request, create a separate rule statement for each\n component.

    \n

    Example JSON for a QueryString field to match:

    \n

    \n \"FieldToMatch\": { \"QueryString\": {} }\n

    \n

    Example JSON for a Method field to match specification:

    \n

    \n \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }\n

    \n
  • \n
  • \n

    In a logging configuration, this is used in the RedactedFields property to specify a field to \n redact from the logging records. For this use case, note the following:

    \n
      \n
    • \n

      Even though all FieldToMatch settings \n are available, the only valid settings for field redaction are UriPath, QueryString, SingleHeader, and Method.

      \n
    • \n
    • \n

      In this documentation, the descriptions of the individual fields talk about specifying the web request component to inspect, \n but for field redaction, you are specifying the component type to redact from the logs.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Specifies a web request component to be used in a rule match statement or in a logging configuration.

\n
    \n
  • \n

    In a rule statement, this is the part of the web request that you want WAF to inspect. Include the single\n FieldToMatch type that you want to inspect, with additional specifications\n as needed, according to the type. You specify a single request component in\n FieldToMatch for each rule statement that requires it. To inspect more than\n one component of the web request, create a separate rule statement for each\n component.

    \n

    Example JSON for a QueryString field to match:

    \n

    \n \"FieldToMatch\": { \"QueryString\": {} }\n

    \n

    Example JSON for a Method field to match specification:

    \n

    \n \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }\n

    \n
  • \n
  • \n

    In a logging configuration, this is used in the RedactedFields property to specify a field to \n redact from the logging records. For this use case, note the following:

    \n
      \n
    • \n

      Even though all FieldToMatch settings \n are available, the only valid settings for field redaction are UriPath, QueryString, SingleHeader, and Method.

      \n
    • \n
    • \n

      In this documentation, the descriptions of the individual fields talk about specifying the web request component to inspect, \n but for field redaction, you are specifying the component type to redact from the logs.

      \n
    • \n
    • \n

      If you have request sampling enabled, the redacted fields configuration for logging has no impact on sampling. \n The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.

      \n
    • \n
    \n
  • \n
" } }, "com.amazonaws.wafv2#FieldToMatchData": { @@ -6034,6 +6046,18 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration.

", "smithy.api#required": {} } + }, + "LogType": { + "target": "com.amazonaws.wafv2#LogType", + "traits": { + "smithy.api#documentation": "

Used to distinguish between various logging options. Currently, there is one option.

\n

Default: WAF_LOGS\n

" + } + }, + "LogScope": { + "target": "com.amazonaws.wafv2#LogScope", + "traits": { + "smithy.api#documentation": "

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER
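
A minimal sketch of the request body these new members extend, assuming the wire names shown in this hunk (ResourceArn, LogType, LogScope). The local Codable struct and the ARN are illustrative stand-ins, not the Soto-generated GetLoggingConfigurationRequest.

```swift
import Foundation

// Local illustration of a GetLoggingConfiguration request body after this change.
// Member names mirror the Smithy model; this is not the Soto-generated shape.
struct GetLoggingConfigurationRequest: Encodable {
    let resourceArn: String
    let logType: String?    // currently only "WAF_LOGS"
    let logScope: String?   // "CUSTOMER" (default) or "SECURITY_LAKE"

    enum CodingKeys: String, CodingKey {
        case resourceArn = "ResourceArn"
        case logType = "LogType"
        case logScope = "LogScope"
    }
}

// Placeholder ARN for illustration only.
let request = GetLoggingConfigurationRequest(
    resourceArn: "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example/11111111-2222-3333-4444-555555555555",
    logType: "WAF_LOGS",
    logScope: "CUSTOMER"
)

let encoder = JSONEncoder()
encoder.outputFormatting = [.prettyPrinted, .sortedKeys]
if let data = try? encoder.encode(request) {
    print(String(data: data, encoding: .utf8) ?? "")
}
```
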

" + } } }, "traits": { @@ -7106,7 +7130,7 @@ } }, "traits": { - "smithy.api#documentation": "

Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

Provide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.

" + "smithy.api#documentation": "

Available for use with Amazon CloudFront distributions and Application Load Balancers. Match against the request's JA3 fingerprint. The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request. This fingerprint serves as a unique identifier for the client's TLS configuration. WAF calculates and logs this fingerprint for each request that has enough TLS Client Hello information for the calculation. Almost all web requests include this information.

You can use this choice only with a string match ByteMatchStatement with the PositionalConstraint set to EXACTLY.

You can obtain the JA3 fingerprint for client requests from the web ACL logs. If WAF is able to calculate the fingerprint, it includes it in the logs. For information about the logging fields, see Log fields in the WAF Developer Guide.

Provide the JA3 fingerprint string from the logs in your string match statement specification, to match with any future requests that have the same TLS configuration.
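
A hedged sketch of what such a string match statement could look like on the wire, built from plain dictionaries rather than Soto shapes. The fingerprint value is a placeholder standing in for a value copied from the logs, and the FallbackBehavior and TextTransformations members are assumptions about the surrounding model, not something this hunk confirms.

```swift
import Foundation

// Sketch of a ByteMatchStatement that matches a JA3 fingerprint taken from the web ACL logs.
// Plain dictionaries, not Soto shapes; the fingerprint below is a placeholder.
let ja3FromLogs = "0123456789abcdef0123456789abcdef"   // 32-character hash copied from the logs

let fieldToMatch: [String: Any] = [
    "JA3Fingerprint": ["FallbackBehavior": "NO_MATCH"]  // FallbackBehavior member assumed here
]
let textTransformation: [String: Any] = ["Priority": 0, "Type": "NONE"]

let byteMatchStatement: [String: Any] = [
    "ByteMatchStatement": [
        "FieldToMatch": fieldToMatch,
        "PositionalConstraint": "EXACTLY",              // the only constraint valid for JA3 matching
        "SearchString": Data(ja3FromLogs.utf8).base64EncodedString(),  // blobs travel base64-encoded
        "TextTransformations": [textTransformation]
    ] as [String: Any]
]

if let data = try? JSONSerialization.data(withJSONObject: byteMatchStatement, options: [.prettyPrinted]),
   let json = String(data: data, encoding: .utf8) {
    print(json)
}
```
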

" } }, "com.amazonaws.wafv2#JsonBody": { @@ -7688,6 +7712,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of objects that you want WAF to return for this request. If more objects are available, in the response, WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
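
The Limit/NextMarker description implies the usual marker-based pagination loop. A small generic sketch follows, with fetchPage standing in for whichever List call is being paginated; it is not a real API.

```swift
// Generic marker-based pagination loop implied by the Limit/NextMarker description.
// `fetchPage` is a stand-in for the actual List call; it is not a real API.
struct Page {
    let items: [String]
    let nextMarker: String?
}

func collectAll(fetchPage: (_ marker: String?, _ limit: Int) -> Page) -> [String] {
    var results: [String] = []
    var marker: String? = nil
    repeat {
        let page = fetchPage(marker, 100)   // Limit: maximum objects per request
        results.append(contentsOf: page.items)
        marker = page.nextMarker            // nil when there are no more objects
    } while marker != nil
    return results
}

// Example with a fake two-page source:
let pages = [Page(items: ["a", "b"], nextMarker: "m1"), Page(items: ["c"], nextMarker: nil)]
var index = 0
let all = collectAll { _, _ in defer { index += 1 }; return pages[index] }
print(all)   // ["a", "b", "c"]
```
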

" } + }, + "LogScope": { + "target": "com.amazonaws.wafv2#LogScope", + "traits": { + "smithy.api#documentation": "

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER

" + } } }, "traits": { @@ -8251,6 +8281,34 @@ } } }, + "com.amazonaws.wafv2#LogScope": { + "type": "enum", + "members": { + "CUSTOMER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOMER" + } + }, + "SECURITY_LAKE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SECURITY_LAKE" + } + } + } + }, + "com.amazonaws.wafv2#LogType": { + "type": "enum", + "members": { + "WAF_LOGS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WAF_LOGS" + } + } + } + }, "com.amazonaws.wafv2#LoggingConfiguration": { "type": "structure", "members": { @@ -8271,7 +8329,7 @@ "RedactedFields": { "target": "com.amazonaws.wafv2#RedactedFields", "traits": { - "smithy.api#documentation": "

The parts of the request that you want to keep out of the logs.

For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting.

Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch.

You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method.

" + "smithy.api#documentation": "

The parts of the request that you want to keep out of the logs.

For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting.

Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch.

You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method.

This setting has no impact on request sampling. With request sampling, the only way to exclude fields is by disabling sampling in the web ACL visibility configuration.

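A sketch of a logging configuration payload that follows these rules, again as plain dictionaries rather than Soto shapes. The ARNs are placeholders, and the LogType/LogScope values mirror the members added elsewhere in this hunk.

```swift
import Foundation

// Sketch of a LoggingConfiguration payload with redaction limited to the allowed field types.
// ARNs are placeholders; these are dictionaries, not the Soto-generated shapes.
let redactedHeader: [String: Any] = ["SingleHeader": ["Name": "authorization"]]
let redactedQueryString: [String: Any] = ["QueryString": [String: Any]()]

let loggingConfiguration: [String: Any] = [
    "ResourceArn": "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example/11111111-2222-3333-4444-555555555555",
    "LogDestinationConfigs": [
        "arn:aws:logs:us-east-1:123456789012:log-group:aws-waf-logs-example"
    ],
    // Only UriPath, QueryString, SingleHeader, and Method are valid for redaction.
    "RedactedFields": [redactedHeader, redactedQueryString],
    "LogType": "WAF_LOGS",    // new member in this hunk
    "LogScope": "CUSTOMER"    // new member in this hunk
]

if let data = try? JSONSerialization.data(withJSONObject: loggingConfiguration, options: [.prettyPrinted]),
   let json = String(data: data, encoding: .utf8) {
    print(json)
}
```

Note that, per the paragraph above, this redaction only affects the logs; sampled requests are unaffected.
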
" } }, "ManagedByFirewallManager": { @@ -8286,6 +8344,18 @@ "traits": { "smithy.api#documentation": "

Filtering that specifies which web requests are kept in the logs and which are dropped. You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation.

" } + }, + "LogType": { + "target": "com.amazonaws.wafv2#LogType", + "traits": { + "smithy.api#documentation": "

Used to distinguish between various logging options. Currently, there is one option.

Default: WAF_LOGS

" + } + }, + "LogScope": { + "target": "com.amazonaws.wafv2#LogScope", + "traits": { + "smithy.api#documentation": "

The owner of the logging configuration, which must be set to CUSTOMER for the configurations that you manage.

The log scope SECURITY_LAKE indicates a configuration that is managed through Amazon Security Lake. You can use Security Lake to collect log and event data from various sources for normalization, analysis, and management. For information, see Collecting data from Amazon Web Services services in the Amazon Security Lake user guide.

Default: CUSTOMER

" + } } }, "traits": { @@ -12533,7 +12603,7 @@ "target": "com.amazonaws.wafv2#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console.

", + "smithy.api#documentation": "

Indicates whether WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the WAF console.

Request sampling doesn't provide a field redaction option, and any field redaction that you specify in your logging configuration doesn't affect sampling. The only way to exclude fields from request sampling is by disabling sampling in the web ACL visibility configuration.

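A minimal sketch of a visibility configuration with sampling turned off, which is the only way to keep fields out of sampled requests. Plain dictionary; the member names are assumed to mirror the model's VisibilityConfig and are not confirmed by this hunk.

```swift
// The only way to keep fields out of sampled requests is to turn sampling off
// in the web ACL's visibility configuration. Plain dictionary, not a Soto shape.
let visibilityConfig: [String: Any] = [
    "SampledRequestsEnabled": false,       // no sampled requests, so nothing to redact
    "CloudWatchMetricsEnabled": true,
    "MetricName": "example-web-acl"        // placeholder metric name
]
print(visibilityConfig.keys.sorted())
```
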
", "smithy.api#required": {} } }, diff --git a/models/workspaces-thin-client.json b/models/workspaces-thin-client.json index 5b1c36302b..20322a9b47 100644 --- a/models/workspaces-thin-client.json +++ b/models/workspaces-thin-client.json @@ -196,6 +196,12 @@ "traits": { "smithy.api#documentation": "

A map of the key-value pairs of the tag or tags to assign to the resource.

" } + }, + "deviceCreationTags": { + "target": "com.amazonaws.workspacesthinclient#DeviceCreationTagsMap", + "traits": { + "smithy.api#documentation": "

A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment.
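
A small validation sketch for such a tag map, mirroring the DeviceCreationTagKey/DeviceCreationTagValue patterns and the 50-entry map limit defined later in this file's diff. This is local helper code, not the Soto-generated types.

```swift
import Foundation

// Validation sketch for deviceCreationTags, mirroring the key/value patterns and the
// 50-entry map limit defined later in this file. Not Soto API.
let keyPattern = "^(?!aws:)[A-Za-z0-9 _=@:.+-/]+$"
let valuePattern = "^[A-Za-z0-9 _=@:.+-/]+$"

func matches(_ value: String, pattern: String) -> Bool {
    value.range(of: pattern, options: .regularExpression) != nil
}

func isValidDeviceCreationTags(_ tags: [String: String]) -> Bool {
    guard tags.count <= 50 else { return false }            // map length 0...50
    return tags.allSatisfy { (key, value) in
        (1...128).contains(key.count) && matches(key, pattern: keyPattern)
            && value.count <= 256
            && (value.isEmpty || matches(value, pattern: valuePattern))
    }
}

print(isValidDeviceCreationTags(["CostCenter": "1234"]))   // true
print(isValidDeviceCreationTags(["aws:reserved": "x"]))    // false: keys must not start with "aws:"
```
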

" + } } }, "traits": { @@ -665,6 +671,42 @@ "smithy.api#documentation": "

Describes a thin client device.

" } }, + "com.amazonaws.workspacesthinclient#DeviceCreationTagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:)[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "com.amazonaws.workspacesthinclient#DeviceCreationTagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9 _=@:.+-/]+$" + } + }, + "com.amazonaws.workspacesthinclient#DeviceCreationTagsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.workspacesthinclient#DeviceCreationTagKey" + }, + "value": { + "target": "com.amazonaws.workspacesthinclient#DeviceCreationTagValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.workspacesthinclient#DeviceId": { "type": "string", "traits": { @@ -950,6 +992,12 @@ "traits": { "smithy.api#documentation": "

The tag keys and optional values for the resource.

" } + }, + "deviceCreationTags": { + "target": "com.amazonaws.workspacesthinclient#DeviceCreationTagsMap", + "traits": { + "smithy.api#documentation": "

The tag keys and optional values for the newly created devices for this environment.

" + } } }, "traits": { @@ -3167,6 +3215,12 @@ "traits": { "smithy.api#documentation": "

The ID of the software set to apply.

" } + }, + "deviceCreationTags": { + "target": "com.amazonaws.workspacesthinclient#DeviceCreationTagsMap", + "traits": { + "smithy.api#documentation": "

A map of the key-value pairs of the tag or tags to assign to the newly created devices for this environment.

" + } } }, "traits": { diff --git a/models/workspaces-web.json b/models/workspaces-web.json index 15865eb8b1..10dae7296a 100644 --- a/models/workspaces-web.json +++ b/models/workspaces-web.json @@ -64,7 +64,7 @@ "name": "workspaces-web" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

WorkSpaces Web is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Web provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images.

", + "smithy.api#documentation": "

Amazon WorkSpaces Secure Browser is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. WorkSpaces Secure Browser makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Secure Browser provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images.

", "smithy.api#title": "Amazon WorkSpaces Web", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1008,6 +1008,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -1822,6 +1825,12 @@ "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK.
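
A minimal idempotency sketch: generate the client token once and reuse it on every retry, so a retried create returns the original result instead of creating a duplicate. CreateIdentityProviderInput here is a hypothetical stand-in, not the Soto-generated shape, and the ARN is a placeholder.

```swift
import Foundation

// Idempotency sketch: generate the client token once and reuse it on every retry.
// `CreateIdentityProviderInput` is a stand-in type, not the Soto-generated shape.
struct CreateIdentityProviderInput {
    let portalArn: String
    let clientToken: String
}

let clientToken = UUID().uuidString      // what the SDK generates for you if omitted
let input = CreateIdentityProviderInput(
    portalArn: "arn:aws:workspaces-web:us-east-1:123456789012:portal/example",  // placeholder
    clientToken: clientToken
)
// Reuse `input` (same clientToken) for any retry of this create call.
print(input.clientToken)
```
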

", "smithy.api#idempotencyToken": {} } + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

The tags to add to the identity provider resource. A tag is a key-value pair.

" + } } }, "traits": { @@ -1899,7 +1908,7 @@ "target": "com.amazonaws.workspacesweb#TagList", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "

The tags to add to the browser settings resource. A tag is a key-value pair.

" + "smithy.api#documentation": "

The tags to add to the IP access settings resource. A tag is a key-value pair.

" } }, "customerManagedKey": { @@ -2445,6 +2454,12 @@ "traits": { "smithy.api#documentation": "

The additional encryption context of the user settings.

" } + }, + "deepLinkAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + } } }, "traits": { @@ -2953,6 +2968,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -3011,6 +3029,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -3069,6 +3090,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -3127,6 +3151,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -3185,6 +3212,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -3243,6 +3273,9 @@ { "target": "com.amazonaws.workspacesweb#AccessDeniedException" }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, { "target": "com.amazonaws.workspacesweb#InternalServerException" }, @@ -4106,6 +4139,9 @@ }, "identityProviderDetails": { "target": "com.amazonaws.workspacesweb#IdentityProviderDetails" + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList" } }, "create": { @@ -4128,6 +4164,9 @@ "template": "{identityProviderArn}", "absolute": true }, + "aws.api#taggable": { + "property": "tags" + }, "aws.cloudformation#cfnResource": { "name": "IdentityProvider" } @@ -7081,6 +7120,12 @@ "traits": { "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

If the allowlist and blocklist are empty, the configuration becomes null.
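
A hedged sketch of that behavior with local types. The allowlist/blocklist and cookie-specification member names are assumptions recalled from this model, not the Soto-generated shapes.

```swift
// Local sketch of a cookie synchronization configuration; member names are assumptions,
// not the Soto-generated workspaces-web types.
struct CookieSpecification {
    let domain: String
    let name: String?
    let path: String?
}

struct CookieSynchronizationConfiguration {
    var allowlist: [CookieSpecification] = []
    var blocklist: [CookieSpecification] = []
}

// Per the note above: if both lists are empty, there is nothing to send and the
// configuration is effectively null.
func effectiveConfiguration(_ config: CookieSynchronizationConfiguration) -> CookieSynchronizationConfiguration? {
    (config.allowlist.isEmpty && config.blocklist.isEmpty) ? nil : config
}

let config = CookieSynchronizationConfiguration(
    allowlist: [CookieSpecification(domain: ".example.com", name: "session", path: "/")]
)
print(effectiveConfiguration(config) != nil)   // true
print(effectiveConfiguration(CookieSynchronizationConfiguration()) != nil)   // false
```
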

" } + }, + "deepLinkAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + } } }, "traits": { @@ -7283,6 +7328,12 @@ "traits": { "smithy.api#documentation": "

The additional encryption context of the user settings.

" } + }, + "deepLinkAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + } } }, "traits": { @@ -7338,6 +7389,9 @@ }, "cookieSynchronizationConfiguration": { "target": "com.amazonaws.workspacesweb#CookieSynchronizationConfiguration" + }, + "deepLinkAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType" } }, "create": { @@ -7428,6 +7482,12 @@ "traits": { "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

" } + }, + "deepLinkAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + } } }, "traits": {